| Column | Type | Observed range |
|---|---|---|
| file_id | string | length 5-9 |
| content | string | length 100-5.25M |
| local_path | string | length 66-70 |
| kaggle_dataset_name | string (nullable) | length 3-50 |
| kaggle_dataset_owner | string (nullable) | length 3-20 |
| kversion | string (nullable) | length 497-763 |
| kversion_datasetsources | string (nullable) | length 71-5.46k |
| dataset_versions | string (nullable) | length 338-235k |
| datasets | string (nullable) | length 334-371 |
| users | string (nullable) | length 111-264 |
| script | string | length 100-5.25M |
| df_info | string | length 0-4.87M |
| has_data_info | bool | 2 classes |
| nb_filenames | int64 | 0-370 |
| retreived_data_description | string | length 0-4.44M |
| script_nb_tokens | int64 | 25-663k |
| upvotes | int64 | 0-1.65k |
| tokens_description | int64 | 25-663k |
| tokens_script | int64 | 25-663k |
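The schema above describes one row per Kaggle notebook, pairing the raw script with its Kaggle metadata and token counts. As a minimal sketch, a dataset with this schema could be inspected lazily with the Hugging Face `datasets` library; the dataset identifier below is a placeholder assumption, and the column names are taken from the table above.

```python
from itertools import islice

from datasets import load_dataset

# "org/kaggle-code-data" is a placeholder identifier; substitute the real dataset name.
ds = load_dataset("org/kaggle-code-data", split="train", streaming=True)

# Peek at a few rows and a handful of the columns described in the schema table.
for row in islice(ds, 3):
    print(row["file_id"], row["upvotes"], row["script_nb_tokens"])
    print(row["script"][:200])  # first 200 characters of the notebook script
```

Streaming avoids downloading the multi-megabyte `content`/`script` columns for every row before you have decided which records you actually need.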
129193444
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import json  # needed for json.dump below
import riot_api
api = riot_api.RiotAPI(your_api_key)  # your_api_key must be set to a valid Riot Games API key first
response = api.get_summoner_by_name("Your Summoner Name")
summoner = response.json()
with open("summoner.json", "w") as f:
json.dump(summoner, f)
import riot_api
# Get your API key from https://developer.riotgames.com/
api_key = "YOUR_API_KEY"
# Create a Riot API object
api = riot_api.RiotAPI(api_key)
# Get a list of all champions
champions = api.get_champions()
# Save the champions to a file
with open("champions.json", "w") as f:
json.dump(champions, f)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/193/129193444.ipynb
| null | null |
[{"Id": 129193444, "ScriptId": 38408602, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14273573, "CreationDate": "05/11/2023 17:39:17", "VersionNumber": 1.0, "Title": "notebook7c6c041c4f", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 44.0, "LinesInsertedFromPrevious": 44.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import json  # needed for json.dump below
import riot_api
api = riot_api.RiotAPI(your_api_key)  # your_api_key must be set to a valid Riot Games API key first
response = api.get_summoner_by_name("Your Summoner Name")
summoner = response.json()
with open("summoner.json", "w") as f:
json.dump(summoner, f)
import riot_api
# Get your API key from https://developer.riotgames.com/
api_key = "YOUR_API_KEY"
# Create a Riot API object
api = riot_api.RiotAPI(api_key)
# Get a list of all champions
champions = api.get_champions()
# Save the champions to a file
with open("champions.json", "w") as f:
json.dump(champions, f)
| false | 0 | 376 | 0 | 376 | 376 |
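The notebook above goes through a `riot_api` wrapper object. As a rough, hedged sketch of what such a call involves at the HTTP level, the snippet below uses `requests` directly; the endpoint path and the `X-Riot-Token` header are assumptions based on the public Riot Games API documentation, and the API key and summoner name are placeholders.

```python
import json

import requests

API_KEY = "YOUR_API_KEY"  # placeholder; issued at https://developer.riotgames.com/
# Assumed endpoint shape; check the current Riot API reference before relying on it.
url = "https://euw1.api.riotgames.com/lol/summoner/v4/summoners/by-name/Your Summoner Name"

resp = requests.get(url, headers={"X-Riot-Token": API_KEY}, timeout=10)
resp.raise_for_status()  # surface HTTP errors (403 bad key, 404 unknown summoner, 429 rate limit)

with open("summoner.json", "w") as f:
    json.dump(resp.json(), f, indent=2)
```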
||
129193820
|
numbers = [13, 25, 39, 46, 54, 68]
list = [34, 21, 29, 86, 29]
list2 = [1, 2, 3, 4]
list.append(3)
list.insert(2, 3)
list.remove(29)
list.extend(list2)
[0] * 5
list = [4, 23, 16, 7, 29, 56, 81]
list[3:6]
len(list)
list1 = ["elements", "of", "our", "list"]
list2 = [21, 29, 86, 19, 42]
print("This list is" + str(list1))
print("This list is" + str(list2))
print("This list is", list1)
print("This list is", list2)
range(5)
range(0, 10, 2)
list = [3, 6, 5, 7, 15]
for i in list:
print(i)
list = [3, 6, 5, 7, 15]
for i in range(len(list)):
list[i] = list[i] + 1
print(list[i])
s = "Hello!"
s[1]
s[-1]
s[1:5]
s.count("l")
s = (123, 456, 789, 246, 357)
s[1]
s[-1]
s[1:4]
numbers = [12, 49, -2, 26, 5, 17, -6]
# Initialize List Variable
list1 = [2, 4, 6, 8, 1, 3, 5, 7, 0]
print("Original list")
print(list1)
print()
# List is ORDERED, as it can be sort with order
print("List is ORDERED, as it can be sort with order")
list1.sort()
print(list1)
print()
# List is CHANGABLE, You can change value of list element
print("List is CHANGABLE, You can change value of list element")
list1[1] = 2000
print(list1)
list1[1] = 4
print(list1)
print()
# List is INDEXED, You can get index of an element
list1 = [2, 4, 6, 8, 1, 3, 5, 7, 0]
print("List is INDEXED, You can get by its index")
print(list1[3]) # Get element by its index
print(list1.index(3)) # Get index by its element
print()
# List can have DUPLICATE values
list1 = [2, 4, 6, 8, 1, 3, 5, 7, 0, 2, 4, 6, 8, 1, 3, 5, 7, 0]
print("List can have DUPLICATE values")
print(list1)
# Initialize List Variable
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0)
print("Original tuple")
print(tuple1)
print()
# Tuple is ORDERED, as it can be sort with order
print("Tuple is ORDERED, as it can be sort with order")
tuple1 = sorted(tuple1)
print(tuple1) # It will give List
print()
# Tuple is NOT CHANGABLE, You can not change value of tuple element
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0)
print("Tuple is NOT CHANGABLE, You can not change value of tuple element")
# If you open below line, than it will give error
# tuple1[1] = 2000
# Tuple is INDEXED, You can get index of an element
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0)  # use an actual tuple here (the original accidentally used a list)
print("Tuple is INDEXED, You can get by its index")
print(tuple1[3]) # Get element by its index
print(tuple1.index(3)) # Get index by its element
print()
# Tuple can have DUPLICATE values
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0, 2, 4, 6, 8, 1, 3, 5, 7, 0)
print("Tuple can have DUPLICATE values")
print(tuple1)
# Initialize Set Variable
set1 = {2, 4, 6, 8, 1, 3, 5, 7, 0}
print("Original set changed as it may return the changed sequence of the element")
# Set is NOT ORDERED,
print("Set is NOT ORDERED")
print(set1)
print()
# By below code you can change order which give output as List
print("By below code you can change order which give output as List")
set2 = sorted(set1, reverse=True)
print(set2) # It will give List
print()
# Set is CHANGEABLE: elements can be added and removed with add()/remove()
# (note: the demo below actually mutates a dict, not a set)
print("Set is CHANGABLE, You can change value of set element")
set_dict = {1: "One", 2: "Two"}
print("Original set value")
print(set_dict)
set_dict.update({1: "Three", 2: "Four"})
print("Updated set value")
print(set_dict)
print()
# Set is NOT INDEXED, You can not get element value by index
print("Set is NOT INDEXED, You can not get element value by index")
# Opening below line will throw error, "'set' object is not subscriptable"
# print(set1[3]) # Get element by its index
print()
# Set can NOT HAVE DUPLICATE values, Duplicate values will be ignored
set1 = {2, 4, 6, 8, 1, 3, 5, 7, 0, 2, 4, 6, 8, 1, 3, 5, 7, 0}
print("Set can NOT HAVE DUPLICATE values, Duplicate values will be ignored")
print(set1)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/193/129193820.ipynb
| null | null |
[{"Id": 129193820, "ScriptId": 35883702, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14087623, "CreationDate": "05/11/2023 17:43:11", "VersionNumber": 3.0, "Title": "FATHI AZIZ 2209020058", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 147.0, "LinesInsertedFromPrevious": 93.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 54.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
numbers = [13, 25, 39, 46, 54, 68]
list = [34, 21, 29, 86, 29]
list2 = [1, 2, 3, 4]
list.append(3)
list.insert(2, 3)
list.remove(29)
list.extend(list2)
[0] * 5
list = [4, 23, 16, 7, 29, 56, 81]
list[3:6]
len(list)
list1 = ["elements", "of", "our", "list"]
list2 = [21, 29, 86, 19, 42]
print("This list is" + str(list1))
print("This list is" + str(list2))
print("This list is", list1)
print("This list is", list2)
range(5)
range(0, 10, 2)
list = [3, 6, 5, 7, 15]
for i in list:
print(i)
list = [3, 6, 5, 7, 15]
for i in range(len(list)):
list[i] = list[i] + 1
print(list[i])
s = "Hello!"
s[1]
s[-1]
s[1:5]
s.count("l")
s = (123, 456, 789, 246, 357)
s[1]
s[-1]
s[1:4]
numbers = [12, 49, -2, 26, 5, 17, -6]
# Initialize List Variable
list1 = [2, 4, 6, 8, 1, 3, 5, 7, 0]
print("Original list")
print(list1)
print()
# List is ORDERED, as it can be sort with order
print("List is ORDERED, as it can be sort with order")
list1.sort()
print(list1)
print()
# List is CHANGABLE, You can change value of list element
print("List is CHANGABLE, You can change value of list element")
list1[1] = 2000
print(list1)
list1[1] = 4
print(list1)
print()
# List is INDEXED, You can get index of an element
list1 = [2, 4, 6, 8, 1, 3, 5, 7, 0]
print("List is INDEXED, You can get by its index")
print(list1[3]) # Get element by its index
print(list1.index(3)) # Get index by its element
print()
# List can have DUPLICATE values
list1 = [2, 4, 6, 8, 1, 3, 5, 7, 0, 2, 4, 6, 8, 1, 3, 5, 7, 0]
print("List can have DUPLICATE values")
print(list1)
# Initialize List Variable
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0)
print("Original tuple")
print(tuple1)
print()
# Tuple is ORDERED, as it can be sort with order
print("Tuple is ORDERED, as it can be sort with order")
tuple1 = sorted(tuple1)
print(tuple1) # It will give List
print()
# Tuple is NOT CHANGABLE, You can not change value of tuple element
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0)
print("Tuple is NOT CHANGABLE, You can not change value of tuple element")
# If you open below line, than it will give error
# tuple1[1] = 2000
# Tuple is INDEXED, You can get index of an element
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0)  # use an actual tuple here (the original accidentally used a list)
print("Tuple is INDEXED, You can get by its index")
print(tuple1[3]) # Get element by its index
print(tuple1.index(3)) # Get index by its element
print()
# Tuple can have DUPLICATE values
tuple1 = (2, 4, 6, 8, 1, 3, 5, 7, 0, 2, 4, 6, 8, 1, 3, 5, 7, 0)
print("Tuple can have DUPLICATE values")
print(tuple1)
# Initialize Set Variable
set1 = {2, 4, 6, 8, 1, 3, 5, 7, 0}
print("Original set changed as it may return the changed sequence of the element")
# Set is NOT ORDERED,
print("Set is NOT ORDERED")
print(set1)
print()
# By below code you can change order which give output as List
print("By below code you can change order which give output as List")
set2 = sorted(set1, reverse=True)
print(set2) # It will give List
print()
# Set is CHANGEABLE: elements can be added and removed with add()/remove()
# (note: the demo below actually mutates a dict, not a set)
print("Set is CHANGABLE, You can change value of set element")
set_dict = {1: "One", 2: "Two"}
print("Original set value")
print(set_dict)
set_dict.update({1: "Three", 2: "Four"})
print("Updated set value")
print(set_dict)
print()
# Set is NOT INDEXED, You can not get element value by index
print("Set is NOT INDEXED, You can not get element value by index")
# Opening below line will throw error, "'set' object is not subscriptable"
# print(set1[3]) # Get element by its index
print()
# Set can NOT HAVE DUPLICATE values, Duplicate values will be ignored
set1 = {2, 4, 6, 8, 1, 3, 5, 7, 0, 2, 4, 6, 8, 1, 3, 5, 7, 0}
print("Set can NOT HAVE DUPLICATE values, Duplicate values will be ignored")
print(set1)
| false | 0 | 1,563 | 0 | 1,563 | 1,563 |
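One correction worth illustrating: the "set is changeable" demonstration in the notebook above mutates a dict, not a set. A minimal sketch of mutating an actual set:

```python
set1 = {2, 4, 6, 8, 1, 3, 5, 7, 0}

set1.add(10)      # insert a new element
set1.discard(8)   # remove an element if present (no error if missing)
set1.remove(0)    # remove an element (raises KeyError if missing)

print(set1)
print(10 in set1)  # fast membership tests are the typical reason to use a set
```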
||
129193902
|
import numpy as np
import pandas as pd
import lofo
df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
print(df.shape)
df.head()
features = [f for f in df.columns if f not in {"Id", "Class"}]
df["EJ"] = df["EJ"].astype("category")
ds = lofo.Dataset(df, target="Class", features=features, auto_group_threshold=0.5)
lofo_imp = lofo.LOFOImportance(ds, cv=5, scoring="neg_log_loss")
imp_df = lofo_imp.get_importance()
lofo.plot_importance(imp_df, figsize=(8, 8), kind="box")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/193/129193902.ipynb
| null | null |
[{"Id": 129193902, "ScriptId": 38408533, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 471945, "CreationDate": "05/11/2023 17:44:14", "VersionNumber": 1.0, "Title": "ICR LOFO Feature Importance", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 22.0, "LinesInsertedFromPrevious": 22.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 30}]
| null | null | null | null |
import numpy as np
import pandas as pd
import lofo
df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
print(df.shape)
df.head()
features = [f for f in df.columns if f not in {"Id", "Class"}]
df["EJ"] = df["EJ"].astype("category")
ds = lofo.Dataset(df, target="Class", features=features, auto_group_threshold=0.5)
lofo_imp = lofo.LOFOImportance(ds, cv=5, scoring="neg_log_loss")
imp_df = lofo_imp.get_importance()
lofo.plot_importance(imp_df, figsize=(8, 8), kind="box")
| false | 0 | 186 | 30 | 186 | 186 |
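For context on the notebook above: LOFO (leave-one-feature-out) importance scores a model with all features, then re-scores it with each feature removed and reports the drop. A hand-rolled sketch of that idea with scikit-learn on synthetic data (not the competition data, and not the `lofo` package's exact procedure):

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=500, n_features=6, random_state=0)
features = [f"f{i}" for i in range(X.shape[1])]


def cv_score(cols):
    """Mean 5-fold CV score (sklearn's negated log-loss) using only the given column indices."""
    model = LogisticRegression(max_iter=1000)
    return cross_val_score(model, X[:, cols], y, cv=5, scoring="neg_log_loss").mean()


baseline = cv_score(list(range(X.shape[1])))
for i, name in enumerate(features):
    without = [j for j in range(X.shape[1]) if j != i]
    # Positive importance: the CV score got worse when the feature was dropped.
    print(name, baseline - cv_score(without))
```

The `lofo` package adds refinements on top of this basic loop (grouped features, shuffling instead of dropping, proper noise estimates), but the importance it plots has the same interpretation.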
||
129193192
|
# for importing packages and libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy import stats
# for reading the data file
data = pd.read_csv("/kaggle/input/air-quality/air_quality.csv")
# for initial observations of the data
data.head(10)
# for observing the number of (rows, columns) of data
data.shape
# creating a histogram to visualize the distribution of the data
# there is a skew, but it is still bell shaped
data["aqi_log"].hist()
# for creating a Quantile-Quantile plot
# another way of seeing if the data is normally distributed
# if the data follows a straight line, then a normal distribution should fit the data
fig = sm.qqplot(data["aqi_log"], line="s")
plt.show()
# for defining variable for aqi_log mean
mean_aqi_log = data["aqi_log"].mean()
print(mean_aqi_log)
# for defining variable for aqi_log standard deviation
std_aqi_log = data["aqi_log"].std()
print(std_aqi_log)
# for defining variable for lower limit, 1 standard deviation below the mean
lower_limit = mean_aqi_log - 1 * std_aqi_log
# for defining variable for upper limit, 1 standard deviation above the mean
upper_limit = mean_aqi_log + 1 * std_aqi_log
print(lower_limit, upper_limit)
# for displaying the actual percentage of data that falls within 1 standard deviation of the mean
((data["aqi_log"] >= lower_limit) & (data["aqi_log"] <= upper_limit)).mean() * 100
# for defining variable for lower limit, 2 standard deviations below the mean
lower_limit = mean_aqi_log - 2 * std_aqi_log
# for defining variable for upper limit, 2 standard deviations above the mean
upper_limit = mean_aqi_log + 2 * std_aqi_log
print(lower_limit, upper_limit)
# for displaying the actual percentage of data that falls within 2 standard deviations of the mean
((data["aqi_log"] >= lower_limit) & (data["aqi_log"] <= upper_limit)).mean() * 100
# for defining variable for lower limit, 3 standard deviations below the mean
lower_limit = mean_aqi_log - 3 * std_aqi_log
# for defining variable for upper limit, 3 standard deviations above the mean
upper_limit = mean_aqi_log + 3 * std_aqi_log
print(lower_limit, upper_limit)
# for displaying the actual percentage of data that falls within 3 standard deviations of the mean.
((data["aqi_log"] >= lower_limit) & (data["aqi_log"] <= upper_limit)).mean() * 100
# for computing the z-score for every aqi_log value, and add a column named z_score in the data to store those results
data["z_score"] = stats.zscore(data["aqi_log"])
# for displaying the first 5 rows to ensure that the new column was added
data.head()
# for displaying data where `aqi_log` is above or below 3 standard deviations of the mean
data[(data["z_score"] > 3) | (data["z_score"] < -3)]
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/193/129193192.ipynb
| null | null |
[{"Id": 129193192, "ScriptId": 38407925, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/11/2023 17:36:21", "VersionNumber": 1.0, "Title": "Exploring Probability Distributions in Python", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# for importing packages and libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy import stats
# for reading the data file
data = pd.read_csv("/kaggle/input/air-quality/air_quality.csv")
# for initial observations of the data
data.head(10)
# for observing the number of (rows, columns) of data
data.shape
# creating a histogram to visualize the distribution of the data
# there is a skew, but it is still bell shaped
data["aqi_log"].hist()
# for creating a Quantile-Quantile plot
# another way of seeing if the data is normally distributed
# if the data follows a straight line, then a normal distribution should fit the data
fig = sm.qqplot(data["aqi_log"], line="s")
plt.show()
# for defining variable for aqi_log mean
mean_aqi_log = data["aqi_log"].mean()
print(mean_aqi_log)
# for defining variable for aqi_log standard deviation
std_aqi_log = data["aqi_log"].std()
print(std_aqi_log)
# for defining variable for lower limit, 1 standard deviation below the mean
lower_limit = mean_aqi_log - 1 * std_aqi_log
# for defining variable for upper limit, 1 standard deviation above the mean
upper_limit = mean_aqi_log + 1 * std_aqi_log
print(lower_limit, upper_limit)
# for displaying the actual percentage of data that falls within 1 standard deviation of the mean
((data["aqi_log"] >= lower_limit) & (data["aqi_log"] <= upper_limit)).mean() * 100
# for defining variable for lower limit, 2 standard deviations below the mean
lower_limit = mean_aqi_log - 2 * std_aqi_log
# for defining variable for upper limit, 2 standard deviations above the mean
upper_limit = mean_aqi_log + 2 * std_aqi_log
print(lower_limit, upper_limit)
# for displaying the actual percentage of data that falls within 2 standard deviations of the mean
((data["aqi_log"] >= lower_limit) & (data["aqi_log"] <= upper_limit)).mean() * 100
# for defining variable for lower limit, 3 standard deviations below the mean
lower_limit = mean_aqi_log - 3 * std_aqi_log
# for defining variable for upper limit, 3 standard deviations above the mean
upper_limit = mean_aqi_log + 3 * std_aqi_log
print(lower_limit, upper_limit)
# for displaying the actual percentage of data that falls within 3 standard deviations of the mean.
((data["aqi_log"] >= lower_limit) & (data["aqi_log"] <= upper_limit)).mean() * 100
# for computing the z-score for every aqi_log value, and add a column named z_score in the data to store those results
data["z_score"] = stats.zscore(data["aqi_log"])
# for displaying the first 5 rows to ensure that the new column was added
data.head()
# for displaying data where `aqi_log` is above or below 3 standard deviations of the mean
data[(data["z_score"] > 3) | (data["z_score"] < -3)]
| false | 0 | 832 | 0 | 832 | 832 |
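The percentages computed in the notebook above are implicitly being compared against the empirical rule for a normal distribution. A small reference sketch of the theoretical values using `scipy.stats.norm`:

```python
from scipy import stats

# Probability mass within k standard deviations of the mean of a normal distribution.
for k in (1, 2, 3):
    pct = (stats.norm.cdf(k) - stats.norm.cdf(-k)) * 100
    print(f"within {k} standard deviation(s): {pct:.2f}%")
# Expected output: roughly 68.27%, 95.45%, 99.73%
```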
||
129070268
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Unk
import nltk
from nltk.tag import tnt
nltk.download("indian")
nltk.download("punkt")  # required by nltk.word_tokenize used below
hi_ner = tnt.TnT()
train_data = nltk.corpus.indian.tagged_sents("hindi.pos")
hi_ner.train(train_data)
test_sentence = "राम ने अपनी किताब खो दी है।"
tagged_sentence = hi_ner.tag(nltk.word_tokenize(test_sentence))
print(tagged_sentence)
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
raw_text = "राम ने अपनी किताब खो दी है ।"
predicted = []
name_text = hi_ner.tag(nltk.word_tokenize(raw_text))
for word in range(len(name_text)):
predicted.append(name_text[word][1])
print(predicted)
true = ["Unk", "PSP", "PRP", "NN", "VAUX", "VAUX", "VAUX", "SYM"]
all_features = list(set(true + predicted))
Matrix = confusion_matrix(true, predicted, labels=all_features)
df_Matrix = pd.DataFrame(Matrix, index=all_features, columns=all_features)
print(df_Matrix)
total_features = np.sum(Matrix)
True_positive = np.trace(Matrix)
accuracy = True_positive / total_features
print(f"The Accuracy for the particular Raw Text is {accuracy}")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/070/129070268.ipynb
| null | null |
[{"Id": 129070268, "ScriptId": 38368571, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11197433, "CreationDate": "05/10/2023 18:31:33", "VersionNumber": 1.0, "Title": "notebookeab4616cd5", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 62.0, "LinesInsertedFromPrevious": 62.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Unk
import nltk
from nltk.tag import tnt
nltk.download("indian")
nltk.download("punkt")  # required by nltk.word_tokenize used below
hi_ner = tnt.TnT()
train_data = nltk.corpus.indian.tagged_sents("hindi.pos")
hi_ner.train(train_data)
test_sentence = "राम ने अपनी किताब खो दी है।"
tagged_sentence = hi_ner.tag(nltk.word_tokenize(test_sentence))
print(tagged_sentence)
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
raw_text = "राम ने अपनी किताब खो दी है ।"
predicted = []
name_text = hi_ner.tag(nltk.word_tokenize(raw_text))
for word in range(len(name_text)):
predicted.append(name_text[word][1])
print(predicted)
true = ["Unk", "PSP", "PRP", "NN", "VAUX", "VAUX", "VAUX", "SYM"]
all_features = list(set(true + predicted))
Matrix = confusion_matrix(true, predicted, labels=all_features)
df_Matrix = pd.DataFrame(Matrix, index=all_features, columns=all_features)
print(df_Matrix)
total_features = np.sum(Matrix)
True_positive = np.trace(Matrix)
accuracy = True_positive / total_features
print(f"The Accuracy for the particular Raw Text is {accuracy}")
| false | 0 | 569 | 0 | 569 | 569 |
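The notebook above evaluates the TnT tagger on a single hand-labelled sentence. A more representative check is to hold out part of the tagged corpus and measure accuracy on it; the sketch below only uses calls already present above (`tagged_sents`, `train`, `tag`), and the 90/10 split is an arbitrary assumption.

```python
import nltk
from nltk.tag import tnt

nltk.download("indian")
sents = nltk.corpus.indian.tagged_sents("hindi.pos")

split = int(len(sents) * 0.9)  # arbitrary 90/10 train/test split
train_sents, test_sents = sents[:split], sents[split:]

tagger = tnt.TnT()
tagger.train(train_sents)

correct = total = 0
for sent in test_sents:
    words = [w for w, _ in sent]
    gold = [t for _, t in sent]
    pred = [t for _, t in tagger.tag(words)]
    correct += sum(p == g for p, g in zip(pred, gold))
    total += len(gold)

print("held-out tagging accuracy:", correct / total)
```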
||
129070264
|
import os
os.chdir("/kaggle/input/dsaa-2023-competition")
print(os.getcwd())
# Here we build a two tower MLP taking the two ids as input. After passing through an embedding and linear layer its finally concatenated for predicting 0 or a 1.
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
# Read train data
df = pd.read_csv("train.csv")
df = df.drop(["id"], axis=1)
df.head()
# Read test data
dft = pd.read_csv("test.csv")
dft = dft.drop(["id"], axis=1)
dft.head()
# Encode the ids such that they start from 0 to the max value.
# This is important as we specify embedding dimension to our neural net.
# df_id_map is a mapping between the orginal id and the encoded one.
df_id_map = pd.concat([df["id1"], df["id2"], dft["id1"], dft["id2"]], axis=0)
df_id_map = df_id_map.drop_duplicates()
df_id_map = df_id_map.reset_index(drop=True)
df_id_map = df_id_map.reset_index()
df_id_map = df_id_map.rename(columns={"index": "new_id", 0: "id"})
df_id_map.head()
print(df_id_map.shape)
print(df_id_map["new_id"].max())
# Apply the mapping to our train and test data
df = df.merge(df_id_map, left_on="id1", right_on="id", how="left")
df = df.rename(columns={"new_id": "new_id1"})
df = df.merge(df_id_map, left_on="id2", right_on="id", how="left")
df = df.rename(columns={"new_id": "new_id2"})
df.drop(["id1", "id2", "id_x", "id_y"], axis=1, inplace=True)
df.rename(columns={"new_id1": "id1", "new_id2": "id2"}, inplace=True)
df.head()
# Applying mapping to our test data.
dft = dft.merge(df_id_map, left_on="id1", right_on="id", how="left")
dft = dft.rename(columns={"new_id": "new_id1"})
dft = dft.merge(df_id_map, left_on="id2", right_on="id", how="left")
dft = dft.rename(columns={"new_id": "new_id2"})
dft.drop(["id1", "id2", "id_x", "id_y"], axis=1, inplace=True)
dft.rename(columns={"new_id1": "id1", "new_id2": "id2"}, inplace=True)
dft.head()
# As said earlier, we build two towers taking the two ids as inputs.
class MLP(nn.Module):
def __init__(self, input_size, hidden_size, dropout_rate):
super(MLP, self).__init__()
self.embedding = nn.Embedding(input_size, hidden_size)
self.seq1 = nn.Sequential(
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Linear(hidden_size // 2, hidden_size // 2),
)
self.seq2 = nn.Sequential(
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Linear(hidden_size // 2, hidden_size // 2),
)
self.decoder = nn.Sequential(
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Linear(hidden_size // 2, 1),
nn.Sigmoid(),
)
def forward(self, x1, x2):
x1 = self.embedding(x1)
x1 = self.seq1(x1)
x2 = self.embedding(x2)
x2 = self.seq2(x2)
x = torch.cat((x1, x2), dim=1)
x = self.decoder(x).squeeze()
return x
class MyDataset(Dataset):
def __init__(self, df):
self.df = df
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
data1 = self.df.iloc[idx]["id1"]
data2 = self.df.iloc[idx]["id2"]
target = self.df.iloc[idx]["label"]
return data1, data2, target
train_loader = DataLoader(MyDataset(df), batch_size=1024, shuffle=True)
class MyTestDataset(Dataset):
def __init__(self, df):
self.df = df
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
data1 = self.df.iloc[idx]["id1"]
data2 = self.df.iloc[idx]["id2"]
return data1, data2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
model = MLP(df.shape[0], 100, 0.2).to(device)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model.
model.train()
for epoch in range(1, 10):
for batch_idx, (data1, data2, target) in enumerate(train_loader):
data1, data2, target = data1.to(device), data2.to(device), target.to(device)
optimizer.zero_grad()
output = model(data1, data2)
loss = criterion(output, target.float())
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data1),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
# Evaluate on test dataset.
model.eval()
test_loader = DataLoader(MyTestDataset(dft), batch_size=1024, shuffle=False)
preds = []
with torch.no_grad():
for batch_idx, (data1, data2) in enumerate(test_loader):
data1, data2 = data1.to(device), data2.to(device)
output = model(data1, data2)
output = torch.round(output)
preds += output.tolist()
len(preds)
# Prepare the submission file.
dfss = pd.read_csv("test.csv")
dfss.drop(["id1", "id2"], axis=1, inplace=True)
dfss["label"] = preds
dfss["label"] = dfss["label"].astype(int)
dfss.head()
os.chdir("/kaggle/working")
os.getcwd()
dfss.to_csv("submission1.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/070/129070264.ipynb
| null | null |
[{"Id": 129070264, "ScriptId": 38369012, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1538607, "CreationDate": "05/10/2023 18:31:32", "VersionNumber": 1.0, "Title": "notebook77a44375f8", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 154.0, "LinesInsertedFromPrevious": 154.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import os
os.chdir("/kaggle/input/dsaa-2023-competition")
print(os.getcwd())
# Here we build a two tower MLP taking the two ids as input. After passing through an embedding and linear layer its finally concatenated for predicting 0 or a 1.
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
# Read train data
df = pd.read_csv("train.csv")
df = df.drop(["id"], axis=1)
df.head()
# Read test data
dft = pd.read_csv("test.csv")
dft = dft.drop(["id"], axis=1)
dft.head()
# Encode the ids such that they start from 0 to the max value.
# This is important as we specify embedding dimension to our neural net.
# df_id_map is a mapping between the orginal id and the encoded one.
df_id_map = pd.concat([df["id1"], df["id2"], dft["id1"], dft["id2"]], axis=0)
df_id_map = df_id_map.drop_duplicates()
df_id_map = df_id_map.reset_index(drop=True)
df_id_map = df_id_map.reset_index()
df_id_map = df_id_map.rename(columns={"index": "new_id", 0: "id"})
df_id_map.head()
print(df_id_map.shape)
print(df_id_map["new_id"].max())
# Apply the mapping to our train and test data
df = df.merge(df_id_map, left_on="id1", right_on="id", how="left")
df = df.rename(columns={"new_id": "new_id1"})
df = df.merge(df_id_map, left_on="id2", right_on="id", how="left")
df = df.rename(columns={"new_id": "new_id2"})
df.drop(["id1", "id2", "id_x", "id_y"], axis=1, inplace=True)
df.rename(columns={"new_id1": "id1", "new_id2": "id2"}, inplace=True)
df.head()
# Applying mapping to our test data.
dft = dft.merge(df_id_map, left_on="id1", right_on="id", how="left")
dft = dft.rename(columns={"new_id": "new_id1"})
dft = dft.merge(df_id_map, left_on="id2", right_on="id", how="left")
dft = dft.rename(columns={"new_id": "new_id2"})
dft.drop(["id1", "id2", "id_x", "id_y"], axis=1, inplace=True)
dft.rename(columns={"new_id1": "id1", "new_id2": "id2"}, inplace=True)
dft.head()
# As said earlier, we build two towers taking the two ids as inputs.
class MLP(nn.Module):
def __init__(self, input_size, hidden_size, dropout_rate):
super(MLP, self).__init__()
self.embedding = nn.Embedding(input_size, hidden_size)
self.seq1 = nn.Sequential(
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Linear(hidden_size // 2, hidden_size // 2),
)
self.seq2 = nn.Sequential(
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Linear(hidden_size // 2, hidden_size // 2),
)
self.decoder = nn.Sequential(
nn.Dropout(dropout_rate),
nn.Linear(hidden_size, hidden_size // 2),
nn.ReLU(),
nn.Linear(hidden_size // 2, 1),
nn.Sigmoid(),
)
def forward(self, x1, x2):
x1 = self.embedding(x1)
x1 = self.seq1(x1)
x2 = self.embedding(x2)
x2 = self.seq2(x2)
x = torch.cat((x1, x2), dim=1)
x = self.decoder(x).squeeze()
return x
class MyDataset(Dataset):
def __init__(self, df):
self.df = df
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
data1 = self.df.iloc[idx]["id1"]
data2 = self.df.iloc[idx]["id2"]
target = self.df.iloc[idx]["label"]
return data1, data2, target
train_loader = DataLoader(MyDataset(df), batch_size=1024, shuffle=True)
class MyTestDataset(Dataset):
def __init__(self, df):
self.df = df
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
data1 = self.df.iloc[idx]["id1"]
data2 = self.df.iloc[idx]["id2"]
return data1, data2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
model = MLP(df.shape[0], 100, 0.2).to(device)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model.
model.train()
for epoch in range(1, 10):
for batch_idx, (data1, data2, target) in enumerate(train_loader):
data1, data2, target = data1.to(device), data2.to(device), target.to(device)
optimizer.zero_grad()
output = model(data1, data2)
loss = criterion(output, target.float())
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data1),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
# Evaluate on test dataset.
model.eval()
test_loader = DataLoader(MyTestDataset(dft), batch_size=1024, shuffle=False)
preds = []
with torch.no_grad():
for batch_idx, (data1, data2) in enumerate(test_loader):
data1, data2 = data1.to(device), data2.to(device)
output = model(data1, data2)
output = torch.round(output)
preds += output.tolist()
len(preds)
# Prepare the submission file.
dfss = pd.read_csv("test.csv")
dfss.drop(["id1", "id2"], axis=1, inplace=True)
dfss["label"] = preds
dfss["label"] = dfss["label"].astype(int)
dfss.head()
os.chdir("/kaggle/working")
os.getcwd()
dfss.to_csv("submission1.csv", index=False)
| false | 0 | 1,833 | 0 | 1,833 | 1,833 |
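Before committing to a long training run, a quick forward-pass sanity check with random ids can catch embedding-size or concatenation mistakes in a two-tower model like the one above. The sketch below assumes the `MLP` class defined in that notebook is in scope; the vocabulary and batch sizes are arbitrary.

```python
import torch

n_ids = 1000     # assumed number of distinct encoded ids (must cover every id seen at train/test time)
batch_size = 8

model = MLP(input_size=n_ids, hidden_size=100, dropout_rate=0.2)

ids1 = torch.randint(0, n_ids, (batch_size,))
ids2 = torch.randint(0, n_ids, (batch_size,))

with torch.no_grad():
    out = model(ids1, ids2)

print(out.shape)              # expected: torch.Size([8]) after the final squeeze
print(out.min(), out.max())   # sigmoid output, so values should lie in (0, 1)
```

A check like this also makes it easy to confirm that the embedding table is sized by the number of distinct ids in the id map rather than by the number of training rows.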
||
129070821
|
import numpy as np
import pandas as pd
import tensorflow as tf
# # Explore the data
#
data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
data.shape
data.head()
data.label.describe()
data.iloc[0, 1:].describe()
# # Separation of the data into train and validation
# **95%** Train
# **05%** Validation
# **labels** ranges from 0 -> 9
# **pixels** ranges from 0 -> 255. we need to normalize pixels to range from 0 -> 1
#
data_count = tf.convert_to_tensor(data.groupby("label").count().iloc[:, 0])
print(data_count)
print(tf.cast(data_count, dtype=tf.float32) * 0.05)
train_dataframes = []
validation_dataframes = []
for i in range(10):
mask = np.random.rand(len(data.loc[data.label == i])) <= 0.95
temp_train = data.loc[data.label == i][mask]
temp_validation = data.loc[data.label == i][~mask]
train_dataframes.append(temp_train)
validation_dataframes.append(temp_validation)
for i in range(10):
assert (
train_dataframes[i].iloc[:, 0].count()
+ validation_dataframes[i].iloc[:, 0].count()
== data_count[i]
), "counts doesn't match"
train_data = pd.concat(train_dataframes)
validation_data = pd.concat(validation_dataframes)
train_data
# ## Shuffle the training data
train_data = train_data.sample(frac=1)
train_data
# ## Separate the data into features and labels
train_data_features = train_data.copy()
train_data_labels = train_data_features.pop("label")
validation_data_features = validation_data.copy()
validation_data_labels = validation_data_features.pop("label")
validation_data_features
train_data_labels
# ## convert pandas dataframes to tensorflow data sets
tf_train_X = tf.convert_to_tensor(train_data_features)
# convert 1D into 2D for the convolution
tf_train_X = tf.reshape(tf_train_X, [-1, 28, 28])
# normalize pixels from 0->255 into 0 -> 1
tf_train_X /= 255
tf_train_X = tf.expand_dims(tf_train_X, axis=-1)
tf_train_Y = tf.convert_to_tensor(train_data_labels)
tf_train_Y = tf_train_Y[:, tf.newaxis]
tf_validation_X = tf.convert_to_tensor(validation_data_features)
tf_validation_X = tf.reshape(tf_validation_X, [-1, 28, 28])
tf_validation_X /= 255
tf_validation_X = tf.expand_dims(tf_validation_X, axis=-1)  # add the channel dimension to match the (28, 28, 1) model input
tf_validation_Y = tf.convert_to_tensor(validation_data_labels)
tf_validation_Y = tf_validation_Y[:, tf.newaxis]
print("Train data shape: ", tf_train_X.shape)
print("Train labels shape: ", tf_train_Y.shape)
assert tf_train_Y.shape[0] == tf_train_X.shape[0], "train data sizes doesn't match"
print("validation data shape: ", tf_validation_X.shape)
print("validation labels shape: ", tf_validation_Y.shape)
assert (
tf_validation_X.shape[0] == tf_validation_Y.shape[0]
), "validation data sizes doesn't match"
train_dataset = tf.data.Dataset.from_tensor_slices((tf_train_X, tf_train_Y))
validation_dataset = tf.data.Dataset.from_tensor_slices(
(tf_validation_X, tf_validation_Y)
)
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.batch(512)
train_dataset = train_dataset.prefetch(AUTOTUNE)
validation_dataset = validation_dataset.batch(264)
validation_dataset = validation_dataset.prefetch(AUTOTUNE)
# # Building the model
model = tf.keras.Sequential(
[
tf.keras.Input((28, 28, 1)),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=1, activation="relu", name="conv2"
),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=1, activation="relu", name="conv3"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=128, activation="relu", name="dense1"),
tf.keras.layers.Dense(units=10, activation="linear", name="dense2"),
]
)
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(lr=0.001),
metrics=["accuracy"],
)
model.summary()
model.fit(train_dataset, validation_data=validation_dataset, epochs=25)
model.evaluate(validation_dataset)
# # Generate Predictions for test set
data_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
data_test
test_X = tf.convert_to_tensor(data_test)
# convert 1D into 2D for the convolution
test_X = tf.reshape(test_X, [-1, 28, 28])
# normalize pixels from 0->255 into 0 -> 1
test_X /= 255
test_X = tf.expand_dims(test_X, axis=-1)  # add the channel dimension to match the (28, 28, 1) model input
test_X
test_dataset = tf.data.Dataset.from_tensor_slices(test_X)
test_dataset = test_dataset.batch(264)
test_dataset = test_dataset.prefetch(AUTOTUNE)
logits = model.predict(test_dataset)
logits.shape
predictions = tf.nn.softmax(logits, axis=1)
predictions = tf.argmax(predictions, axis=1)
predictions
ids = tf.range(1, predictions.shape[0] + 1)
submmision_dic = {"ImageId": ids, "Label": predictions}
sub_df = pd.DataFrame(submmision_dic)
sub_df.to_csv("/kaggle/working/sub3.csv", index=False)
my_sub = pd.read_csv("/kaggle/working/sub3.csv")
my_sub
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/070/129070821.ipynb
| null | null |
[{"Id": 129070821, "ScriptId": 38319189, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11227017, "CreationDate": "05/10/2023 18:36:46", "VersionNumber": 1.0, "Title": "digit_recognizer", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 167.0, "LinesInsertedFromPrevious": 167.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np
import pandas as pd
import tensorflow as tf
# # Explore the data
#
data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
data.shape
data.head()
data.label.describe()
data.iloc[0, 1:].describe()
# # Separation of the data into train and validation
# **95%** Train
# **05%** Validation
# **labels** ranges from 0 -> 9
# **pixels** ranges from 0 -> 255. we need to normalize pixels to range from 0 -> 1
#
data_count = tf.convert_to_tensor(data.groupby("label").count().iloc[:, 0])
print(data_count)
print(tf.cast(data_count, dtype=tf.float32) * 0.05)
train_dataframes = []
validation_dataframes = []
for i in range(10):
mask = np.random.rand(len(data.loc[data.label == i])) <= 0.95
temp_train = data.loc[data.label == i][mask]
temp_validation = data.loc[data.label == i][~mask]
train_dataframes.append(temp_train)
validation_dataframes.append(temp_validation)
for i in range(10):
assert (
train_dataframes[i].iloc[:, 0].count()
+ validation_dataframes[i].iloc[:, 0].count()
== data_count[i]
), "counts doesn't match"
train_data = pd.concat(train_dataframes)
validation_data = pd.concat(validation_dataframes)
train_data
# ## Shuffle the training data
train_data = train_data.sample(frac=1)
train_data
# ## Separate the data into features and labels
train_data_features = train_data.copy()
train_data_labels = train_data_features.pop("label")
validation_data_features = validation_data.copy()
validation_data_labels = validation_data_features.pop("label")
validation_data_features
train_data_labels
# ## convert pandas dataframes to tensorflow data sets
tf_train_X = tf.convert_to_tensor(train_data_features)
# convert 1D into 2D for the convolution
tf_train_X = tf.reshape(tf_train_X, [-1, 28, 28])
# normalize pixels from 0->255 into 0 -> 1
tf_train_X /= 255
tf_train_X = tf.expand_dims(tf_train_X, axis=-1)
tf_train_Y = tf.convert_to_tensor(train_data_labels)
tf_train_Y = tf_train_Y[:, tf.newaxis]
tf_validation_X = tf.convert_to_tensor(validation_data_features)
tf_validation_X = tf.reshape(tf_validation_X, [-1, 28, 28])
tf_validation_X /= 255
tf_validation_X = tf.expand_dims(tf_validation_X, axis=-1)  # add the channel dimension to match the (28, 28, 1) model input
tf_validation_Y = tf.convert_to_tensor(validation_data_labels)
tf_validation_Y = tf_validation_Y[:, tf.newaxis]
print("Train data shape: ", tf_train_X.shape)
print("Train labels shape: ", tf_train_Y.shape)
assert tf_train_Y.shape[0] == tf_train_X.shape[0], "train data sizes doesn't match"
print("validation data shape: ", tf_validation_X.shape)
print("validation labels shape: ", tf_validation_Y.shape)
assert (
tf_validation_X.shape[0] == tf_validation_Y.shape[0]
), "validation data sizes doesn't match"
train_dataset = tf.data.Dataset.from_tensor_slices((tf_train_X, tf_train_Y))
validation_dataset = tf.data.Dataset.from_tensor_slices(
(tf_validation_X, tf_validation_Y)
)
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.batch(512)
train_dataset = train_dataset.prefetch(AUTOTUNE)
validation_dataset = validation_dataset.batch(264)
validation_dataset = validation_dataset.prefetch(AUTOTUNE)
# # Building the model
model = tf.keras.Sequential(
[
tf.keras.Input((28, 28, 1)),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=1, activation="relu", name="conv2"
),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=1, activation="relu", name="conv3"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=128, activation="relu", name="dense1"),
tf.keras.layers.Dense(units=10, activation="linear", name="dense2"),
]
)
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(lr=0.001),
metrics=["accuracy"],
)
model.summary()
model.fit(train_dataset, validation_data=validation_dataset, epochs=25)
model.evaluate(validation_dataset)
# # Generate Predictions for test set
data_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
data_test
test_X = tf.convert_to_tensor(data_test)
# convert 1D into 2D for the convolution
test_X = tf.reshape(test_X, [-1, 28, 28])
# normalize pixels from 0->255 into 0 -> 1
test_X /= 255
test_X = tf.expand_dims(test_X, axis=-1)  # add the channel dimension to match the (28, 28, 1) model input
test_X
test_dataset = tf.data.Dataset.from_tensor_slices(test_X)
test_dataset = test_dataset.batch(264)
test_dataset = test_dataset.prefetch(AUTOTUNE)
logits = model.predict(test_dataset)
logits.shape
predictions = tf.nn.softmax(logits, axis=1)
predictions = tf.argmax(predictions, axis=1)
predictions
ids = tf.range(1, predictions.shape[0] + 1)
submmision_dic = {"ImageId": ids, "Label": predictions}
sub_df = pd.DataFrame(submmision_dic)
sub_df.to_csv("/kaggle/working/sub3.csv", index=False)
my_sub = pd.read_csv("/kaggle/working/sub3.csv")
my_sub
| false | 0 | 1,649 | 0 | 1,649 | 1,649 |
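A side note on the notebook above: because the final Dense layer is linear and the loss uses `from_logits=True`, the softmax applied before `argmax` at prediction time is optional, since softmax is monotonic within each row. A tiny sketch of that equivalence on random logits:

```python
import tensorflow as tf

logits = tf.random.normal((5, 10))  # pretend model output: 5 samples, 10 classes

via_softmax = tf.argmax(tf.nn.softmax(logits, axis=1), axis=1)
direct = tf.argmax(logits, axis=1)

# Both routes pick the same class for every sample.
print(bool(tf.reduce_all(via_softmax == direct)))
```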
||
129070807
|
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Data Analysis with Pandas
# #### Data set name : Video Gaming Sales
# #### Author : Doha Khamaiseh
# #### Date : 9-5-2023
#
df = pd.read_csv("../input/videogamesales/vgsales.csv")
df
# df.describe ()
# ### Which company is the most common video game publisher?
df["Publisher"].mode()
# ### What’s the most common platform?
df["Platform"].mode()
# ### What about the most common genre?
df["Genre"].mode()
# ### What are the top 20 highest grossing games?
top_20 = df.nlargest(20, "Global_Sales")
top_20["Name"]
# ### For North American video game sales, what’s the median?
# #### Provide a secondary output showing ten games surrounding the median sales output.
# #### Assume that games with same median value are sorted in descending order.
df = df.sort_index(ascending=False)
# df
med = df["NA_Sales"].median()
med # med = 0.08
# d = df[df["NA_Sales"]==0.08]
# d
# ten games surrounding the median:
ten_games = df[(df["NA_Sales"] >= med - 0.05) & (df["NA_Sales"] <= med + 0.05)]
ten_games.head(10)
# ### For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
s = df["Global_Sales"].max()
# s
n = df[df["Global_Sales"] == s]
# # n
std_score = (n["NA_Sales"] - df["NA_Sales"].mean()) / df["NA_Sales"].std()
std_score
# ### The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms?
wii = df[(df["Platform"] == "Wii") & (df["Publisher"] == "Nintendo")]
wii_mean = wii["Global_Sales"].mean()
# wii_mean
other = df[df["Platform"] != "Wii"]
other_mean = other["Global_Sales"].mean()
# other_mean
print(
"Does the Nintendo Wii mean bigger than other platforms mean?",
wii_mean > other_mean,
)
# ### 3 more questions :
# #### How much are the sales for all versions of the Super Mario game?
Super_Mario = df[df["Name"].str.contains("Super Mario")]
Super_Mario["Global_Sales"].sum()
# #### Compare the sales for all games in 1980 and in 2000
year_1980 = df[df["Year"] == 1980]
# year_1980
year_2000 = df[df["Year"] == 2000]
year_2000
print(
"Did the gaming rate increase between 1980 and 2000?",
year_2000["Global_Sales"].sum() > year_1980["Global_Sales"].sum(),
)
# #### What is the most common game in Europe?
most = df[df["EU_Sales"] == df["EU_Sales"].max()]
print("The most common game is :", most["Name"])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/070/129070807.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 129070807, "ScriptId": 38311157, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14721492, "CreationDate": "05/10/2023 18:36:38", "VersionNumber": 1.0, "Title": "vg-stats", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 110.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184801602, "KernelVersionId": 129070807, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Data Analysis with Pandas
# #### Data set name : Video Gaming Sales
# #### Author : Doha Khamaiseh
# #### Date : 9-5-2023
#
df = pd.read_csv("../input/videogamesales/vgsales.csv")
df
# df.describe ()
# ### Which company is the most common video game publisher?
df["Publisher"].mode()
# ### What’s the most common platform?
df["Platform"].mode()
# ### What about the most common genre?
df["Genre"].mode()
# ### What are the top 20 highest grossing games?
top_20 = df.nlargest(20, "Global_Sales")
top_20["Name"]
# ### For North American video game sales, what’s the median?
# #### Provide a secondary output showing ten games surrounding the median sales output.
# #### Assume that games with same median value are sorted in descending order.
df = df.sort_index(ascending=False)
# df
med = df["NA_Sales"].median()
med # med = 0.08
# d = df[df["NA_Sales"]==0.08]
# d
# ten games surrounding the median:
ten_games = df[(df["NA_Sales"] >= med - 0.05) & (df["NA_Sales"] <= med + 0.05)]
ten_games.head(10)
# ### For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
s = df["Global_Sales"].max()
# s
n = df[df["Global_Sales"] == s]
# # n
std_score = (n["NA_Sales"] - df["NA_Sales"].mean()) / df["NA_Sales"].std()
std_score
# ### The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms?
wii = df[(df["Platform"] == "Wii") & (df["Publisher"] == "Nintendo")]
wii_mean = wii["Global_Sales"].mean()
# wii_mean
other = df[df["Platform"] != "Wii"]
other_mean = other["Global_Sales"].mean()
# other_mean
print(
"Does the Nintendo Wii mean bigger than other platforms mean?",
wii_mean > other_mean,
)
# ### 3 more questions :
# #### How much are the sales for all versions of the Super Mario game?
Super_Mario = df[df["Name"].str.contains("Super Mario")]
Super_Mario["Global_Sales"].sum()
# #### Compare the sales for all games in 1980 and in 2000
year_1980 = df[df["Year"] == 1980]
# year_1980
year_2000 = df[df["Year"] == 2000]
year_2000
print(
"Did the gaming rate increase between 1980 and 2000?",
year_2000["Global_Sales"].sum() > year_1980["Global_Sales"].sum(),
)
# #### What is the most common game in Europe?
most = df[df["EU_Sales"] == df["EU_Sales"].max()]
print("The most common game is :", most["Name"])
|
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
| true | 1 |
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
| 1,005 | 0 | 2,119 | 1,005 |
129070449
|
<jupyter_start><jupyter_text>Tweets Blogs News - Swiftkey Dataset 4million
### Context
A zip file from SWIFTKEY in partnership with the Johns Hopkins Data Science Specialization. This Natural Language Processing dataset include millions of tweets, blog posts, and news articles in multiple languages.
### Content
This RAW data is zipped. It is readable as a large character data type. Select which file (based on language) that you wish to download and the source. Here is the list of files:
## Name Length Date
## 1 final/ 0 2014-07-22 10:10:00
## 2 final/de_DE/ 0 2014-07-22 10:10:00
## 3 final/de_DE/de_DE.twitter.txt 75578341 2014-07-22 10:11:00
## 4 final/de_DE/de_DE.blogs.txt 85459666 2014-07-22 10:11:00
## 5 final/de_DE/de_DE.news.txt 95591959 2014-07-22 10:11:00
## 6 final/ru_RU/ 0 2014-07-22 10:10:00
## 7 final/ru_RU/ru_RU.blogs.txt 116855835 2014-07-22 10:12:00
## 8 final/ru_RU/ru_RU.news.txt 118996424 2014-07-22 10:12:00
## 9 final/ru_RU/ru_RU.twitter.txt 105182346 2014-07-22 10:12:00
## 10 final/en_US/ 0 2014-07-22 10:10:00
## 11 final/en_US/en_US.twitter.txt 167105338 2014-07-22 10:12:00
## 12 final/en_US/en_US.news.txt 205811889 2014-07-22 10:13:00
## 13 final/en_US/en_US.blogs.txt 210160014 2014-07-22 10:13:00
## 14 final/fi_FI/ 0 2014-07-22 10:10:00
## 15 final/fi_FI/fi_FI.news.txt 94234350 2014-07-22 10:11:00
## 16 final/fi_FI/fi_FI.blogs.txt 108503595 2014-07-22 10:12:00
## 17 final/fi_FI/fi_FI.twitter.txt 25331142 2014-07-22 10:10:00
Kaggle dataset identifier: tweets-blogs-news-swiftkey-dataset-4million
<jupyter_script>import math
import random
import numpy as np
import pandas as pd
import nltk
nltk.data.path.append(".")
## Basic File Paths
data_dir = "../input/tweets-blogs-news-swiftkey-dataset-4million/final/en_US"
file_path = data_dir + "/en_US.twitter.txt"
## nltk settings
nltk.data.path.append(data_dir)
## Opening the File in read mode ("r")
with open(file_path, "r") as f:
data = f.read()
print("Data type:", type(data))
print("Number of letters:", len(data))
print("First 300 letters of the data")
print("-------")
display(data[0:300])
print("-------")
print("Last 300 letters of the data")
print("-------")
display(data[-300:])
print("-------")
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: split_to_sentences ###
def split_to_sentences(data):
"""
Split data by linebreak "\n"
Args:
data: str
Returns:
A list of sentences
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
sentences = data.split("\n")
### END CODE HERE ###
    # Additional cleaning (This part is already implemented)
# - Remove leading and trailing spaces from each sentence
# - Drop sentences if they are empty strings.
sentences = [s.strip() for s in sentences]
sentences = [s for s in sentences if len(s) > 0]
return sentences
# test your code
x = """
I have a pen.\nI have an apple. \nAh\nApple pen.\n
"""
print(x)
split_to_sentences(x)
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: tokenize_sentences ###
def tokenize_sentences(sentences):
"""
Tokenize sentences into tokens (words)
Args:
sentences: List of strings
Returns:
List of lists of tokens
"""
# Initialize the list of lists of tokenized sentences
tokenized_sentences = []
### START CODE HERE (Replace instances of 'None' with your code) ###
# Go through each sentence
for sentence in sentences:
# Convert to lowercase letters
sentence = sentence.lower()
# Convert into a list of words
tokenized = nltk.word_tokenize(sentence)
# append the list of words to the list of lists
tokenized_sentences.append(tokenized)
### END CODE HERE ###
return tokenized_sentences
# test your code
sentences = ["Sky is blue.", "Leaves are green.", "Roses are red."]
tokenize_sentences(sentences)
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: get_tokenized_data ###
def get_tokenized_data(data):
"""
Make a list of tokenized sentences
Args:
data: String
Returns:
List of lists of tokens
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
# Get the sentences by splitting up the data
sentences = split_to_sentences(data)
# Get the list of lists of tokens by tokenizing the sentences
tokenized_sentences = tokenize_sentences(sentences)
### END CODE HERE ###
return tokenized_sentences
# test your function
x = "Sky is blue.\nLeaves are green\nRoses are red."
get_tokenized_data(x)
tokenized_data = get_tokenized_data(data)
random.seed(87)
random.shuffle(tokenized_data)
train_size = int(len(tokenized_data) * 0.8)
train_data = tokenized_data[0:train_size]
test_data = tokenized_data[train_size:]
print(
"{} data are split into {} train and {} test set".format(
len(tokenized_data), len(train_data), len(test_data)
)
)
print("First training sample:")
print(train_data[0])
print("First test sample")
print(test_data[0])
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: count_words ###
def count_words(tokenized_sentences):
"""
    Count the number of times each word appears in the tokenized sentences
Args:
tokenized_sentences: List of lists of strings
Returns:
dict that maps word (str) to the frequency (int)
"""
word_counts = {}
### START CODE HERE (Replace instances of 'None' with your code) ###
# Loop through each sentence
for sentence in tokenized_sentences: # complete this line
# Go through each token in the sentence
for token in sentence: # complete this line
# If the token is not in the dictionary yet, set the count to 1
if token not in word_counts.keys(): # complete this line
word_counts[token] = 1
# If the token is already in the dictionary, increment the count by 1
else:
word_counts[token] += 1
### END CODE HERE ###
return word_counts
# test your code
tokenized_sentences = [
["sky", "is", "blue", "."],
["leaves", "are", "green", "."],
["roses", "are", "red", "."],
]
count_words(tokenized_sentences)
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: get_words_with_nplus_frequency ###
def get_words_with_nplus_frequency(tokenized_sentences, count_threshold):
"""
Find the words that appear N times or more
Args:
        tokenized_sentences: List of lists of strings (word tokens)
count_threshold: minimum number of occurrences for a word to be in the closed vocabulary.
Returns:
List of words that appear N times or more
"""
    # Initialize an empty list to contain the words that
    # appear at least 'count_threshold' times.
    closed_vocab = []
    # Get the word counts of the tokenized sentences
# Use the function that you defined earlier to count the words
word_counts = count_words(tokenized_sentences)
### START CODE HERE (Replace instances of 'None' with your code) ###
# for each word and its count
for word, cnt in word_counts.items(): # complete this line
# check that the word's count
# is at least as great as the minimum count
if cnt >= count_threshold:
# append the word to the list
closed_vocab.append(word)
### END CODE HERE ###
return closed_vocab
# test your code
tokenized_sentences = [
["sky", "is", "blue", "."],
["leaves", "are", "green", "."],
["roses", "are", "red", "."],
]
tmp_closed_vocab = get_words_with_nplus_frequency(
tokenized_sentences, count_threshold=2
)
print(f"Closed vocabulary:")
print(tmp_closed_vocab)
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: replace_oov_words_by_unk ###
def replace_oov_words_by_unk(tokenized_sentences, vocabulary, unknown_token="<unk>"):
"""
Replace words not in the given vocabulary with '<unk>' token.
Args:
tokenized_sentences: List of lists of strings
vocabulary: List of strings that we will use
unknown_token: A string representing unknown (out-of-vocabulary) words
Returns:
List of lists of strings, with words not in the vocabulary replaced
"""
# Place vocabulary into a set for faster search
vocabulary = set(vocabulary)
# Initialize a list that will hold the sentences
# after less frequent words are replaced by the unknown token
replaced_tokenized_sentences = []
# Go through each sentence
for sentence in tokenized_sentences:
# Initialize the list that will contain
# a single sentence with "unknown_token" replacements
replaced_sentence = []
### START CODE HERE (Replace instances of 'None' with your code) ###
# for each token in the sentence
for token in sentence: # complete this line
# Check if the token is in the closed vocabulary
if token in vocabulary: # complete this line
# If so, append the word to the replaced_sentence
replaced_sentence.append(token)
else:
# otherwise, append the unknown token instead
replaced_sentence.append(unknown_token)
### END CODE HERE ###
# Append the list of tokens to the list of lists
replaced_tokenized_sentences.append(replaced_sentence)
return replaced_tokenized_sentences
tokenized_sentences = [["dogs", "run"], ["cats", "sleep"]]
vocabulary = ["dogs", "sleep"]
tmp_replaced_tokenized_sentences = replace_oov_words_by_unk(
tokenized_sentences, vocabulary
)
print(f"Original sentence:")
print(tokenized_sentences)
print(f"tokenized_sentences with less frequent words converted to '<unk>':")
print(tmp_replaced_tokenized_sentences)
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: preprocess_data ###
def preprocess_data(train_data, test_data, count_threshold):
"""
Preprocess data, i.e.,
- Find tokens that appear at least N times in the training data.
- Replace tokens that appear less than N times by "<unk>" both for training and test data.
Args:
train_data, test_data: List of lists of strings.
count_threshold: Words whose count is less than this are
treated as unknown.
Returns:
Tuple of
- training data with low frequent words replaced by "<unk>"
- test data with low frequent words replaced by "<unk>"
- vocabulary of words that appear n times or more in the training data
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
# Get the closed vocabulary using the train data
vocabulary = get_words_with_nplus_frequency(train_data, count_threshold)
# For the train data, replace less common words with "<unk>"
train_data_replaced = replace_oov_words_by_unk(train_data, vocabulary)
# For the test data, replace less common words with "<unk>"
test_data_replaced = replace_oov_words_by_unk(test_data, vocabulary)
### END CODE HERE ###
return train_data_replaced, test_data_replaced, vocabulary
# test your code
tmp_train = [["sky", "is", "blue", "."], ["leaves", "are", "green"]]
tmp_test = [["roses", "are", "red", "."]]
tmp_train_repl, tmp_test_repl, tmp_vocab = preprocess_data(
tmp_train, tmp_test, count_threshold=1
)
print("tmp_train_repl")
print(tmp_train_repl)
print()
print("tmp_test_repl")
print(tmp_test_repl)
print()
print("tmp_vocab")
print(tmp_vocab)
minimum_freq = 2
train_data_processed, test_data_processed, vocabulary = preprocess_data(
train_data, test_data, minimum_freq
)
print("First preprocessed training sample:")
print(train_data_processed[0])
print()
print("First preprocessed test sample:")
print(test_data_processed[0])
print()
print("First 10 vocabulary:")
print(vocabulary[0:10])
print()
print("Size of vocabulary:", len(vocabulary))
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED FUNCTION: count_n_grams ###
def count_n_grams(data, n, start_token="<s>", end_token="<e>"):
"""
Count all n-grams in the data
Args:
data: List of lists of words
n: number of words in a sequence
Returns:
A dictionary that maps a tuple of n-words to its frequency
"""
# Initialize dictionary of n-grams and their counts
n_grams = {}
### START CODE HERE (Replace instances of 'None' with your code) ###
# Go through each sentence in the data
for sentence in data: # complete this line
# prepend start token n times, and append <e> one time
sentence = [start_token] * n + sentence + [end_token]
# convert list to tuple
# So that the sequence of words can be used as
# a key in the dictionary
sentence = tuple(sentence)
        # Use 'i' to indicate the start of the n-gram,
        # from index 0 to the last index where a complete
        # n-gram still fits inside the sentence.
        m = len(sentence) - n + 1
        for i in range(m):  # complete this line
# Get the n-gram from i to i+n
n_gram = sentence[i : i + n]
# check if the n-gram is in the dictionary
if n_gram in n_grams.keys(): # complete this line
# Increment the count for this n-gram
n_grams[n_gram] += 1
else:
# Initialize this n-gram count to 1
n_grams[n_gram] = 1
### END CODE HERE ###
return n_grams
# test your code
# CODE REVIEW COMMENT: Outcome does not match expected outcome
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
print("Uni-gram:")
print(count_n_grams(sentences, 1))
print("Bi-gram:")
print(count_n_grams(sentences, 2))
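# A compact cross-check of the counting logic above written with
# collections.Counter; a minimal sketch that assumes the same toy `sentences`
# and the <s>/<e> padding convention used in count_n_grams.
from collections import Counter
def count_n_grams_counter(data, n, start_token="<s>", end_token="<e>"):
    counts = Counter()
    for sent in data:
        padded = tuple([start_token] * n + sent + [end_token])
        counts.update(padded[i : i + n] for i in range(len(padded) - n + 1))
    return dict(counts)
print(count_n_grams_counter(sentences, 2) == count_n_grams(sentences, 2))  # expected: True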
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED FUNCTION: estimate_probability ###
def estimate_probability(
word, previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0
):
"""
Estimate the probabilities of a next word using the n-gram counts with k-smoothing
Args:
word: next word
previous_n_gram: A sequence of words of length n
n_gram_counts: Dictionary of counts of n-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary_size: number of words in the vocabulary
k: positive constant, smoothing parameter
Returns:
A probability
"""
# convert list to tuple to use it as a dictionary key
previous_n_gram = tuple(previous_n_gram)
### START CODE HERE (Replace instances of 'None' with your code) ###
# Set the denominator
# If the previous n-gram exists in the dictionary of n-gram counts,
# Get its count. Otherwise set the count to zero
# Use the dictionary that has counts for n-grams
previous_n_gram_count = (
n_gram_counts[previous_n_gram] if previous_n_gram in n_gram_counts else 0
)
# Calculate the denominator using the count of the previous n gram
# and apply k-smoothing
denominator = previous_n_gram_count + k * vocabulary_size
# Define n plus 1 gram as the previous n-gram plus the current word as a tuple
n_plus1_gram = previous_n_gram + (word,)
# Set the count to the count in the dictionary,
# otherwise 0 if not in the dictionary
# use the dictionary that has counts for the n-gram plus current word
n_plus1_gram_count = (
n_plus1_gram_counts[n_plus1_gram] if n_plus1_gram in n_plus1_gram_counts else 0
)
# Define the numerator use the count of the n-gram plus current word,
# and apply smoothing
numerator = n_plus1_gram_count + k
# Calculate the probability as the numerator divided by denominator
probability = numerator / denominator
### END CODE HERE ###
return probability
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
tmp_prob = estimate_probability(
"cat", "a", unigram_counts, bigram_counts, len(unique_words), k=1
)
print(
f"The estimated probability of word 'cat' given the previous n-gram 'a' is: {tmp_prob:.4f}"
)
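# The k-smoothed estimate used above is
#   P(word | prev n-gram) = (C(prev n-gram followed by word) + k) / (C(prev n-gram) + k * |V|)
# A quick hand check of the printed value, assuming the same toy corpus
# (counts read directly off the unigram/bigram dictionaries above):
count_a_cat = 2  # ("a", "cat") occurs once in each toy sentence
count_a = 2  # ("a",) occurs once in each toy sentence
V = len(unique_words)  # 7 unique words in the toy corpus
print((count_a_cat + 1.0) / (count_a + 1.0 * V))  # 0.3333..., matching tmp_prob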
def estimate_probabilities(
previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0
):
"""
Estimate the probabilities of next words using the n-gram counts with k-smoothing
Args:
previous_n_gram: A sequence of words of length n
        n_gram_counts: Dictionary of counts of n-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary: List of words
k: positive constant, smoothing parameter
Returns:
A dictionary mapping from next words to the probability.
"""
# convert list to tuple to use it as a dictionary key
previous_n_gram = tuple(previous_n_gram)
# add <e> <unk> to the vocabulary
# <s> is not needed since it should not appear as the next word
vocabulary = vocabulary + ["<e>", "<unk>"]
vocabulary_size = len(vocabulary)
probabilities = {}
for word in vocabulary:
probability = estimate_probability(
word,
previous_n_gram,
n_gram_counts,
n_plus1_gram_counts,
vocabulary_size,
k=k,
)
probabilities[word] = probability
return probabilities
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
estimate_probabilities("a", unigram_counts, bigram_counts, unique_words, k=1)
# Additional test
trigram_counts = count_n_grams(sentences, 3)
estimate_probabilities(["<s>", "<s>"], bigram_counts, trigram_counts, unique_words, k=1)
def make_count_matrix(n_plus1_gram_counts, vocabulary):
# add <e> <unk> to the vocabulary
# <s> is omitted since it should not appear as the next word
vocabulary = vocabulary + ["<e>", "<unk>"]
# obtain unique n-grams
n_grams = []
for n_plus1_gram in n_plus1_gram_counts.keys():
n_gram = n_plus1_gram[0:-1]
n_grams.append(n_gram)
n_grams = list(set(n_grams))
# mapping from n-gram to row
row_index = {n_gram: i for i, n_gram in enumerate(n_grams)}
# mapping from next word to column
col_index = {word: j for j, word in enumerate(vocabulary)}
nrow = len(n_grams)
ncol = len(vocabulary)
count_matrix = np.zeros((nrow, ncol))
for n_plus1_gram, count in n_plus1_gram_counts.items():
n_gram = n_plus1_gram[0:-1]
word = n_plus1_gram[-1]
if word not in vocabulary:
continue
i = row_index[n_gram]
j = col_index[word]
count_matrix[i, j] = count
count_matrix = pd.DataFrame(count_matrix, index=n_grams, columns=vocabulary)
return count_matrix
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
bigram_counts = count_n_grams(sentences, 2)
print("bigram counts")
display(make_count_matrix(bigram_counts, unique_words))
# Show trigram counts
print("\ntrigram counts")
trigram_counts = count_n_grams(sentences, 3)
display(make_count_matrix(trigram_counts, unique_words))
def make_probability_matrix(n_plus1_gram_counts, vocabulary, k):
    count_matrix = make_count_matrix(n_plus1_gram_counts, vocabulary)
count_matrix += k
prob_matrix = count_matrix.div(count_matrix.sum(axis=1), axis=0)
return prob_matrix
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
bigram_counts = count_n_grams(sentences, 2)
print("bigram probabilities")
display(make_probability_matrix(bigram_counts, unique_words, k=1))
print("trigram probabilities")
trigram_counts = count_n_grams(sentences, 3)
display(make_probability_matrix(trigram_counts, unique_words, k=1))
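# Sanity check, assuming the helpers above: every row of the probability
# matrix should sum to 1, since each row is a smoothed distribution over the
# possible next words.
print(make_probability_matrix(bigram_counts, unique_words, k=1).sum(axis=1))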
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: calculate_perplexity
def calculate_perplexity(
sentence, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0
):
"""
Calculate perplexity for a list of sentences
Args:
sentence: List of strings
n_gram_counts: Dictionary of counts of (n+1)-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary_size: number of unique words in the vocabulary
k: Positive smoothing constant
Returns:
Perplexity score
"""
# length of previous words
n = len(list(n_gram_counts.keys())[0])
# prepend <s> and append <e>
sentence = ["<s>"] * n + sentence + ["<e>"]
# Cast the sentence from a list to a tuple
sentence = tuple(sentence)
# length of sentence (after adding <s> and <e> tokens)
N = len(sentence)
# The variable p will hold the product
# that is calculated inside the n-root
# Update this in the code below
product_pi = 1.0
### START CODE HERE (Replace instances of 'None' with your code) ###
# Index t ranges from n to N - 1, inclusive on both ends
for t in range(n, N): # complete this line
# get the n-gram preceding the word at position t
n_gram = sentence[t - n : t]
# get the word at position t
word = sentence[t]
        # Estimate the probability of the word given the n-gram
        # using the n-gram counts, n-plus1-gram counts,
        # the vocabulary_size argument, and the smoothing constant k
        probability = estimate_probability(
            word, n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=k
        )
# Update the product of the probabilities
# This 'product_pi' is a cumulative product
# of the (1/P) factors that are calculated in the loop
product_pi *= 1 / probability
# Take the Nth root of the product
perplexity = product_pi ** (1 / float(N))
### END CODE HERE ###
return perplexity
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
perplexity_train1 = calculate_perplexity(
sentences[0], unigram_counts, bigram_counts, len(unique_words), k=1.0
)
print(f"Perplexity for first train sample: {perplexity_train1:.4f}")
test_sentence = ["i", "like", "a", "dog"]
perplexity_test = calculate_perplexity(
test_sentence, unigram_counts, bigram_counts, len(unique_words), k=1.0
)
print(f"Perplexity for test sample: {perplexity_test:.4f}")
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: suggest_a_word
def suggest_a_word(
previous_tokens,
n_gram_counts,
n_plus1_gram_counts,
vocabulary,
k=1.0,
start_with=None,
):
"""
Get suggestion for the next word
Args:
previous_tokens: The sentence you input where each token is a word. Must have length > n
        n_gram_counts: Dictionary of counts of n-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary: List of words
k: positive constant, smoothing parameter
start_with: If not None, specifies the first few letters of the next word
Returns:
A tuple of
- string of the most likely next word
- corresponding probability
"""
# length of previous words
n = len(list(n_gram_counts.keys())[0])
# From the words that the user already typed
# get the most recent 'n' words as the previous n-gram
previous_n_gram = previous_tokens[-n:]
# Estimate the probabilities that each word in the vocabulary
# is the next word,
# given the previous n-gram, the dictionary of n-gram counts,
# the dictionary of n plus 1 gram counts, and the smoothing constant
probabilities = estimate_probabilities(
previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=k
)
# Initialize suggested word to None
# This will be set to the word with highest probability
suggestion = None
# Initialize the highest word probability to 0
# this will be set to the highest probability
# of all words to be suggested
max_prob = 0
### START CODE HERE (Replace instances of 'None' with your code) ###
# For each word and its probability in the probabilities dictionary:
for word, prob in probabilities.items(): # complete this line
# If the optional start_with string is set
if start_with: # complete this line
# Check if the beginning of word does not match with the letters in 'start_with'
if not word.startswith(start_with): # complete this line
# if they don't match, skip this word (move onto the next word)
continue # complete this line
# Check if this word's probability
# is greater than the current maximum probability
if prob > max_prob: # complete this line
# If so, save this word as the best suggestion (so far)
suggestion = word
# Save the new maximum probability
max_prob = prob
### END CODE HERE
return suggestion, max_prob
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
previous_tokens = ["i", "like"]
tmp_suggest1 = suggest_a_word(
previous_tokens, unigram_counts, bigram_counts, unique_words, k=1.0
)
print(
f"The previous words are 'i like',\n\tand the suggested word is `{tmp_suggest1[0]}` with a probability of {tmp_suggest1[1]:.4f}"
)
print()
# test your code when setting the starts_with
tmp_starts_with = "c"
tmp_suggest2 = suggest_a_word(
previous_tokens,
unigram_counts,
bigram_counts,
unique_words,
k=1.0,
start_with=tmp_starts_with,
)
print(
f"The previous words are 'i like', the suggestion must start with `{tmp_starts_with}`\n\tand the suggested word is `{tmp_suggest2[0]}` with a probability of {tmp_suggest2[1]:.4f}"
)
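# suggest_a_word returns only the single best word. A small hedged sketch for
# returning the top-k candidates instead, assuming the same
# estimate_probabilities helper (sorting the whole distribution is fine for a
# toy vocabulary; a heap would scale better on the full corpus):
def suggest_top_k(
    previous_tokens, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0, top_k=3
):
    n = len(list(n_gram_counts.keys())[0])
    probs = estimate_probabilities(
        previous_tokens[-n:], n_gram_counts, n_plus1_gram_counts, vocabulary, k=k
    )
    return sorted(probs.items(), key=lambda item: item[1], reverse=True)[:top_k]
print(suggest_top_k(["i", "like"], unigram_counts, bigram_counts, unique_words))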
def get_suggestions(
previous_tokens, n_gram_counts_list, vocabulary, k=1.0, start_with=None
):
model_counts = len(n_gram_counts_list)
suggestions = []
for i in range(model_counts - 1):
n_gram_counts = n_gram_counts_list[i]
n_plus1_gram_counts = n_gram_counts_list[i + 1]
suggestion = suggest_a_word(
previous_tokens,
n_gram_counts,
n_plus1_gram_counts,
vocabulary,
k=k,
start_with=start_with,
)
suggestions.append(suggestion)
return suggestions
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
trigram_counts = count_n_grams(sentences, 3)
quadgram_counts = count_n_grams(sentences, 4)
qintgram_counts = count_n_grams(sentences, 5)
n_gram_counts_list = [
unigram_counts,
bigram_counts,
trigram_counts,
quadgram_counts,
qintgram_counts,
]
previous_tokens = ["i", "like"]
tmp_suggest3 = get_suggestions(previous_tokens, n_gram_counts_list, unique_words, k=1.0)
print(f"The previous words are 'i like', the suggestions are:")
display(tmp_suggest3)
n_gram_counts_list = []
for n in range(1, 6):
print("Computing n-gram counts with n =", n, "...")
n_model_counts = count_n_grams(train_data_processed, n)
n_gram_counts_list.append(n_model_counts)
previous_tokens = ["i", "am", "to"]
tmp_suggest4 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest4)
previous_tokens = ["i", "want", "to", "go"]
tmp_suggest5 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest5)
previous_tokens = ["hey", "how", "are"]
tmp_suggest6 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest6)
previous_tokens = ["hey", "how", "are", "you"]
tmp_suggest7 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest7)
previous_tokens = ["hey", "how", "are", "you"]
tmp_suggest8 = get_suggestions(
previous_tokens, n_gram_counts_list, vocabulary, k=1.0, start_with="d"
)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest8)
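# get_suggestions returns one (word, probability) pair per n-gram order. A
# minimal hedged sketch for collapsing them into a single answer by taking the
# highest-probability non-empty suggestion (a crude stand-in for real backoff):
def best_suggestion(suggestions):
    candidates = [s for s in suggestions if s[0] is not None]
    return max(candidates, key=lambda s: s[1]) if candidates else (None, 0.0)
print(best_suggestion(tmp_suggest8))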
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/070/129070449.ipynb
|
tweets-blogs-news-swiftkey-dataset-4million
|
crmercado
|
[{"Id": 129070449, "ScriptId": 38367341, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10072712, "CreationDate": "05/10/2023 18:33:35", "VersionNumber": 1.0, "Title": "Auto Complete using N-Gram Models", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 815.0, "LinesInsertedFromPrevious": 815.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 184801112, "KernelVersionId": 129070449, "SourceDatasetVersionId": 9186}]
|
[{"Id": 9186, "DatasetId": 6261, "DatasourceVersionId": 9186, "CreatorUserId": 1455247, "LicenseName": "CC0: Public Domain", "CreationDate": "12/06/2017 15:42:31", "VersionNumber": 1.0, "Title": "Tweets Blogs News - Swiftkey Dataset 4million", "Slug": "tweets-blogs-news-swiftkey-dataset-4million", "Subtitle": "NLP - Tweets, Blogs, and News Articles 4 million text entries", "Description": "### Context\n\nA zip file from SWIFTKEY in partnership with the Johns Hopkins Data Science Specialization. This Natural Language Processing dataset include millions of tweets, blog posts, and news articles in multiple languages. \n\n### Content\n\nThis RAW data is zipped. It is readable as a large character data type. Select which file (based on language) that you wish to download and the source. Here is the list of files: \n\n## Name Length Date\n## 1 final/ 0 2014-07-22 10:10:00\n## 2 final/de_DE/ 0 2014-07-22 10:10:00\n## 3 final/de_DE/de_DE.twitter.txt 75578341 2014-07-22 10:11:00\n## 4 final/de_DE/de_DE.blogs.txt 85459666 2014-07-22 10:11:00\n## 5 final/de_DE/de_DE.news.txt 95591959 2014-07-22 10:11:00\n## 6 final/ru_RU/ 0 2014-07-22 10:10:00\n## 7 final/ru_RU/ru_RU.blogs.txt 116855835 2014-07-22 10:12:00\n## 8 final/ru_RU/ru_RU.news.txt 118996424 2014-07-22 10:12:00\n## 9 final/ru_RU/ru_RU.twitter.txt 105182346 2014-07-22 10:12:00\n## 10 final/en_US/ 0 2014-07-22 10:10:00\n## 11 final/en_US/en_US.twitter.txt 167105338 2014-07-22 10:12:00\n## 12 final/en_US/en_US.news.txt 205811889 2014-07-22 10:13:00\n## 13 final/en_US/en_US.blogs.txt 210160014 2014-07-22 10:13:00\n## 14 final/fi_FI/ 0 2014-07-22 10:10:00\n## 15 final/fi_FI/fi_FI.news.txt 94234350 2014-07-22 10:11:00\n## 16 final/fi_FI/fi_FI.blogs.txt 108503595 2014-07-22 10:12:00\n## 17 final/fi_FI/fi_FI.twitter.txt 25331142 2014-07-22 10:10:00\n\n### Acknowledgements\n\nThank you to Swiftkey an the Johns Hopkins Bloomberg School of Public Health. \n\n### Inspiration\n\nHow does the 140 character limit twitter cause language to change? \nCan we predict the future development of innovative language (idk, lol, idr, nbd...)?\nIs this enough data to make an accurate predictive text app for texting? tweeting? writing full articles?", "VersionNotes": "Initial release", "TotalCompressedBytes": 574661177.0, "TotalUncompressedBytes": 574661177.0}]
|
[{"Id": 6261, "CreatorUserId": 1455247, "OwnerUserId": 1455247.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 9186.0, "CurrentDatasourceVersionId": 9186.0, "ForumId": 12710, "Type": 2, "CreationDate": "12/06/2017 15:42:31", "LastActivityDate": "02/04/2018", "TotalViews": 10442, "TotalDownloads": 1150, "TotalVotes": 14, "TotalKernels": 12}]
|
[{"Id": 1455247, "UserName": "crmercado", "DisplayName": "Carlos Rafael", "RegisterDate": "11/29/2017", "PerformanceTier": 0}]
|
import math
import random
import numpy as np
import pandas as pd
import nltk
nltk.data.path.append(".")
## Basic File Paths
data_dir = "../input/tweets-blogs-news-swiftkey-dataset-4million/final/en_US"
file_path = data_dir + "/en_US.twitter.txt"
## nltk settings
nltk.data.path.append(data_dir)
## Opening the File in read mode ("r")
with open(file_path, "r") as f:
data = f.read()
print("Data type:", type(data))
print("Number of letters:", len(data))
print("First 300 letters of the data")
print("-------")
display(data[0:300])
print("-------")
print("Last 300 letters of the data")
print("-------")
display(data[-300:])
print("-------")
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: split_to_sentences ###
def split_to_sentences(data):
"""
Split data by linebreak "\n"
Args:
data: str
Returns:
A list of sentences
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
sentences = data.split("\n")
### END CODE HERE ###
# Additional clearning (This part is already implemented)
# - Remove leading and trailing spaces from each sentence
# - Drop sentences if they are empty strings.
sentences = [s.strip() for s in sentences]
sentences = [s for s in sentences if len(s) > 0]
return sentences
# test your code
x = """
I have a pen.\nI have an apple. \nAh\nApple pen.\n
"""
print(x)
split_to_sentences(x)
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: tokenize_sentences ###
def tokenize_sentences(sentences):
"""
Tokenize sentences into tokens (words)
Args:
sentences: List of strings
Returns:
List of lists of tokens
"""
# Initialize the list of lists of tokenized sentences
tokenized_sentences = []
### START CODE HERE (Replace instances of 'None' with your code) ###
# Go through each sentence
for sentence in sentences:
# Convert to lowercase letters
sentence = sentence.lower()
# Convert into a list of words
tokenized = nltk.word_tokenize(sentence)
# append the list of words to the list of lists
tokenized_sentences.append(tokenized)
### END CODE HERE ###
return tokenized_sentences
# test your code
sentences = ["Sky is blue.", "Leaves are green.", "Roses are red."]
tokenize_sentences(sentences)
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: get_tokenized_data ###
def get_tokenized_data(data):
"""
Make a list of tokenized sentences
Args:
data: String
Returns:
List of lists of tokens
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
# Get the sentences by splitting up the data
sentences = split_to_sentences(data)
# Get the list of lists of tokens by tokenizing the sentences
tokenized_sentences = tokenize_sentences(sentences)
### END CODE HERE ###
return tokenized_sentences
# test your function
x = "Sky is blue.\nLeaves are green\nRoses are red."
get_tokenized_data(x)
tokenized_data = get_tokenized_data(data)
random.seed(87)
random.shuffle(tokenized_data)
train_size = int(len(tokenized_data) * 0.8)
train_data = tokenized_data[0:train_size]
test_data = tokenized_data[train_size:]
print(
"{} data are split into {} train and {} test set".format(
len(tokenized_data), len(train_data), len(test_data)
)
)
print("First training sample:")
print(train_data[0])
print("First test sample")
print(test_data[0])
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: count_words ###
def count_words(tokenized_sentences):
"""
Count the number of word appearence in the tokenized sentences
Args:
tokenized_sentences: List of lists of strings
Returns:
dict that maps word (str) to the frequency (int)
"""
word_counts = {}
### START CODE HERE (Replace instances of 'None' with your code) ###
# Loop through each sentence
for sentence in tokenized_sentences: # complete this line
# Go through each token in the sentence
for token in sentence: # complete this line
# If the token is not in the dictionary yet, set the count to 1
if token not in word_counts.keys(): # complete this line
word_counts[token] = 1
# If the token is already in the dictionary, increment the count by 1
else:
word_counts[token] += 1
### END CODE HERE ###
return word_counts
# test your code
tokenized_sentences = [
["sky", "is", "blue", "."],
["leaves", "are", "green", "."],
["roses", "are", "red", "."],
]
count_words(tokenized_sentences)
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: get_words_with_nplus_frequency ###
def get_words_with_nplus_frequency(tokenized_sentences, count_threshold):
"""
Find the words that appear N times or more
Args:
tokenized_sentences: List of lists of sentences
count_threshold: minimum number of occurrences for a word to be in the closed vocabulary.
Returns:
List of words that appear N times or more
"""
# Initialize an empty list to contain the words that
# appear at least 'minimum_freq' times.
closed_vocab = []
# Get the word couts of the tokenized sentences
# Use the function that you defined earlier to count the words
word_counts = count_words(tokenized_sentences)
### START CODE HERE (Replace instances of 'None' with your code) ###
# for each word and its count
for word, cnt in word_counts.items(): # complete this line
# check that the word's count
# is at least as great as the minimum count
if cnt >= count_threshold:
# append the word to the list
closed_vocab.append(word)
### END CODE HERE ###
return closed_vocab
# test your code
tokenized_sentences = [
["sky", "is", "blue", "."],
["leaves", "are", "green", "."],
["roses", "are", "red", "."],
]
tmp_closed_vocab = get_words_with_nplus_frequency(
tokenized_sentences, count_threshold=2
)
print(f"Closed vocabulary:")
print(tmp_closed_vocab)
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: replace_oov_words_by_unk ###
def replace_oov_words_by_unk(tokenized_sentences, vocabulary, unknown_token="<unk>"):
"""
Replace words not in the given vocabulary with '<unk>' token.
Args:
tokenized_sentences: List of lists of strings
vocabulary: List of strings that we will use
unknown_token: A string representing unknown (out-of-vocabulary) words
Returns:
List of lists of strings, with words not in the vocabulary replaced
"""
# Place vocabulary into a set for faster search
vocabulary = set(vocabulary)
# Initialize a list that will hold the sentences
# after less frequent words are replaced by the unknown token
replaced_tokenized_sentences = []
# Go through each sentence
for sentence in tokenized_sentences:
# Initialize the list that will contain
# a single sentence with "unknown_token" replacements
replaced_sentence = []
### START CODE HERE (Replace instances of 'None' with your code) ###
# for each token in the sentence
for token in sentence: # complete this line
# Check if the token is in the closed vocabulary
if token in vocabulary: # complete this line
# If so, append the word to the replaced_sentence
replaced_sentence.append(token)
else:
# otherwise, append the unknown token instead
replaced_sentence.append(unknown_token)
### END CODE HERE ###
# Append the list of tokens to the list of lists
replaced_tokenized_sentences.append(replaced_sentence)
return replaced_tokenized_sentences
tokenized_sentences = [["dogs", "run"], ["cats", "sleep"]]
vocabulary = ["dogs", "sleep"]
tmp_replaced_tokenized_sentences = replace_oov_words_by_unk(
tokenized_sentences, vocabulary
)
print(f"Original sentence:")
print(tokenized_sentences)
print(f"tokenized_sentences with less frequent words converted to '<unk>':")
print(tmp_replaced_tokenized_sentences)
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED_FUNCTION: preprocess_data ###
def preprocess_data(train_data, test_data, count_threshold):
"""
Preprocess data, i.e.,
- Find tokens that appear at least N times in the training data.
- Replace tokens that appear less than N times by "<unk>" both for training and test data.
Args:
train_data, test_data: List of lists of strings.
count_threshold: Words whose count is less than this are
treated as unknown.
Returns:
Tuple of
- training data with low frequent words replaced by "<unk>"
- test data with low frequent words replaced by "<unk>"
- vocabulary of words that appear n times or more in the training data
"""
### START CODE HERE (Replace instances of 'None' with your code) ###
# Get the closed vocabulary using the train data
vocabulary = get_words_with_nplus_frequency(train_data, count_threshold)
# For the train data, replace less common words with "<unk>"
train_data_replaced = replace_oov_words_by_unk(train_data, vocabulary)
# For the test data, replace less common words with "<unk>"
test_data_replaced = replace_oov_words_by_unk(test_data, vocabulary)
### END CODE HERE ###
return train_data_replaced, test_data_replaced, vocabulary
# test your code
tmp_train = [["sky", "is", "blue", "."], ["leaves", "are", "green"]]
tmp_test = [["roses", "are", "red", "."]]
tmp_train_repl, tmp_test_repl, tmp_vocab = preprocess_data(
tmp_train, tmp_test, count_threshold=1
)
print("tmp_train_repl")
print(tmp_train_repl)
print()
print("tmp_test_repl")
print(tmp_test_repl)
print()
print("tmp_vocab")
print(tmp_vocab)
minimum_freq = 2
train_data_processed, test_data_processed, vocabulary = preprocess_data(
train_data, test_data, minimum_freq
)
print("First preprocessed training sample:")
print(train_data_processed[0])
print()
print("First preprocessed test sample:")
print(test_data_processed[0])
print()
print("First 10 vocabulary:")
print(vocabulary[0:10])
print()
print("Size of vocabulary:", len(vocabulary))
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED FUNCTION: count_n_grams ###
def count_n_grams(data, n, start_token="<s>", end_token="<e>"):
"""
Count all n-grams in the data
Args:
data: List of lists of words
n: number of words in a sequence
Returns:
A dictionary that maps a tuple of n-words to its frequency
"""
# Initialize dictionary of n-grams and their counts
n_grams = {}
### START CODE HERE (Replace instances of 'None' with your code) ###
# Go through each sentence in the data
for sentence in data: # complete this line
# prepend start token n times, and append <e> one time
sentence = [start_token] * n + sentence + [end_token]
# convert list to tuple
# So that the sequence of words can be used as
# a key in the dictionary
sentence = tuple(sentence)
# Use 'i' to indicate the start of the n-gram
# from index 0
# to the last index where the end of the n-gram
# is within the sentence.
m = len(sentence) if n == 1 else len(sentence) - 1
for i in range(m): # complete this line
# Get the n-gram from i to i+n
n_gram = sentence[i : i + n]
# check if the n-gram is in the dictionary
if n_gram in n_grams.keys(): # complete this line
# Increment the count for this n-gram
n_grams[n_gram] += 1
else:
# Initialize this n-gram count to 1
n_grams[n_gram] = 1
### END CODE HERE ###
return n_grams
# test your code
# CODE REVIEW COMMENT: Outcome does not match expected outcome
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
print("Uni-gram:")
print(count_n_grams(sentences, 1))
print("Bi-gram:")
print(count_n_grams(sentences, 2))
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
### GRADED FUNCTION: estimate_probability ###
def estimate_probability(
word, previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0
):
"""
Estimate the probabilities of a next word using the n-gram counts with k-smoothing
Args:
word: next word
previous_n_gram: A sequence of words of length n
n_gram_counts: Dictionary of counts of n-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary_size: number of words in the vocabulary
k: positive constant, smoothing parameter
Returns:
A probability
"""
# convert list to tuple to use it as a dictionary key
previous_n_gram = tuple(previous_n_gram)
### START CODE HERE (Replace instances of 'None' with your code) ###
# Set the denominator
# If the previous n-gram exists in the dictionary of n-gram counts,
# Get its count. Otherwise set the count to zero
# Use the dictionary that has counts for n-grams
previous_n_gram_count = (
n_gram_counts[previous_n_gram] if previous_n_gram in n_gram_counts else 0
)
# Calculate the denominator using the count of the previous n gram
# and apply k-smoothing
denominator = previous_n_gram_count + k * vocabulary_size
# Define n plus 1 gram as the previous n-gram plus the current word as a tuple
n_plus1_gram = previous_n_gram + (word,)
# Set the count to the count in the dictionary,
# otherwise 0 if not in the dictionary
# use the dictionary that has counts for the n-gram plus current word
n_plus1_gram_count = (
n_plus1_gram_counts[n_plus1_gram] if n_plus1_gram in n_plus1_gram_counts else 0
)
# Define the numerator use the count of the n-gram plus current word,
# and apply smoothing
numerator = n_plus1_gram_count + k
# Calculate the probability as the numerator divided by denominator
probability = numerator / denominator
### END CODE HERE ###
return probability
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
tmp_prob = estimate_probability(
"cat", "a", unigram_counts, bigram_counts, len(unique_words), k=1
)
print(
f"The estimated probability of word 'cat' given the previous n-gram 'a' is: {tmp_prob:.4f}"
)
def estimate_probabilities(
previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0
):
"""
Estimate the probabilities of next words using the n-gram counts with k-smoothing
Args:
previous_n_gram: A sequence of words of length n
n_gram_counts: Dictionary of counts of (n+1)-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary: List of words
k: positive constant, smoothing parameter
Returns:
A dictionary mapping from next words to the probability.
"""
# convert list to tuple to use it as a dictionary key
previous_n_gram = tuple(previous_n_gram)
# add <e> <unk> to the vocabulary
# <s> is not needed since it should not appear as the next word
vocabulary = vocabulary + ["<e>", "<unk>"]
vocabulary_size = len(vocabulary)
probabilities = {}
for word in vocabulary:
probability = estimate_probability(
word,
previous_n_gram,
n_gram_counts,
n_plus1_gram_counts,
vocabulary_size,
k=k,
)
probabilities[word] = probability
return probabilities
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
estimate_probabilities("a", unigram_counts, bigram_counts, unique_words, k=1)
# Additional test
trigram_counts = count_n_grams(sentences, 3)
estimate_probabilities(["<s>", "<s>"], bigram_counts, trigram_counts, unique_words, k=1)
def make_count_matrix(n_plus1_gram_counts, vocabulary):
# add <e> <unk> to the vocabulary
# <s> is omitted since it should not appear as the next word
vocabulary = vocabulary + ["<e>", "<unk>"]
# obtain unique n-grams
n_grams = []
for n_plus1_gram in n_plus1_gram_counts.keys():
n_gram = n_plus1_gram[0:-1]
n_grams.append(n_gram)
n_grams = list(set(n_grams))
# mapping from n-gram to row
row_index = {n_gram: i for i, n_gram in enumerate(n_grams)}
# mapping from next word to column
col_index = {word: j for j, word in enumerate(vocabulary)}
nrow = len(n_grams)
ncol = len(vocabulary)
count_matrix = np.zeros((nrow, ncol))
for n_plus1_gram, count in n_plus1_gram_counts.items():
n_gram = n_plus1_gram[0:-1]
word = n_plus1_gram[-1]
if word not in vocabulary:
continue
i = row_index[n_gram]
j = col_index[word]
count_matrix[i, j] = count
count_matrix = pd.DataFrame(count_matrix, index=n_grams, columns=vocabulary)
return count_matrix
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
bigram_counts = count_n_grams(sentences, 2)
print("bigram counts")
display(make_count_matrix(bigram_counts, unique_words))
# Show trigram counts
print("\ntrigram counts")
trigram_counts = count_n_grams(sentences, 3)
display(make_count_matrix(trigram_counts, unique_words))
def make_probability_matrix(n_plus1_gram_counts, vocabulary, k):
count_matrix = make_count_matrix(n_plus1_gram_counts, unique_words)
count_matrix += k
prob_matrix = count_matrix.div(count_matrix.sum(axis=1), axis=0)
return prob_matrix
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
bigram_counts = count_n_grams(sentences, 2)
print("bigram probabilities")
display(make_probability_matrix(bigram_counts, unique_words, k=1))
print("trigram probabilities")
trigram_counts = count_n_grams(sentences, 3)
display(make_probability_matrix(trigram_counts, unique_words, k=1))
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: calculate_perplexity
def calculate_perplexity(
sentence, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0
):
"""
Calculate perplexity for a list of sentences
Args:
sentence: List of strings
n_gram_counts: Dictionary of counts of (n+1)-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary_size: number of unique words in the vocabulary
k: Positive smoothing constant
Returns:
Perplexity score
"""
# length of previous words
n = len(list(n_gram_counts.keys())[0])
# prepend <s> and append <e>
sentence = ["<s>"] * n + sentence + ["<e>"]
# Cast the sentence from a list to a tuple
sentence = tuple(sentence)
# length of sentence (after adding <s> and <e> tokens)
N = len(sentence)
# The variable p will hold the product
# that is calculated inside the n-root
# Update this in the code below
product_pi = 1.0
### START CODE HERE (Replace instances of 'None' with your code) ###
# Index t ranges from n to N - 1, inclusive on both ends
for t in range(n, N): # complete this line
# get the n-gram preceding the word at position t
n_gram = sentence[t - n : t]
# get the word at position t
word = sentence[t]
# Estimate the probability of the word given the n-gram
# using the n-gram counts, n-plus1-gram counts,
# vocabulary size, and smoothing constant
probability = estimate_probability(
word, n_gram, n_gram_counts, n_plus1_gram_counts, len(unique_words), k=1
)
# Update the product of the probabilities
# This 'product_pi' is a cumulative product
# of the (1/P) factors that are calculated in the loop
product_pi *= 1 / probability
# Take the Nth root of the product
perplexity = product_pi ** (1 / float(N))
### END CODE HERE ###
return perplexity
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
perplexity_train1 = calculate_perplexity(
sentences[0], unigram_counts, bigram_counts, len(unique_words), k=1.0
)
print(f"Perplexity for first train sample: {perplexity_train1:.4f}")
test_sentence = ["i", "like", "a", "dog"]
perplexity_test = calculate_perplexity(
test_sentence, unigram_counts, bigram_counts, len(unique_words), k=1.0
)
print(f"Perplexity for test sample: {perplexity_test:.4f}")
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: suggest_a_word
def suggest_a_word(
previous_tokens,
n_gram_counts,
n_plus1_gram_counts,
vocabulary,
k=1.0,
start_with=None,
):
"""
Get suggestion for the next word
Args:
previous_tokens: The sentence you input where each token is a word. Must have length > n
n_gram_counts: Dictionary of counts of (n+1)-grams
n_plus1_gram_counts: Dictionary of counts of (n+1)-grams
vocabulary: List of words
k: positive constant, smoothing parameter
start_with: If not None, specifies the first few letters of the next word
Returns:
A tuple of
- string of the most likely next word
- corresponding probability
"""
# length of previous words
n = len(list(n_gram_counts.keys())[0])
# From the words that the user already typed
# get the most recent 'n' words as the previous n-gram
previous_n_gram = previous_tokens[-n:]
# Estimate the probabilities that each word in the vocabulary
# is the next word,
# given the previous n-gram, the dictionary of n-gram counts,
# the dictionary of n plus 1 gram counts, and the smoothing constant
probabilities = estimate_probabilities(
previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=k
)
# Initialize suggested word to None
# This will be set to the word with highest probability
suggestion = None
# Initialize the highest word probability to 0
# this will be set to the highest probability
# of all words to be suggested
max_prob = 0
### START CODE HERE (Replace instances of 'None' with your code) ###
# For each word and its probability in the probabilities dictionary:
for word, prob in probabilities.items(): # complete this line
# If the optional start_with string is set
if start_with: # complete this line
# Check if the beginning of word does not match with the letters in 'start_with'
if not word.startswith(start_with): # complete this line
# if they don't match, skip this word (move onto the next word)
continue # complete this line
# Check if this word's probability
# is greater than the current maximum probability
if prob > max_prob: # complete this line
# If so, save this word as the best suggestion (so far)
suggestion = word
# Save the new maximum probability
max_prob = prob
### END CODE HERE
return suggestion, max_prob
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
previous_tokens = ["i", "like"]
tmp_suggest1 = suggest_a_word(
previous_tokens, unigram_counts, bigram_counts, unique_words, k=1.0
)
print(
f"The previous words are 'i like',\n\tand the suggested word is `{tmp_suggest1[0]}` with a probability of {tmp_suggest1[1]:.4f}"
)
print()
# test your code when setting the starts_with
tmp_starts_with = "c"
tmp_suggest2 = suggest_a_word(
previous_tokens,
unigram_counts,
bigram_counts,
unique_words,
k=1.0,
start_with=tmp_starts_with,
)
print(
f"The previous words are 'i like', the suggestion must start with `{tmp_starts_with}`\n\tand the suggested word is `{tmp_suggest2[0]}` with a probability of {tmp_suggest2[1]:.4f}"
)
def get_suggestions(
previous_tokens, n_gram_counts_list, vocabulary, k=1.0, start_with=None
):
model_counts = len(n_gram_counts_list)
suggestions = []
for i in range(model_counts - 1):
n_gram_counts = n_gram_counts_list[i]
n_plus1_gram_counts = n_gram_counts_list[i + 1]
suggestion = suggest_a_word(
previous_tokens,
n_gram_counts,
n_plus1_gram_counts,
vocabulary,
k=k,
start_with=start_with,
)
suggestions.append(suggestion)
return suggestions
# test your code
sentences = [["i", "like", "a", "cat"], ["this", "dog", "is", "like", "a", "cat"]]
unique_words = list(set(sentences[0] + sentences[1]))
unigram_counts = count_n_grams(sentences, 1)
bigram_counts = count_n_grams(sentences, 2)
trigram_counts = count_n_grams(sentences, 3)
quadgram_counts = count_n_grams(sentences, 4)
qintgram_counts = count_n_grams(sentences, 5)
n_gram_counts_list = [
unigram_counts,
bigram_counts,
trigram_counts,
quadgram_counts,
qintgram_counts,
]
previous_tokens = ["i", "like"]
tmp_suggest3 = get_suggestions(previous_tokens, n_gram_counts_list, unique_words, k=1.0)
print(f"The previous words are 'i like', the suggestions are:")
display(tmp_suggest3)
n_gram_counts_list = []
for n in range(1, 6):
print("Computing n-gram counts with n =", n, "...")
n_model_counts = count_n_grams(train_data_processed, n)
n_gram_counts_list.append(n_model_counts)
previous_tokens = ["i", "am", "to"]
tmp_suggest4 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest4)
previous_tokens = ["i", "want", "to", "go"]
tmp_suggest5 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest5)
previous_tokens = ["hey", "how", "are"]
tmp_suggest6 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest6)
previous_tokens = ["hey", "how", "are", "you"]
tmp_suggest7 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest7)
previous_tokens = ["hey", "how", "are", "you"]
tmp_suggest8 = get_suggestions(
previous_tokens, n_gram_counts_list, vocabulary, k=1.0, start_with="d"
)
print(f"The previous words are {previous_tokens}, the suggestions are:")
display(tmp_suggest8)
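# Optional sketch (an addition, not part of the original assignment): rank the
# suggestions returned by all n-gram models by probability and drop duplicate
# words, which is closer to what an autocomplete UI would actually display.
def rank_suggestions(
    previous_tokens, n_gram_counts_list, vocabulary, k=1.0, start_with=None
):
    raw = get_suggestions(
        previous_tokens, n_gram_counts_list, vocabulary, k=k, start_with=start_with
    )
    best = {}
    for word, prob in raw:
        if word is not None and (word not in best or prob > best[word]):
            best[word] = prob
    return sorted(best.items(), key=lambda item: item[1], reverse=True)


display(
    rank_suggestions(
        previous_tokens, n_gram_counts_list, vocabulary, k=1.0, start_with="d"
    )
)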
| false | 0 | 7,738 | 1 | 8,624 | 7,738 |
||
129070024
|
<jupyter_start><jupyter_text>All Lending Club loan data
# Context
Update: I probably won't be able to update the data anymore, as LendingClub now has a scary 'TOS' popup when downloading the data. Worst case, they will ask me/Kaggle to take it down from here.
This dataset contains the full LendingClub data available from [their site][1]. There are separate files for accepted and rejected loans. The accepted loans also include the FICO scores, which can only be downloaded when you are signed in to LendingClub and download the data.
See the Python and R getting started kernels to get started:
- R: https://www.kaggle.com/wordsforthewise/eda-in-r-arggghh
- Python: https://www.kaggle.com/wordsforthewise/eda-with-python
I created a git repo for the code which is used to create this data: https://github.com/nateGeorge/preprocess_lending_club_data
# Background
I wanted an easy way to share all the lending club data with others. Unfortunately, the [data on their site][1] is fragmented into many smaller files. There is another lending club [dataset on Kaggle][2], but it wasn't updated in years. It seems like the "Kaggle Team" is updating it now. I think it also doesn't include the full rejected loans, which are included here. It seems like the [other dataset][3] confusingly has some of the rejected loans mixed into the accepted ones. Now there are a ton of other [LendingClub datasets on here too][4], most of which seem to have no documentation or explanation of what the data actually is.
# Content
The definitions for the fields are on the [LendingClub site][5], at the bottom of the page. Kaggle won't let me upload the .xlsx file for some reason since it seems to be in multiple other data repos. This file seems to be in the [other main repo][6], but again, it's better to get it directly from the [source][5].
Unfortunately, there is (maybe "was" now?) a limit of 500MB for dataset files, so I had to compress the files with gzip in the Python pandas package.
I cleaned the data a tiny bit: I removed percent symbols (%) from `int_rate` and `revol_util` columns in the accepted loans and converted those columns to floats.
# Update
The URL column is in the dataset for completeness, as of 2018 Q2.
[1]: https://www.lendingclub.com/info/download-data.action
[2]: https://www.kaggle.com/wendykan/lending-club-loan-data
[3]: https://www.kaggle.com/wendykan/lending-club-loan-data
[4]: https://www.kaggle.com/datasets?sortBy=relevance&group=public&search=lending%20club&page=1&pageSize=20&size=all&filetype=all&license=all
[5]: https://www.lendingclub.com/info/download-data.action
[6]: https://www.kaggle.com/wendykan/lending-club-loan-data
Kaggle dataset identifier: lending-club
<jupyter_script>import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv(
"/kaggle/input/lending-club/accepted_2007_to_2018Q4.csv.gz", low_memory=False
)
display(df.shape)
display(df.head())
# Check the dataset's columns and their data types
display(df.dtypes)
# Get summary statistics for numerical columns
display(df.describe())
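# Optional sketch (not required for the analysis below): the accepted-loans file is
# large, so downcasting float columns and converting low-cardinality string columns
# to 'category' can noticeably reduce memory usage. The 100-unique-values cutoff is
# an arbitrary assumption.
def shrink_memory(frame):
    out = frame.copy()
    for col in out.select_dtypes("float").columns:
        out[col] = pd.to_numeric(out[col], downcast="float")
    for col in out.select_dtypes("object").columns:
        if out[col].nunique(dropna=True) < 100:
            out[col] = out[col].astype("category")
    return out


# df = shrink_memory(df)  # kept commented so the later cells behave exactly as written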
# Check for missing values
missing_values = df.isnull().sum()
display(missing_values)
# Handle missing values: Drop columns and rows with too many missing values
df = df.dropna(axis=1, thresh=int(0.8 * len(df)))
df = df.dropna(axis=0, thresh=int(0.8 * len(df.columns)))
# Visualize the distribution of the target variable (loan_status)
plt.figure(figsize=(10, 5))
sns.countplot(data=df, x="loan_status")
plt.title("Loan Status Distribution")
plt.xticks(rotation=45)
plt.show()
# Visualize the distribution of loan amounts
plt.figure(figsize=(10, 5))
sns.histplot(data=df, x="loan_amnt", kde=True, bins=30)
plt.title("Loan Amount Distribution")
plt.show()
# Visualize the distribution of interest rates
plt.figure(figsize=(10, 5))
sns.histplot(data=df, x="int_rate", kde=True, bins=30)
plt.title("Interest Rate Distribution")
plt.show()
# Visualize the relationship between loan amount and interest rate
plt.figure(figsize=(10, 5))
sns.scatterplot(data=df, x="loan_amnt", y="int_rate")
plt.title("Loan Amount vs. Interest Rate")
plt.show()
# Visualize the relationship between loan amount, interest rate, and loan grade
plt.figure(figsize=(10, 5))
sns.scatterplot(data=df, x="loan_amnt", y="int_rate", hue="grade")
plt.title("Loan Amount vs. Interest Rate by Loan Grade")
plt.show()
# Analyze the distribution of loan amounts across different loan grades
plt.figure(figsize=(10, 5))
sns.boxplot(data=df, x="grade", y="loan_amnt", order=sorted(df["grade"].unique()))
plt.title("Loan Amount Distribution Across Loan Grades")
plt.show()
# Analyze the distribution of interest rates across different loan grades
plt.figure(figsize=(10, 5))
sns.boxplot(data=df, x="grade", y="int_rate", order=sorted(df["grade"].unique()))
plt.title("Interest Rate Distribution Across Loan Grades")
plt.show()
# Analyze the distribution of annual incomes
plt.figure(figsize=(10, 5))
sns.histplot(data=df[df["annual_inc"] < 300000], x="annual_inc", kde=True, bins=30)
plt.title("Annual Income Distribution")
plt.show()
# Analyze the distribution of debt-to-income ratios
plt.figure(figsize=(10, 5))
sns.histplot(data=df, x="dti", kde=True, bins=30)
plt.title("Debt-to-Income Ratio Distribution")
plt.show()
# Analyze the relationship between annual income and debt-to-income ratio
plt.figure(figsize=(10, 5))
sns.scatterplot(data=df[df["annual_inc"] < 300000], x="annual_inc", y="dti")
plt.title("Annual Income vs. Debt-to-Income Ratio")
plt.show()
# Convert grade column to numerical category
df["grade_cat"] = df["grade"].astype("category").cat.codes
# Analyze the relationship between annual income, debt-to-income ratio, and loan grade
plt.figure(figsize=(10, 5))
sns.scatterplot(
data=df[df["annual_inc"] < 300000], x="annual_inc", y="dti", hue="grade_cat"
)
plt.title("Annual Income vs. Debt-to-Income Ratio by Loan Grade")
plt.show()
# Analyze the correlation matrix between numerical variables
plt.figure(figsize=(10, 10))
sns.heatmap(
df[
[
"loan_amnt",
"int_rate",
"annual_inc",
"dti",
"open_acc",
"total_acc",
"loan_status",
]
].corr(),
annot=True,
cmap="coolwarm",
)
plt.title("Correlation Matrix of Numerical Variables")
plt.show()
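# Optional sketch (an assumption, not part of the original analysis): loan_status is
# categorical, so to relate it to the numeric variables we can encode closed loans as
# a binary default flag (assuming the labels "Fully Paid" and "Charged Off" are present).
status_map = {"Fully Paid": 0, "Charged Off": 1}
df_closed = df[df["loan_status"].isin(list(status_map))].copy()
df_closed["is_default"] = df_closed["loan_status"].map(status_map)
print(
    df_closed[["loan_amnt", "int_rate", "annual_inc", "dti", "is_default"]]
    .corr()["is_default"]
    .sort_values(ascending=False)
)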
# Analyze the distribution of loan amounts and interest rates by loan grade and loan status using violin plots
plt.figure(figsize=(12, 6))
for grade in sorted(df["grade"].unique()):
df_grade = df[df["grade"] == grade]
sns.violinplot(data=df_grade, x="loan_status", y="loan_amnt", inner="quartile")
plt.title(f"Loan Amount Distribution by Loan Status and Grade {grade}")
plt.xlabel("Loan Status")
plt.ylabel("Loan Amount (in USD)")
plt.show()
sns.violinplot(data=df_grade, x="loan_status", y="int_rate", inner="quartile")
plt.title(f"Interest Rate Distribution by Loan Status and Grade {grade}")
plt.xlabel("Loan Status")
plt.ylabel("Interest Rate")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/070/129070024.ipynb
|
lending-club
|
wordsforthewise
|
[{"Id": 129070024, "ScriptId": 38366919, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7837074, "CreationDate": "05/10/2023 18:28:28", "VersionNumber": 1.0, "Title": "Lending_Loans", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 119.0, "LinesInsertedFromPrevious": 119.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 184800243, "KernelVersionId": 129070024, "SourceDatasetVersionId": 370089}]
|
[{"Id": 370089, "DatasetId": 902, "DatasourceVersionId": 384604, "CreatorUserId": 393328, "LicenseName": "CC0: Public Domain", "CreationDate": "04/10/2019 18:03:34", "VersionNumber": 3.0, "Title": "All Lending Club loan data", "Slug": "lending-club", "Subtitle": "2007 through current Lending Club accepted and rejected loan data", "Description": "# Context \n\nUpdate: I probably won't be able to update the data anymore, as LendingClub now has a scary 'TOS' popup when downloading the data. Worst case, they will ask me/Kaggle to take it down from here.\n\nThis dataset contains the full LendingClub data available from [their site][1]. There are separate files for accepted and rejected loans. The accepted loans also include the FICO scores, which can only be downloaded when you are signed in to LendingClub and download the data.\n\nSee the Python and R getting started kernels to get started:\n\n- R: https://www.kaggle.com/wordsforthewise/eda-in-r-arggghh\n- Python: https://www.kaggle.com/wordsforthewise/eda-with-python\n\nI created a git repo for the code which is used to create this data: https://github.com/nateGeorge/preprocess_lending_club_data\n\n# Background\n\nI wanted an easy way to share all the lending club data with others. Unfortunately, the [data on their site][1] is fragmented into many smaller files. There is another lending club [dataset on Kaggle][2], but it wasn't updated in years. It seems like the \"Kaggle Team\" is updating it now. I think it also doesn't include the full rejected loans, which are included here. It seems like the [other dataset][3] confusingly has some of the rejected loans mixed into the accepted ones. Now there are a ton of other [LendingClub datasets on here too][4], most of which seem to have no documentation or explanation of what the data actually is.\n\n\n# Content\n\nThe definitions for the fields are on the [LendingClub site][5], at the bottom of the page. Kaggle won't let me upload the .xlsx file for some reason since it seems to be in multiple other data repos. This file seems to be in the [other main repo][6], but again, it's better to get it directly from the [source][5].\n\nUnfortunately, there is (maybe \"was\" now?) a limit of 500MB for dataset files, so I had to compress the files with gzip in the Python pandas package. \n\nI cleaned the data a tiny bit: I removed percent symbols (%) from `int_rate` and `revol_util` columns in the accepted loans and converted those columns to floats.\n\n# Update\nThe URL column is in the dataset for completeness, as of 2018 Q2.\n\n\n [1]: https://www.lendingclub.com/info/download-data.action\n [2]: https://www.kaggle.com/wendykan/lending-club-loan-data\n [3]: https://www.kaggle.com/wendykan/lending-club-loan-data\n [4]: https://www.kaggle.com/datasets?sortBy=relevance&group=public&search=lending%20club&page=1&pageSize=20&size=all&filetype=all&license=all\n [5]: https://www.lendingclub.com/info/download-data.action\n [6]: https://www.kaggle.com/wendykan/lending-club-loan-data", "VersionNotes": "Add data definitions file, even though it's on LendingClub's site. Also remove older datasets.", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 648053013.0}]
|
[{"Id": 902, "CreatorUserId": 393328, "OwnerUserId": 393328.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 370089.0, "CurrentDatasourceVersionId": 384604.0, "ForumId": 2758, "Type": 2, "CreationDate": "03/01/2017 22:25:40", "LastActivityDate": "02/06/2018", "TotalViews": 353986, "TotalDownloads": 48692, "TotalVotes": 669, "TotalKernels": 82}]
|
[{"Id": 393328, "UserName": "wordsforthewise", "DisplayName": "Nathan George", "RegisterDate": "07/30/2015", "PerformanceTier": 1}]
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv(
"/kaggle/input/lending-club/accepted_2007_to_2018Q4.csv.gz", low_memory=False
)
display(df.shape)
display(df.head())
# Check the dataset's columns and their data types
display(df.dtypes)
# Get summary statistics for numerical columns
display(df.describe())
# Check for missing values
missing_values = df.isnull().sum()
display(missing_values)
# Handle missing values: Drop columns and rows with too many missing values
df = df.dropna(axis=1, thresh=int(0.8 * len(df)))
df = df.dropna(axis=0, thresh=int(0.8 * len(df.columns)))
# Visualize the distribution of the target variable (loan_status)
plt.figure(figsize=(10, 5))
sns.countplot(data=df, x="loan_status")
plt.title("Loan Status Distribution")
plt.xticks(rotation=45)
plt.show()
# Visualize the distribution of loan amounts
plt.figure(figsize=(10, 5))
sns.histplot(data=df, x="loan_amnt", kde=True, bins=30)
plt.title("Loan Amount Distribution")
plt.show()
# Visualize the distribution of interest rates
plt.figure(figsize=(10, 5))
sns.histplot(data=df, x="int_rate", kde=True, bins=30)
plt.title("Interest Rate Distribution")
plt.show()
# Visualize the relationship between loan amount and interest rate
plt.figure(figsize=(10, 5))
sns.scatterplot(data=df, x="loan_amnt", y="int_rate")
plt.title("Loan Amount vs. Interest Rate")
plt.show()
# Visualize the relationship between loan amount, interest rate, and loan grade
plt.figure(figsize=(10, 5))
sns.scatterplot(data=df, x="loan_amnt", y="int_rate", hue="grade")
plt.title("Loan Amount vs. Interest Rate by Loan Grade")
plt.show()
# Analyze the distribution of loan amounts across different loan grades
plt.figure(figsize=(10, 5))
sns.boxplot(data=df, x="grade", y="loan_amnt", order=sorted(df["grade"].unique()))
plt.title("Loan Amount Distribution Across Loan Grades")
plt.show()
# Analyze the distribution of interest rates across different loan grades
plt.figure(figsize=(10, 5))
sns.boxplot(data=df, x="grade", y="int_rate", order=sorted(df["grade"].unique()))
plt.title("Interest Rate Distribution Across Loan Grades")
plt.show()
# Analyze the distribution of annual incomes
plt.figure(figsize=(10, 5))
sns.histplot(data=df[df["annual_inc"] < 300000], x="annual_inc", kde=True, bins=30)
plt.title("Annual Income Distribution")
plt.show()
# Analyze the distribution of debt-to-income ratios
plt.figure(figsize=(10, 5))
sns.histplot(data=df, x="dti", kde=True, bins=30)
plt.title("Debt-to-Income Ratio Distribution")
plt.show()
# Analyze the relationship between annual income and debt-to-income ratio
plt.figure(figsize=(10, 5))
sns.scatterplot(data=df[df["annual_inc"] < 300000], x="annual_inc", y="dti")
plt.title("Annual Income vs. Debt-to-Income Ratio")
plt.show()
# Convert grade column to numerical category
df["grade_cat"] = df["grade"].astype("category").cat.codes
# Analyze the relationship between annual income, debt-to-income ratio, and loan grade
plt.figure(figsize=(10, 5))
sns.scatterplot(
data=df[df["annual_inc"] < 300000], x="annual_inc", y="dti", hue="grade_cat"
)
plt.title("Annual Income vs. Debt-to-Income Ratio by Loan Grade")
plt.show()
# Analyze the correlation matrix between numerical variables
plt.figure(figsize=(10, 10))
sns.heatmap(
df[
[
"loan_amnt",
"int_rate",
"annual_inc",
"dti",
"open_acc",
"total_acc",
"loan_status",
]
].corr(),
annot=True,
cmap="coolwarm",
)
plt.title("Correlation Matrix of Numerical Variables")
plt.show()
# Analyze the distribution of loan amounts and interest rates by loan grade and loan status using violin plots
plt.figure(figsize=(12, 6))
for grade in sorted(df["grade"].unique()):
df_grade = df[df["grade"] == grade]
sns.violinplot(data=df_grade, x="loan_status", y="loan_amnt", inner="quartile")
plt.title(f"Loan Amount Distribution by Loan Status and Grade {grade}")
plt.xlabel("Loan Status")
plt.ylabel("Loan Amount (in USD)")
plt.show()
sns.violinplot(data=df_grade, x="loan_status", y="int_rate", inner="quartile")
plt.title(f"Interest Rate Distribution by Loan Status and Grade {grade}")
plt.xlabel("Loan Status")
plt.ylabel("Interest Rate")
plt.show()
| false | 1 | 1,403 | 2 | 2,177 | 1,403 |
||
129960661
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
data = pd.read_csv("/kaggle/input/unemployment/clean_unemployment.csv")
unemp = pd.DataFrame(data)
print(unemp.info())
print(unemp[["country_code", "country_name", "continent"]][unemp["continent"].isna()])
# Fill in the missing continents identified above
unemp.loc[65, "continent"] = "Oceania"
unemp.loc[67, "continent"] = "Asia"
unemp.loc[99, "continent"] = "Asia"
unemp.loc[117, "continent"] = "Oceania"
unemp.loc[132, "continent"] = "North America"
print(unemp["continent"].value_counts())
sns.countplot(data=unemp, y="continent")
plt.show()
Pandemic = unemp.groupby("continent")[["2019", "2020", "2021"]].mean()
country_name = []
country_code = []
continent = []
year = []
unemployment_rate = []
column_names = unemp.columns[3:]
for index, row in unemp.iterrows():
for temp_year in column_names:
country_name.append(row["country_name"])
country_code.append(row["country_code"])
continent.append(row["continent"])
year.append(temp_year)
unemployment_rate.append(row[temp_year])
unemp_ = pd.DataFrame(
data={
"Year": year,
"Country Name": country_name,
"Country Code": country_code,
"Continent": continent,
"Unemployment Rate": unemployment_rate,
}
)
unemp_.info()
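# A more concise alternative to the loop above (a sketch producing the same long
# format) using pd.melt on the wide year columns.
unemp_melted = unemp.melt(
    id_vars=["country_code", "country_name", "continent"],
    var_name="Year",
    value_name="Unemployment Rate",
).rename(
    columns={
        "country_code": "Country Code",
        "country_name": "Country Name",
        "continent": "Continent",
    }
)
print(unemp_melted.shape)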
fig = px.choropleth(
unemp_,
locations="Country Code",
color="Unemployment Rate",
hover_name="Country Name",
animation_frame="Year",
range_color=(0, 40),
color_continuous_scale=px.colors.sequential.Plasma,
title="Interactive Unemployment Map",
)
fig.show()
Australia = unemp_[unemp_["Country Code"] == "AUS"]
print(Australia, end="\n\n")
sns.lineplot(data=Australia, x="Year", y="Unemployment Rate")
plt.show()
Egypt = unemp_[unemp_["Country Code"] == "EGY"]
print(Egypt, end="\n\n")
sns.lineplot(data=Egypt, x="Year", y="Unemployment Rate")
plt.show()
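# Optional sketch: compare average unemployment by continent over time using the
# long-format frame built above.
continent_trend = (
    unemp_.groupby(["Continent", "Year"])["Unemployment Rate"].mean().reset_index()
)
plt.figure(figsize=(10, 5))
sns.lineplot(data=continent_trend, x="Year", y="Unemployment Rate", hue="Continent")
plt.xticks(rotation=45)
plt.title("Average Unemployment Rate by Continent")
plt.show()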
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960661.ipynb
| null | null |
[{"Id": 129960661, "ScriptId": 36986536, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:13:36", "VersionNumber": 1.0, "Title": "Unemployment_World", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 61.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
data = pd.read_csv("/kaggle/input/unemployment/clean_unemployment.csv")
unemp = pd.DataFrame(data)
print(unemp.info())
print(unemp[["country_code", "country_name", "continent"]][unemp["continent"].isna()])
unemp.loc[65, "continent"] = "Oceania"
unemp.loc[67, "continent"] = "Asia"
unemp.loc[99, "continent"] = "Asia"
unemp.loc[117, "continent"] = "Oceania"
unemp.loc[132, "continent"] = "North America"
print(unemp["continent"].value_counts())
sns.countplot(data=unemp, y="continent")
plt.show()
Pandemic = unemp.groupby("continent")[["2019", "2020", "2021"]].mean()
country_name = []
country_code = []
continent = []
year = []
unemployment_rate = []
column_names = unemp.columns[3:]
for index, row in unemp.iterrows():
for temp_year in column_names:
country_name.append(row["country_name"])
country_code.append(row["country_code"])
continent.append(row["continent"])
year.append(temp_year)
unemployment_rate.append(row[temp_year])
unemp_ = pd.DataFrame(
data={
"Year": year,
"Country Name": country_name,
"Country Code": country_code,
"Continent": continent,
"Unemployment Rate": unemployment_rate,
}
)
unemp_.info()
fig = px.choropleth(
unemp_,
locations="Country Code",
color="Unemployment Rate",
hover_name="Country Name",
animation_frame="Year",
range_color=(0, 40),
color_continuous_scale=px.colors.sequential.Plasma,
title="Interactive Unemployment Map",
)
fig.show()
Australia = unemp_[unemp_["Country Code"] == "AUS"]
print(Australia, end="\n\n")
sns.lineplot(data=Australia, x="Year", y="Unemployment Rate")
plt.show()
Egypt = unemp_[unemp_["Country Code"] == "EGY"]
print(Egypt, end="\n\n")
sns.lineplot(data=Egypt, x="Year", y="Unemployment Rate")
plt.show()
| false | 0 | 656 | 0 | 656 | 656 |
||
129960130
|
<jupyter_start><jupyter_text>U.S. Education Datasets: Unification Project
***Author's Note 2019/04/20:*** *Revisiting this project, I recently discovered the [incredibly comprehensive API](https://educationdata.urban.org/documentation/schools.html#overview) produced by the Urban Institute. It achieves all of the goals laid out for this dataset in wonderful detail. I recommend that users interested pay a visit to their site.*
### Context
This dataset is designed to bring together multiple facets of U.S. education data into one convenient CSV (states_all.csv).
### Contents
* **`states_all.csv`**:
The primary data file. Contains aggregates from all state-level sources in one CSV.
* **`output_files/states_all_extended.csv`**:
The contents of `states_all.csv` with additional data related to race and gender.
### Column Breakdown
#### **Identification**
* `PRIMARY_KEY`: A combination of the year and state name.
* `YEAR`
* `STATE`
#### **Enrollment**
A breakdown of students enrolled in schools by school year.
* `GRADES_PK`: Number of students in Pre-Kindergarten education.
* `GRADES_4`: Number of students in fourth grade.
* `GRADES_8`: Number of students in eighth grade.
* `GRADES_12`: Number of students in twelfth grade.
* `GRADES_1_8`: Number of students in the first through eighth grades.
* `GRADES 9_12`: Number of students in the ninth through twelfth grades.
* `GRADES_ALL`: The count of all students in the state. Comparable to ENROLL in the financial data (which is the U.S.
Census Bureau's estimate for students in the state).
The extended version of states_all contains additional columns that breakdown enrollment by race and gender. For example:
* `G06_A_A`: Total number of sixth grade students.
* `G06_AS_M`: Number of sixth grade male students whose ethnicity was classified as "Asian".
* `G08_AS_A_READING`: Average reading score of eighth grade students whose ethnicity was classified as "Asian".
The represented races include AM (American Indian or Alaska Native), AS (Asian), HI (Hispanic/Latino), BL (Black or African American), WH (White), HP (Hawaiian Native/Pacific Islander), and TR (Two or More Races). The represented genders include M (Male) and F (Female).
#### **Financials**
A breakdown of states by revenue and expenditure.
* `ENROLL`: The U.S. Census Bureau's count for students in the state. Should be comparable to GRADES_ALL (which is the
NCES's estimate for students in the state).
* `TOTAL REVENUE`: The total amount of revenue for the state.
* `FEDERAL_REVENUE`
* `STATE_REVENUE`
* `LOCAL_REVENUE`
* TOTAL_EXPENDITURE: The total expenditure for the state.
* `INSTRUCTION_EXPENDITURE`
* `SUPPORT_SERVICES_EXPENDITURE`
* `CAPITAL_OUTLAY_EXPENDITURE`
* `OTHER_EXPENDITURE`
#### **Academic Achievement**
A breakdown of student performance as assessed by the corresponding exams (math and reading,
grades 4 and 8).
* `AVG_MATH_4_SCORE`: The state's average score for fourth graders taking the NAEP math exam.
* `AVG_MATH_8_SCORE`: The state's average score for eight graders taking the NAEP math exam.
* `AVG_READING_4_SCORE`: The state's average score for fourth graders taking the NAEP reading exam.
* `AVG_READING_8_SCORE`: The state's average score for eighth graders taking the NAEP reading exam.
### Data Processing
The original sources can be found here:
<pre># Enrollment
https://nces.ed.gov/ccd/stnfis.asp
# Financials
https://www.census.gov/programs-surveys/school-finances/data/tables.html
# Academic Achievement
https://www.nationsreportcard.gov/ndecore/xplore/NDE
</pre>
Data was aggregated using a Python program I wrote. The code (as well as additional project information) can be found [here][1].
### Methodology Notes
* Spreadsheets for NCES enrollment data for 2014, 2011, 2010, and 2009
were modified to place key data on the same sheet, making scripting easier.
* The column 'ENROLL' represents the U.S. Census Bureau data value (financial data), while the
column 'GRADES_ALL' represents the NCES data value (demographic data). Though the two organizations
correspond on this matter, these values (which are ostensibly the same) do vary. Their documentation chalks this
up to differences in membership (i.e. what is and is not a fourth grade student).
* Enrollment data from NCES has seen a number of changes across survey years. One of the more notable is that data on student gender does not appear to have been collected until 2009. The information in states_all_extended.csv reflects this.
* NAEP test score data is only available for certain years
* The current version of this data is concerned with state-level patterns. It is the author's hope that future
versions will allow for school district-level granularity.
Kaggle dataset identifier: us-education-datasets-unification-project
<jupyter_script># # US Education
# [us-education-datasets-unification-project Data url on kaggle](https://www.kaggle.com/datasets/noriuk/us-education-datasets-unification-project)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv(
"/kaggle/input/us-education-datasets-unification-project/states_all.csv"
)
data.describe()
sns.set_theme()
# ## Correlation between Math and Reading Scores
sns.scatterplot(x="AVG_MATH_4_SCORE", y="AVG_READING_4_SCORE", data=data, hue="YEAR")
plt.xlabel("Average Math Score (Grade 4)")
plt.ylabel("Average Reading Score (Grade 4)")
plt.title("Correlation between Math and Reading Scores (Grade 4)")
plt.show()
sns.scatterplot(x="AVG_MATH_8_SCORE", y="AVG_READING_8_SCORE", data=data, hue="YEAR")
plt.xlabel("Average Math Score (Grade 8)")
plt.ylabel("Average Reading Score (Grade 8)")
plt.title("Correlation between Math and Reading Scores (Grade 8)")
plt.show()
# We can see that there is a strong correlation between the math and reading scores,
# so a student who does well in math is likely to do well in reading as well.
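# A quick numeric check (sketch): Pearson correlation between math and reading
# scores for each grade; rows with missing scores are ignored by .corr().
print(data[["AVG_MATH_4_SCORE", "AVG_READING_4_SCORE"]].corr())
print(data[["AVG_MATH_8_SCORE", "AVG_READING_8_SCORE"]].corr())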
# ## Trend of Average Reading Scores over the Years
da = data[data["AVG_READING_8_SCORE"] > 0]
plt.figure(figsize=(12, 6))
sns.barplot(x="YEAR", y="AVG_READING_8_SCORE", data=da)
plt.xlabel("Year")
plt.ylabel("Average Reading Score (Grade 8)")
plt.title("Trend of Average Reading Scores over the Years")
plt.show()
# avg math 8 score over the years
da = data[data["AVG_MATH_8_SCORE"] > 0]
plt.figure(figsize=(12, 6))
sns.barplot(x="YEAR", y="AVG_MATH_8_SCORE", data=da)
plt.xlabel("Year")
plt.ylabel("Average Math Score (Grade 8)")
plt.title("Trend of Average Math Scores over the Years")
plt.show()
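# Optional sketch: states with the highest average grade-8 math scores in the most
# recent year that has scores (note: STATE may also contain aggregate rows such as
# national totals, so this is only a rough ranking).
scored = data.dropna(subset=["AVG_MATH_8_SCORE"])
latest_year = scored["YEAR"].max()
top_states = scored[scored["YEAR"] == latest_year].nlargest(10, "AVG_MATH_8_SCORE")[
    ["STATE", "AVG_MATH_8_SCORE"]
]
print(f"Top average grade-8 math scores in {latest_year}:")
print(top_states)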
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960130.ipynb
|
us-education-datasets-unification-project
|
noriuk
|
[{"Id": 129960130, "ScriptId": 38655276, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14998089, "CreationDate": "05/17/2023 18:08:05", "VersionNumber": 1.0, "Title": "us-education", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 52.0, "LinesInsertedFromPrevious": 52.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186395846, "KernelVersionId": 129960130, "SourceDatasetVersionId": 1078789}]
|
[{"Id": 1078789, "DatasetId": 95317, "DatasourceVersionId": 1108572, "CreatorUserId": 1159129, "LicenseName": "Other (specified in description)", "CreationDate": "04/13/2020 17:09:38", "VersionNumber": 5.0, "Title": "U.S. Education Datasets: Unification Project", "Slug": "us-education-datasets-unification-project", "Subtitle": "K-12 financial, enrollment, and achievement data in one place", "Description": "***Author's Note 2019/04/20:*** *Revisiting this project, I recently discovered the [incredibly comprehensive API](https://educationdata.urban.org/documentation/schools.html#overview) produced by the Urban Institute. It achieves all of the goals laid out for this dataset in wonderful detail. I recommend that users interested pay a visit to their site.*\n\n### Context\n\nThis dataset is designed to bring together multiple facets of U.S. education data into one convenient CSV (states_all.csv). \n\n### Contents\n\n* **`states_all.csv`**:\nThe primary data file. Contains aggregates from all state-level sources in one CSV.\n\n* **`output_files/states_all_extended.csv`**:\nThe contents of `states_all.csv` with additional data related to race and gender. \n\n\n### Column Breakdown\n\n#### **Identification**\n\n* `PRIMARY_KEY`: A combination of the year and state name.\n* `YEAR`\n* `STATE`\n\n\n#### **Enrollment**\nA breakdown of students enrolled in schools by school year.\n\n* `GRADES_PK`: Number of students in Pre-Kindergarten education.\n\n* `GRADES_4`: Number of students in fourth grade.\n\n* `GRADES_8`: Number of students in eighth grade.\n\n* `GRADES_12`: Number of students in twelfth grade.\n\n* `GRADES_1_8`: Number of students in the first through eighth grades.\n\n* `GRADES 9_12`: Number of students in the ninth through twelfth grades.\n\n* `GRADES_ALL`: The count of all students in the state. Comparable to ENROLL in the financial data (which is the U.S.\nCensus Bureau's estimate for students in the state).\n\nThe extended version of states_all contains additional columns that breakdown enrollment by race and gender. For example:\n\n* `G06_A_A`: Total number of sixth grade students.\n\n* `G06_AS_M`: Number of sixth grade male students whose ethnicity was classified as \"Asian\".\n\n* `G08_AS_A_READING`: Average reading score of eighth grade students whose ethnicity was classified as \"Asian\".\n\nThe represented races include AM (American Indian or Alaska Native), AS (Asian), HI (Hispanic/Latino), BL (Black or African American), WH (White), HP (Hawaiian Native/Pacific Islander), and TR (Two or More Races). The represented genders include M (Male) and F (Female).\n\n#### **Financials**\nA breakdown of states by revenue and expenditure.\n\n* `ENROLL`: The U.S. Census Bureau's count for students in the state. 
Should be comparable to GRADES_ALL (which is the\nNCES's estimate for students in the state).\n\n* `TOTAL REVENUE`: The total amount of revenue for the state.\n * `FEDERAL_REVENUE`\n * `STATE_REVENUE`\n * `LOCAL_REVENUE`\n \n* TOTAL_EXPENDITURE: The total expenditure for the state.\n * `INSTRUCTION_EXPENDITURE`\n * `SUPPORT_SERVICES_EXPENDITURE`\n\n * `CAPITAL_OUTLAY_EXPENDITURE`\n * `OTHER_EXPENDITURE`\n\n#### **Academic Achievement**\nA breakdown of student performance as assessed by the corresponding exams (math and reading, \ngrades 4 and 8).\n\n* `AVG_MATH_4_SCORE`: The state's average score for fourth graders taking the NAEP math exam.\n\n* `AVG_MATH_8_SCORE`: The state's average score for eight graders taking the NAEP math exam.\n\n* `AVG_READING_4_SCORE`: The state's average score for fourth graders taking the NAEP reading exam.\n\n* `AVG_READING_8_SCORE`: The state's average score for eighth graders taking the NAEP reading exam.\n\n### Data Processing\n\nThe original sources can be found here:\n\n<pre># Enrollment\nhttps://nces.ed.gov/ccd/stnfis.asp\n# Financials\nhttps://www.census.gov/programs-surveys/school-finances/data/tables.html\n# Academic Achievement\nhttps://www.nationsreportcard.gov/ndecore/xplore/NDE\n</pre>\n\nData was aggregated using a Python program I wrote. The code (as well as additional project information) can be found [here][1].\n\n### Methodology Notes\n\n* Spreadsheets for NCES enrollment data for 2014, 2011, 2010, and 2009 \nwere modified to place key data on the same sheet, making scripting easier.\n\n* The column 'ENROLL' represents the U.S. Census Bureau data value (financial data), while the\ncolumn 'GRADES_ALL' represents the NCES data value (demographic data). Though the two organizations\ncorrespond on this matter, these values (which are ostensibly the same) do vary. Their documentation chalks this\nup to differences in membership (i.e. what is and is not a fourth grade student).\n\n* Enrollment data from NCES has seen a number of changes across survey years. One of the more notable is that data on student gender does not appear to have been collected until 2009. The information in states_all_extended.csv reflects this.\n\n* NAEP test score data is only available for certain years\n\n* The current version of this data is concerned with state-level patterns. It is the author's hope that future\nversions will allow for school district-level granularity.\n\n### Acknowledgements\n\nData is sourced from the U.S. Census Bureau and the National Center for Education Statistics (NCES).\n\n### Licensing Notes\n\nThe licensing of these datasets state that it must not be used to identify specific students or schools. So\ndon't do that.\n\n\n [1]: https://github.com/justinrgarrard/USEduData", "VersionNotes": "Added states_all_extended.csv to the top level for easier readability", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 95317, "CreatorUserId": 1159129, "OwnerUserId": 1159129.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1078789.0, "CurrentDatasourceVersionId": 1108572.0, "ForumId": 104970, "Type": 2, "CreationDate": "12/22/2018 15:57:15", "LastActivityDate": "12/22/2018", "TotalViews": 156827, "TotalDownloads": 17434, "TotalVotes": 461, "TotalKernels": 30}]
|
[{"Id": 1159129, "UserName": "noriuk", "DisplayName": "Roy Garrard", "RegisterDate": "07/07/2017", "PerformanceTier": 1}]
|
# # US Education
# [us-education-datasets-unification-project Data url on kaggle](https://www.kaggle.com/datasets/noriuk/us-education-datasets-unification-project)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv(
"/kaggle/input/us-education-datasets-unification-project/states_all.csv"
)
data.describe()
sns.set_theme()
# ## Correlation between Math and Reading Scores
sns.scatterplot(x="AVG_MATH_4_SCORE", y="AVG_READING_4_SCORE", data=data, hue="YEAR")
plt.xlabel("Average Math Score (Grade 4)")
plt.ylabel("Average Reading Score (Grade 4)")
plt.title("Correlation between Math and Reading Scores (Grade 4)")
plt.show()
sns.scatterplot(x="AVG_MATH_8_SCORE", y="AVG_READING_8_SCORE", data=data, hue="YEAR")
plt.xlabel("Average Math Score (Grade 8)")
plt.ylabel("Average Reading Score (Grade 8)")
plt.title("Correlation between Math and Reading Scores (Grade 8)")
plt.show()
# We can see that there is a strong correlation between the math and reading scores,
# so a student who does well in math is likely to do well in reading as well.
# ## Trend of Average Reading Scores over the Years
da = data[data["AVG_READING_8_SCORE"] > 0]
plt.figure(figsize=(12, 6))
sns.barplot(x="YEAR", y="AVG_READING_8_SCORE", data=da)
plt.xlabel("Year")
plt.ylabel("Average Reading Score (Grade 8)")
plt.title("Trend of Average Reading Scores over the Years")
plt.show()
# avg math 8 score over the years
da = data[data["AVG_MATH_8_SCORE"] > 0]
plt.figure(figsize=(12, 6))
sns.barplot(x="YEAR", y="AVG_MATH_8_SCORE", data=da)
plt.xlabel("Year")
plt.ylabel("Average Math Score (Grade 8)")
plt.title("Trend of Average Math Scores over the Years")
plt.show()
| false | 1 | 587 | 0 | 1,993 | 587 |
||
129960755
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
df = pd.read_csv("/kaggle/input/world-happiness/world_happiness.csv")
df_ = pd.read_excel("/kaggle/input/country-abbr/Country_Codes.xlsx")
print(df.info(), end="\n\n")
print(df_.info())
Complete_df = df.merge(df_, left_on="country", right_on="Country", validate=None)
print(Complete_df.info())
Complete_df.drop(columns=["Unnamed: 0_x", "Country"], inplace=True)
Complete_df.rename(columns={"Unnamed: 0_y": "code"}, inplace=True)
Complete_df = Complete_df.loc[
:,
[
"Continent",
"code",
"country",
"gdp_per_cap",
"happiness_score",
"life_exp",
"social_support",
"freedom",
"corruption",
"generosity",
],
]
print(Complete_df.head(11))
Highest_gdp = Complete_df.sort_values("gdp_per_cap", ascending=False).head(10)
graph = sns.catplot(data=Highest_gdp, kind="bar", x="country", y="gdp_per_cap")
graph.fig.suptitle("Highest GDP Per Capita")
sns.set_palette("GnBu")
sns.set_context("notebook")
plt.xticks(rotation=90)
plt.show()
Lowest_gdp = Complete_df.sort_values("gdp_per_cap", ascending=True).head(10)
graph = sns.catplot(data=Lowest_gdp, kind="bar", x="country", y="gdp_per_cap")
graph.fig.suptitle("Lowest GDP Per Capita")
sns.set_palette("GnBu")
sns.set_context("notebook")
plt.xticks(rotation=90)
plt.show()
sns.set_context("notebook")
Highest_Lowest_gdp = pd.concat([Highest_gdp.tail(5), Lowest_gdp.tail(5)])
sns.barplot(data=Highest_Lowest_gdp, x="country", y="gdp_per_cap")
plt.xticks(rotation=90)
plt.show()
print(Complete_df["gdp_per_cap"].mean())
print(Complete_df["gdp_per_cap"].std())
fig = px.choropleth(
Complete_df,
locations="code",
color="gdp_per_cap",
hover_name="country",
range_color=(1000, 75000),
color_continuous_scale=px.colors.sequential.Plasma,
title="Gross Domestic Product Per Capita Map",
)
fig.show()
order = ["Africa", "Asia", "North America", "South America", "Europe", "Oceania"]
graph = sns.catplot(
data=Complete_df, kind="bar", x="Continent", y="life_exp", order=order
)
graph.fig.suptitle("Life Expectancy by Continent")
plt.xticks(rotation=45)
plt.show()
fig = px.choropleth(
Complete_df,
locations="code",
color="life_exp",
hover_name="country",
range_color=(50, 80),
color_continuous_scale=px.colors.sequential.Plasma,
title="Life Expectancy Map",
)
fig.show()
order = ["Africa", "Asia", "North America", "South America", "Europe", "Oceania"]
graph = sns.catplot(
data=Complete_df, kind="bar", x="Continent", y="happiness_score", order=order
)
graph.fig.suptitle("Happiness Score by Continent")
plt.xticks(rotation=45)
plt.show()
fig = px.choropleth(
Complete_df,
locations="code",
color="happiness_score",
hover_name="country",
range_color=(10, 180),
color_continuous_scale=px.colors.sequential.Plasma,
title="Happiness Score Map",
)
fig.show()
sns.set_palette("Dark2")
graph = sns.relplot(
data=Complete_df, kind="scatter", x="life_exp", y="happiness_score", hue="Continent"
)
graph.fig.suptitle("Life Expectancy vs. Happiness")
plt.show()
Complete_df["life_exp"].corr(Complete_df["happiness_score"])
sns.set_palette("CMRmap")
Complete_df["log_gdp_per_cap"] = np.log(Complete_df["gdp_per_cap"])
graph = sns.relplot(
data=Complete_df,
kind="scatter",
x="log_gdp_per_cap",
y="generosity",
hue="Continent",
)
graph.fig.suptitle("GDP vs. Generosity")
plt.show()
Complete_df["log_gdp_per_cap"].corr(Complete_df["generosity"])
graph = sns.relplot(
data=Complete_df,
kind="scatter",
x="log_gdp_per_cap",
y="social_support",
hue="Continent",
)
graph.fig.suptitle("GDP vs. Social Support")
plt.show()
Complete_df["log_gdp_per_cap"].corr(Complete_df["social_support"])
# numeric_only=True (pandas >= 1.5) avoids errors from the string columns (country, code, Continent)
sns.heatmap(Complete_df.corr(numeric_only=True))
plt.show()
print(Complete_df.corr(numeric_only=True))
sns.pairplot(data=Complete_df, vars=["log_gdp_per_cap", "freedom", "corruption"])
plt.show()
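# Optional sketch: fit a simple regression line to visualise how log GDP per capita
# relates to the happiness score (the direction depends on whether the score is a rank).
g = sns.lmplot(data=Complete_df, x="log_gdp_per_cap", y="happiness_score", height=5)
g.fig.suptitle("Log GDP per Capita vs. Happiness Score")
plt.show()
print(Complete_df["log_gdp_per_cap"].corr(Complete_df["happiness_score"]))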
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960755.ipynb
| null | null |
[{"Id": 129960755, "ScriptId": 37793429, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:14:30", "VersionNumber": 1.0, "Title": "World_Happiness_Analysis", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 106.0, "LinesInsertedFromPrevious": 106.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
df = pd.read_csv("/kaggle/input/world-happiness/world_happiness.csv")
df_ = pd.read_excel("/kaggle/input/country-abbr/Country_Codes.xlsx")
print(df.info(), end="\n\n")
print(df_.info())
Complete_df = df.merge(df_, left_on="country", right_on="Country", validate=None)
print(Complete_df.info())
Complete_df.drop(columns=["Unnamed: 0_x", "Country"], inplace=True)
Complete_df.rename(columns={"Unnamed: 0_y": "code"}, inplace=True)
Complete_df = Complete_df.loc[
:,
[
"Continent",
"code",
"country",
"gdp_per_cap",
"happiness_score",
"life_exp",
"social_support",
"freedom",
"corruption",
"generosity",
],
]
print(Complete_df.head(11))
Highest_gdp = Complete_df.sort_values("gdp_per_cap", ascending=False).head(10)
graph = sns.catplot(data=Highest_gdp, kind="bar", x="country", y="gdp_per_cap")
graph.fig.suptitle("Highest GDP Per Capita")
sns.set_palette("GnBu")
sns.set_context("notebook")
plt.xticks(rotation=90)
plt.show()
Lowest_gdp = Complete_df.sort_values("gdp_per_cap", ascending=True).head(10)
graph = sns.catplot(data=Lowest_gdp, kind="bar", x="country", y="gdp_per_cap")
graph.fig.suptitle("Lowest GDP Per Capita")
sns.set_palette("GnBu")
sns.set_context("notebook")
plt.xticks(rotation=90)
plt.show()
sns.set_context("notebook")
Highest_Lowest_gdp = pd.concat([Highest_gdp.tail(5), Lowest_gdp.tail(5)])
sns.barplot(data=Highest_Lowest_gdp, x="country", y="gdp_per_cap")
plt.xticks(rotation=90)
plt.show()
print(Complete_df["gdp_per_cap"].mean())
print(Complete_df["gdp_per_cap"].std())
fig = px.choropleth(
Complete_df,
locations="code",
color="gdp_per_cap",
hover_name="country",
range_color=(1000, 75000),
color_continuous_scale=px.colors.sequential.Plasma,
title="Gross Domestic Product Per Capita Map",
)
fig.show()
order = ["Africa", "Asia", "North America", "South America", "Europe", "Oceania"]
graph = sns.catplot(
data=Complete_df, kind="bar", x="Continent", y="life_exp", order=order
)
graph.fig.suptitle("Life Expectancy by Continent")
plt.xticks(rotation=45)
plt.show()
fig = px.choropleth(
Complete_df,
locations="code",
color="life_exp",
hover_name="country",
range_color=(50, 80),
color_continuous_scale=px.colors.sequential.Plasma,
title="Life Expectancy Map",
)
fig.show()
order = ["Africa", "Asia", "North America", "South America", "Europe", "Oceania"]
graph = sns.catplot(
data=Complete_df, kind="bar", x="Continent", y="happiness_score", order=order
)
graph.fig.suptitle("Happiness Score by Continent")
plt.xticks(rotation=45)
plt.show()
fig = px.choropleth(
Complete_df,
locations="code",
color="happiness_score",
hover_name="country",
range_color=(10, 180),
color_continuous_scale=px.colors.sequential.Plasma,
title="Happiness Score Map",
)
fig.show()
sns.set_palette("Dark2")
graph = sns.relplot(
data=Complete_df, kind="scatter", x="life_exp", y="happiness_score", hue="Continent"
)
graph.fig.suptitle("Life Expectancy vs. Happiness")
plt.show()
Complete_df["life_exp"].corr(Complete_df["happiness_score"])
sns.set_palette("CMRmap")
Complete_df["log_gdp_per_cap"] = np.log(Complete_df["gdp_per_cap"])
graph = sns.relplot(
data=Complete_df,
kind="scatter",
x="log_gdp_per_cap",
y="generosity",
hue="Continent",
)
graph.fig.suptitle("GDP vs. Generosity")
plt.show()
Complete_df["log_gdp_per_cap"].corr(Complete_df["generosity"])
graph = sns.relplot(
data=Complete_df,
kind="scatter",
x="log_gdp_per_cap",
y="social_support",
hue="Continent",
)
graph.fig.suptitle("GDP vs. Social Support")
plt.show()
Complete_df["log_gdp_per_cap"].corr(Complete_df["social_support"])
# numeric_only=True (pandas >= 1.5) avoids errors from the string columns (country, code, Continent)
sns.heatmap(Complete_df.corr(numeric_only=True))
plt.show()
print(Complete_df.corr(numeric_only=True))
sns.pairplot(data=Complete_df, vars=["log_gdp_per_cap", "freedom", "corruption"])
plt.show()
| false | 0 | 1,424 | 0 | 1,424 | 1,424 |
||
129960895
|
<jupyter_start><jupyter_text>Homelessness_USA
Kaggle dataset identifier: homelessness-usa
<jupyter_script>import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
df = pd.read_csv("/kaggle/input/homelessness-usa/homelessness.csv")
print(df.info(), end="\n\n")
print(df.head(11))
df.drop(columns="Unnamed: 0", inplace=True)
df = df.sort_values(["region", "state"])
df["homeless_total"] = df["individuals"] + df["family_members"]
df["homeless_per_10k"] = df["homeless_total"] / df["state_pop"] * 10000
print(df.head(5), df.tail(5), sep="\n\n")
print("Highest Population in 'USA' per State:")
print(df["state_pop"].sort_values(ascending=False).head(10))
sns.set_context("notebook")
sns.set_palette("Dark2")
sns.set_style("darkgrid")
g = sns.catplot(
data=df.sort_values("state_pop", ascending=False).head(10),
kind="bar",
x="state",
y="state_pop",
)
g.fig.suptitle("Highest Population in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Population in Ten Millions", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Lowest Population in 'USA' per State:")
print(df["state_pop"].sort_values(ascending=True).head(10))
sns.set_context("notebook")
sns.set_palette("Set2")
sns.set_style("darkgrid")
g = sns.catplot(
data=df.sort_values("state_pop", ascending=True).head(10),
kind="bar",
x="state",
y="state_pop",
)
g.fig.suptitle("Lowest Population in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Population in Millions", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Population in 'USA' per region:")
df_agg = df.groupby("region")["state_pop"].sum()
df_agg = pd.DataFrame(df_agg.sort_values(ascending=False))
df_agg.reset_index(inplace=True)
print(df_agg)
g = sns.catplot(data=df_agg, kind="bar", x="region", y="state_pop")
g.fig.suptitle("Population in 'USA' per Region:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Regions", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Population in Ten Millions", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Highest Homeless People in 'USA' per State:")
print(
df[["state", "homeless_total"]]
.sort_values("homeless_total", ascending=False)
.head(10)
)
print("Lowest Homeless People in 'USA' per State:")
print(
df[["state", "homeless_total"]]
.sort_values("homeless_total", ascending=True)
.head(10)
)
print("Highest Homeless_per_10k in 'USA' per State:")
print(
df[["state", "homeless_per_10k"]]
.sort_values("homeless_per_10k", ascending=False)
.head(10)
)
g = sns.catplot(
data=df.sort_values("homeless_per_10k", ascending=False).head(10),
kind="bar",
x="state",
y="homeless_per_10k",
)
g.fig.suptitle("Highest Homelessness in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Homeless People in 10,000's", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Lowest Homeless_per_10k in 'USA' per State:")
print(
df[["state", "homeless_per_10k"]]
.sort_values("homeless_per_10k", ascending=True)
.head(10)
)
g = sns.catplot(
data=df.sort_values("homeless_per_10k", ascending=True).head(10),
kind="bar",
x="state",
y="homeless_per_10k",
)
g.fig.suptitle("Lowest Homelessness in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Homeless People in 10,000's", fontsize=12)
plt.xticks(rotation=75)
plt.show()
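# Optional refactor sketch: the bar-chart cells above repeat the same plotting code,
# so a small helper (same styling, hypothetical name) keeps them consistent.
def plot_states_bar(frame, value_col, title, ylabel, ascending=False, n=10):
    ordered = frame.sort_values(value_col, ascending=ascending).head(n)
    g = sns.catplot(data=ordered, kind="bar", x="state", y=value_col)
    g.fig.suptitle(title)
    plt.xlabel("States", fontsize=12)
    plt.ylabel(ylabel, fontsize=12)
    plt.xticks(rotation=75, size=10, fontweight="bold")
    plt.yticks(size=10, fontweight="bold")
    plt.show()


plot_states_bar(
    df,
    "homeless_per_10k",
    "Highest Homelessness in 'USA' per State:",
    "Homeless People per 10,000 Residents",
)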
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960895.ipynb
|
homelessness-usa
|
ahmedashraf99
|
[{"Id": 129960895, "ScriptId": 38238116, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:15:50", "VersionNumber": 1.0, "Title": "Homelessness_In_USA", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186396911, "KernelVersionId": 129960895, "SourceDatasetVersionId": 5627528}]
|
[{"Id": 5627528, "DatasetId": 3235740, "DatasourceVersionId": 5702755, "CreatorUserId": 8742412, "LicenseName": "Unknown", "CreationDate": "05/07/2023 18:45:46", "VersionNumber": 1.0, "Title": "Homelessness_USA", "Slug": "homelessness-usa", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3235740, "CreatorUserId": 8742412, "OwnerUserId": 8742412.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5627528.0, "CurrentDatasourceVersionId": 5702755.0, "ForumId": 3300907, "Type": 2, "CreationDate": "05/07/2023 18:45:46", "LastActivityDate": "05/07/2023", "TotalViews": 66, "TotalDownloads": 8, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 8742412, "UserName": "ahmedashraf99", "DisplayName": "Ahmed Ashraf 99", "RegisterDate": "10/29/2021", "PerformanceTier": 0}]
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
df = pd.read_csv("/kaggle/input/homelessness-usa/homelessness.csv")
print(df.info(), end="\n\n")
print(df.head(11))
df.drop(columns="Unnamed: 0", inplace=True)
df = df.sort_values(["region", "state"])
df["homeless_total"] = df["individuals"] + df["family_members"]
df["homeless_per_10k"] = df["homeless_total"] / df["state_pop"] * 10000
print(df.head(5), df.tail(5), sep="\n\n")
print("Highest Population in 'USA' per State:")
print(df["state_pop"].sort_values(ascending=False).head(10))
sns.set_context("notebook")
sns.set_palette("Dark2")
sns.set_style("darkgrid")
g = sns.catplot(
data=df.sort_values("state_pop", ascending=False).head(10),
kind="bar",
x="state",
y="state_pop",
)
g.fig.suptitle("Highest Population in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Population in Ten Millions", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Lowest Population in 'USA' per State:")
print(df["state_pop"].sort_values(ascending=True).head(10))
sns.set_context("notebook")
sns.set_palette("Set2")
sns.set_style("darkgrid")
g = sns.catplot(
data=df.sort_values("state_pop", ascending=True).head(10),
kind="bar",
x="state",
y="state_pop",
)
g.fig.suptitle("Lowest Population in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Population in Millions", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Population in 'USA' per region:")
df_agg = df.groupby("region")["state_pop"].sum()
df_agg = pd.DataFrame(df_agg.sort_values(ascending=False))
df_agg.reset_index(inplace=True)
print(df_agg)
g = sns.catplot(data=df_agg, kind="bar", x="region", y="state_pop")
g.fig.suptitle("Population in 'USA' per Region:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Regions", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Population in Ten Millions", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Highest Homeless People in 'USA' per State:")
print(
df[["state", "homeless_total"]]
.sort_values("homeless_total", ascending=False)
.head(10)
)
print("Lowest Homeless People in 'USA' per State:")
print(
df[["state", "homeless_total"]]
.sort_values("homeless_total", ascending=True)
.head(10)
)
print("Highest Homeless_per_10k in 'USA' per State:")
print(
df[["state", "homeless_per_10k"]]
.sort_values("homeless_per_10k", ascending=False)
.head(10)
)
g = sns.catplot(
data=df.sort_values("homeless_per_10k", ascending=False).head(10),
kind="bar",
x="state",
y="homeless_per_10k",
)
g.fig.suptitle("Highest Homelessness in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Homeless People in 10,000's", fontsize=12)
plt.xticks(rotation=75)
plt.show()
print("Lowest Homeless_per_10k in 'USA' per State:")
print(
df[["state", "homeless_per_10k"]]
.sort_values("homeless_per_10k", ascending=True)
.head(10)
)
g = sns.catplot(
data=df.sort_values("homeless_per_10k", ascending=True).head(10),
kind="bar",
x="state",
y="homeless_per_10k",
)
g.fig.suptitle("Lowest Homelessness in 'USA' per State:")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("States", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Homeless People in 10,000's", fontsize=12)
plt.xticks(rotation=75)
plt.show()
| false | 1 | 1,352 | 0 | 1,376 | 1,352 |
||
129960177
|
<jupyter_start><jupyter_text>Mall Customer Segmentation Data
### Context
This data set is created only for learning the concepts of customer segmentation, also known as market basket analysis. I will demonstrate this by using an unsupervised ML technique (the KMeans clustering algorithm) in its simplest form.
### Content
You own a supermarket mall and, through membership cards, you have some basic data about your customers: Customer ID, age, gender, annual income and spending score.
Spending Score is something you assign to the customer based on your defined parameters like customer behavior and purchasing data.
**Problem Statement**
You own the mall and want to understand which customers can be easily converted [Target Customers], so that this insight can be given to the marketing team and the strategy planned accordingly.
Kaggle dataset identifier: customer-segmentation-tutorial-in-python
<jupyter_code>import pandas as pd
df = pd.read_csv('customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 200 entries, 0 to 199
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 CustomerID 200 non-null int64
1 Gender 200 non-null object
2 Age 200 non-null int64
3 Annual Income (k$) 200 non-null int64
4 Spending Score (1-100) 200 non-null int64
dtypes: int64(4), object(1)
memory usage: 7.9+ KB
<jupyter_text>Examples:
{
"CustomerID": 1,
"Gender": "Male",
"Age": 19,
"Annual Income (k$)": 15,
"Spending Score (1-100)": 39
}
{
"CustomerID": 2,
"Gender": "Male",
"Age": 21,
"Annual Income (k$)": 15,
"Spending Score (1-100)": 81
}
{
"CustomerID": 3,
"Gender": "Female",
"Age": 20,
"Annual Income (k$)": 16,
"Spending Score (1-100)": 6
}
{
"CustomerID": 4,
"Gender": "Female",
"Age": 23,
"Annual Income (k$)": 16,
"Spending Score (1-100)": 77
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import missingno as msno
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
plt.style.use("fivethirtyeight")
df = pd.read_csv("../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv")
df.head()
df.mean(numeric_only=True)  # Gender is a string column, so restrict the mean to numeric columns
sns.relplot(data=df, x="Annual Income (k$)", y="Spending Score (1-100)", hue="Gender")
sex = df.value_counts("Gender")
type = ["Female", "Male"]
fig, ax = plt.subplots(figsize=(4, 4), dpi=100)
explode = (0, 0.06)
patches, texts, autotexts = ax.pie(
sex, labels=type, autopct="%1.2f%%", shadow=True, startangle=90, explode=explode
)
age_18_25 = df.Age[(df.Age >= 18) & (df.Age <= 25)]
age_26_35 = df.Age[(df.Age >= 26) & (df.Age <= 35)]
age_36_45 = df.Age[(df.Age >= 36) & (df.Age <= 45)]
age_46_55 = df.Age[(df.Age >= 46) & (df.Age <= 55)]
age_55above = df.Age[df.Age > 55]  # strictly greater than 55, so age 55 is not double-counted
x_age = ["18-25", "26-35", "36-45", "46-55", "56+"]
y_age = [
len(age_18_25.values),
len(age_26_35.values),
len(age_36_45.values),
len(age_46_55.values),
len(age_55above.values),
]
px.bar(
data_frame=df,
x=x_age,
y=y_age,
color=x_age,
title="Number of customers per age group",
)
for i in ["Age", "Annual Income (k$)", "Spending Score (1-100)"]:
sns.displot(df, x=i, col="Gender")
X1 = df.loc[:, ["Annual Income (k$)", "Spending Score (1-100)"]].values
kmeans = KMeans(n_clusters=5)
labels = kmeans.fit_predict(X1)
print(labels)
plt.figure(figsize=(14, 8))
plt.scatter(X1[:, 0], X1[:, 1], c=kmeans.labels_, s=105)
plt.scatter(
kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], color="red", s=250
)
plt.title("Clusters depending on income\n", fontsize=20)
plt.xlabel("Annual Income (k$)")
plt.ylabel("Spending Score (1-100)")
plt.show()
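# (Hypothetical addition, not in the original notebook.) The choice of 5 clusters above
# is not justified anywhere; a common sanity check is the elbow method, plotting the
# K-Means inertia over a range of k values and looking for the "bend".
inertias = []
for k in range(1, 11):
    inertias.append(KMeans(n_clusters=k, n_init=10, random_state=0).fit(X1).inertia_)
plt.figure(figsize=(8, 4))
plt.plot(range(1, 11), inertias, marker="o")
plt.xlabel("Number of clusters (k)")
plt.ylabel("Inertia")
plt.title("Elbow method for choosing k")
plt.show()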
# reuse the labels from the fit above; refitting K-Means here could assign different
# cluster numbers and make the two cluster assignments in this notebook inconsistent
cluster_map = pd.DataFrame()
cluster_map["data_index"] = df.index.values
cluster_map["cluster"] = labels
cluster_map[cluster_map.cluster == 2]
clusters = pd.concat([df, pd.DataFrame({"cluster": labels})], axis=1)
clusters.head()
for c in clusters:
grid = sns.FacetGrid(clusters, col="cluster")
grid.map(plt.hist, c)
sns.pairplot(clusters, hue="cluster")  # df has no "labels" column; use the cluster assignments built above
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960177.ipynb
|
customer-segmentation-tutorial-in-python
|
vjchoudhary7
|
[{"Id": 129960177, "ScriptId": 38609082, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14688789, "CreationDate": "05/17/2023 18:08:38", "VersionNumber": 1.0, "Title": "Mall spending AD", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 111.0, "LinesInsertedFromPrevious": 111.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186396022, "KernelVersionId": 129960177, "SourceDatasetVersionId": 74935}]
|
[{"Id": 74935, "DatasetId": 42674, "DatasourceVersionId": 77392, "CreatorUserId": 1790645, "LicenseName": "Other (specified in description)", "CreationDate": "08/11/2018 07:23:02", "VersionNumber": 1.0, "Title": "Mall Customer Segmentation Data", "Slug": "customer-segmentation-tutorial-in-python", "Subtitle": "Market Basket Analysis", "Description": "### Context\n\nThis data set is created only for the learning purpose of the customer segmentation concepts , also known as market basket analysis . I will demonstrate this by using unsupervised ML technique (KMeans Clustering Algorithm) in the simplest form. \n\n\n### Content\n\nYou are owing a supermarket mall and through membership cards , you have some basic data about your customers like Customer ID, age, gender, annual income and spending score. \nSpending Score is something you assign to the customer based on your defined parameters like customer behavior and purchasing data. \n\n**Problem Statement**\nYou own the mall and want to understand the customers like who can be easily converge [Target Customers] so that the sense can be given to marketing team and plan the strategy accordingly. \n\n\n### Acknowledgements\n\nFrom Udemy's Machine Learning A-Z course.\n\nI am new to Data science field and want to share my knowledge to others\n\nhttps://github.com/SteffiPeTaffy/machineLearningAZ/blob/master/Machine%20Learning%20A-Z%20Template%20Folder/Part%204%20-%20Clustering/Section%2025%20-%20Hierarchical%20Clustering/Mall_Customers.csv\n\n### Inspiration\n\nBy the end of this case study , you would be able to answer below questions. \n1- How to achieve customer segmentation using machine learning algorithm (KMeans Clustering) in Python in simplest way.\n2- Who are your target customers with whom you can start marketing strategy [easy to converse]\n3- How the marketing strategy works in real world", "VersionNotes": "Initial release", "TotalCompressedBytes": 3981.0, "TotalUncompressedBytes": 3981.0}]
|
[{"Id": 42674, "CreatorUserId": 1790645, "OwnerUserId": 1790645.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 74935.0, "CurrentDatasourceVersionId": 77392.0, "ForumId": 51177, "Type": 2, "CreationDate": "08/11/2018 07:23:02", "LastActivityDate": "08/11/2018", "TotalViews": 710472, "TotalDownloads": 132442, "TotalVotes": 1487, "TotalKernels": 1044}]
|
[{"Id": 1790645, "UserName": "vjchoudhary7", "DisplayName": "Vijay Choudhary", "RegisterDate": "04/05/2018", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import missingno as msno
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
plt.style.use("fivethirtyeight")
df = pd.read_csv("../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv")
df.head()
df.mean()
sns.relplot(data=df, x="Annual Income (k$)", y="Spending Score (1-100)", hue="Gender")
gender_counts = df["Gender"].value_counts()
gender_labels = gender_counts.index  # take labels from the counts to avoid shadowing the built-in `type` and hard-coding the order
fig, ax = plt.subplots(figsize=(4, 4), dpi=100)
explode = (0, 0.06)
patches, texts, autotexts = ax.pie(
    gender_counts,
    labels=gender_labels,
    autopct="%1.2f%%",
    shadow=True,
    startangle=90,
    explode=explode,
)
age_18_25 = df.Age[(df.Age >= 18) & (df.Age <= 25)]
age_26_35 = df.Age[(df.Age >= 26) & (df.Age <= 35)]
age_36_45 = df.Age[(df.Age >= 36) & (df.Age <= 45)]
age_46_55 = df.Age[(df.Age >= 46) & (df.Age <= 55)]
age_55above = df.Age[df.Age > 55]  # strictly greater, so age 55 is not counted in two buckets
x_age = ["18-25", "26-35", "36-45", "46-55", "55+"]
y_age = [
len(age_18_25.values),
len(age_26_35.values),
len(age_36_45.values),
len(age_46_55.values),
len(age_55above.values),
]
# pass the aggregated lists directly; adding data_frame=df here raises a length
# mismatch error because x_age/y_age have 5 entries while df has 200 rows
px.bar(
    x=x_age,
    y=y_age,
    color=x_age,
    title="Number of customers per age group",
)
for i in ["Age", "Annual Income (k$)", "Spending Score (1-100)"]:
sns.displot(df, x=i, col="Gender")
X1 = df.loc[:, ["Annual Income (k$)", "Spending Score (1-100)"]].values
kmeans = KMeans(n_clusters=5)
labels = kmeans.fit_predict(X1)
print(labels)
plt.figure(figsize=(14, 8))
plt.scatter(X1[:, 0], X1[:, 1], c=kmeans.labels_, s=105)
plt.scatter(
kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], color="red", s=250
)
plt.title("Clusters depending on income\n", fontsize=20)
plt.xlabel("Annual Income (k$)")
plt.ylabel("Spending Score (1-100)")
plt.show()
# reuse the labels from the fit above; refitting K-Means here could assign different
# cluster numbers and make the two cluster assignments in this notebook inconsistent
cluster_map = pd.DataFrame()
cluster_map["data_index"] = df.index.values
cluster_map["cluster"] = labels
cluster_map[cluster_map.cluster == 2]
clusters = pd.concat([df, pd.DataFrame({"cluster": labels})], axis=1)
clusters.head()
for c in clusters:
grid = sns.FacetGrid(clusters, col="cluster")
grid.map(plt.hist, c)
sns.pairplot(clusters, hue="cluster")  # df has no "labels" column; use the cluster assignments built above
|
[{"customer-segmentation-tutorial-in-python/Mall_Customers.csv": {"column_names": "[\"CustomerID\", \"Gender\", \"Age\", \"Annual Income (k$)\", \"Spending Score (1-100)\"]", "column_data_types": "{\"CustomerID\": \"int64\", \"Gender\": \"object\", \"Age\": \"int64\", \"Annual Income (k$)\": \"int64\", \"Spending Score (1-100)\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 200 entries, 0 to 199\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 CustomerID 200 non-null int64 \n 1 Gender 200 non-null object\n 2 Age 200 non-null int64 \n 3 Annual Income (k$) 200 non-null int64 \n 4 Spending Score (1-100) 200 non-null int64 \ndtypes: int64(4), object(1)\nmemory usage: 7.9+ KB\n", "summary": "{\"CustomerID\": {\"count\": 200.0, \"mean\": 100.5, \"std\": 57.879184513951124, \"min\": 1.0, \"25%\": 50.75, \"50%\": 100.5, \"75%\": 150.25, \"max\": 200.0}, \"Age\": {\"count\": 200.0, \"mean\": 38.85, \"std\": 13.96900733155888, \"min\": 18.0, \"25%\": 28.75, \"50%\": 36.0, \"75%\": 49.0, \"max\": 70.0}, \"Annual Income (k$)\": {\"count\": 200.0, \"mean\": 60.56, \"std\": 26.264721165271244, \"min\": 15.0, \"25%\": 41.5, \"50%\": 61.5, \"75%\": 78.0, \"max\": 137.0}, \"Spending Score (1-100)\": {\"count\": 200.0, \"mean\": 50.2, \"std\": 25.823521668370173, \"min\": 1.0, \"25%\": 34.75, \"50%\": 50.0, \"75%\": 73.0, \"max\": 99.0}}", "examples": "{\"CustomerID\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Gender\":{\"0\":\"Male\",\"1\":\"Male\",\"2\":\"Female\",\"3\":\"Female\"},\"Age\":{\"0\":19,\"1\":21,\"2\":20,\"3\":23},\"Annual Income (k$)\":{\"0\":15,\"1\":15,\"2\":16,\"3\":16},\"Spending Score (1-100)\":{\"0\":39,\"1\":81,\"2\":6,\"3\":77}}"}}]
| true | 1 |
<start_data_description><data_path>customer-segmentation-tutorial-in-python/Mall_Customers.csv:
<column_names>
['CustomerID', 'Gender', 'Age', 'Annual Income (k$)', 'Spending Score (1-100)']
<column_types>
{'CustomerID': 'int64', 'Gender': 'object', 'Age': 'int64', 'Annual Income (k$)': 'int64', 'Spending Score (1-100)': 'int64'}
<dataframe_Summary>
{'CustomerID': {'count': 200.0, 'mean': 100.5, 'std': 57.879184513951124, 'min': 1.0, '25%': 50.75, '50%': 100.5, '75%': 150.25, 'max': 200.0}, 'Age': {'count': 200.0, 'mean': 38.85, 'std': 13.96900733155888, 'min': 18.0, '25%': 28.75, '50%': 36.0, '75%': 49.0, 'max': 70.0}, 'Annual Income (k$)': {'count': 200.0, 'mean': 60.56, 'std': 26.264721165271244, 'min': 15.0, '25%': 41.5, '50%': 61.5, '75%': 78.0, 'max': 137.0}, 'Spending Score (1-100)': {'count': 200.0, 'mean': 50.2, 'std': 25.823521668370173, 'min': 1.0, '25%': 34.75, '50%': 50.0, '75%': 73.0, 'max': 99.0}}
<dataframe_info>
RangeIndex: 200 entries, 0 to 199
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 CustomerID 200 non-null int64
1 Gender 200 non-null object
2 Age 200 non-null int64
3 Annual Income (k$) 200 non-null int64
4 Spending Score (1-100) 200 non-null int64
dtypes: int64(4), object(1)
memory usage: 7.9+ KB
<some_examples>
{'CustomerID': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Gender': {'0': 'Male', '1': 'Male', '2': 'Female', '3': 'Female'}, 'Age': {'0': 19, '1': 21, '2': 20, '3': 23}, 'Annual Income (k$)': {'0': 15, '1': 15, '2': 16, '3': 16}, 'Spending Score (1-100)': {'0': 39, '1': 81, '2': 6, '3': 77}}
<end_description>
| 1,160 | 0 | 1,792 | 1,160 |
129960630
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
divorce = pd.read_csv("/kaggle/input/divorce/divorce.csv")
print(divorce.info())
divorce = pd.read_csv(
"/kaggle/input/divorce/divorce.csv",
parse_dates=["divorce_date", "dob_man", "dob_woman", "marriage_date"],
)
print(divorce.info())
print(divorce["num_kids"].isna().sum())
divorce["num_kids"].fillna(0, inplace=True)
divorce["num_kids"] = divorce["num_kids"].astype("int64")
print(divorce.info())
divorce["marriage_year"] = divorce["marriage_date"].dt.year
divorce["man_age_marriage"] = divorce["marriage_year"] - divorce["dob_man"].dt.year
divorce["woman_age_marriage"] = divorce["marriage_year"] - divorce["dob_woman"].dt.year
print(divorce.info())
sns.lineplot(data=divorce, x="marriage_year", y="num_kids")
plt.show()
sns.heatmap(divorce.corr(), annot=True)
plt.show()
sns.pairplot(data=divorce, vars=["marriage_duration", "marriage_year", "num_kids"])
plt.show()
sns.kdeplot(data=divorce, x="woman_age_marriage", hue="education_woman")
plt.show()
sns.scatterplot(
    data=divorce, x="man_age_marriage", y="woman_age_marriage", hue="education_man"
)
plt.show()
sns.scatterplot(
    data=divorce, x="marriage_year", y="man_age_marriage", hue="education_man"
)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960630.ipynb
| null | null |
[{"Id": 129960630, "ScriptId": 36863966, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:13:17", "VersionNumber": 1.0, "Title": "Divorce_Mexico_EDA", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 37.0, "LinesInsertedFromPrevious": 37.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
divorce = pd.read_csv("/kaggle/input/divorce/divorce.csv")
print(divorce.info())
divorce = pd.read_csv(
"/kaggle/input/divorce/divorce.csv",
parse_dates=["divorce_date", "dob_man", "dob_woman", "marriage_date"],
)
print(divorce.info())
print(divorce["num_kids"].isna().sum())
divorce["num_kids"].fillna(0, inplace=True)
divorce["num_kids"] = divorce["num_kids"].astype("int64")
print(divorce.info())
divorce["marriage_year"] = divorce["marriage_date"].dt.year
divorce["man_age_marriage"] = divorce["marriage_year"] - divorce["dob_man"].dt.year
divorce["woman_age_marriage"] = divorce["marriage_year"] - divorce["dob_woman"].dt.year
print(divorce.info())
sns.lineplot(data=divorce, x="marriage_year", y="num_kids")
plt.show()
sns.heatmap(divorce.corr(), annot=True)
plt.show()
sns.pairplot(data=divorce, vars=["marriage_duration", "marriage_year", "num_kids"])
plt.show()
sns.kdeplot(data=divorce, x="woman_age_marriage", hue="education_woman")
plt.show()
sns.scatterplot(
    data=divorce, x="man_age_marriage", y="woman_age_marriage", hue="education_man"
)
plt.show()
sns.scatterplot(
    data=divorce, x="marriage_year", y="man_age_marriage", hue="education_man"
)
plt.show()
| false | 0 | 495 | 0 | 495 | 495 |
||
129960728
|
<jupyter_start><jupyter_text>Food_Consumption
Kaggle dataset identifier: food-consumption
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import iqr
import plotly.express as px
df = pd.read_csv("/kaggle/input/food-consumption/food_consumption.csv")
print(df.info())
df = df.drop("Unnamed: 0", axis=1)
print(df.head(11))
sns.barplot(data=df, x="food_category", y="co2_emission")
plt.xticks(rotation=90)
plt.show()
sns.barplot(data=df, x="food_category", y="consumption")
plt.xticks(rotation=90)
plt.show()
print(
df.groupby("food_category")[["consumption", "co2_emission"]]
.agg([np.mean, np.std])
.sort_values(("co2_emission", "mean"), ascending=False)
)
emissions_by_country = df.groupby("country")["co2_emission"].sum()
print(emissions_by_country.sort_values(ascending=False).head(11))
q1 = np.quantile(emissions_by_country, 0.25)
q3 = np.quantile(emissions_by_country, 0.75)
iqr = q3 - q1
lower = q1 - 1.5 * iqr
upper = q3 + 1.5 * iqr
outliers = emissions_by_country[
(emissions_by_country > upper) | (emissions_by_country < lower)
]
print(outliers)
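# (Hypothetical aside, not part of the original notebook.) The `iqr` helper imported
# from scipy.stats above computes the same interquartile range in a single call and
# could replace the manual q3 - q1 step:
from scipy.stats import iqr as scipy_iqr
print(scipy_iqr(emissions_by_country))  # should equal the q3 - q1 value computed above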
sns.heatmap(df.corr(), annot=True)
plt.show()
eg_and_aus = df[(df["country"] == "Egypt") | (df["country"] == "Australia")]
print(
eg_and_aus.sort_values(["country", "co2_emission"], ascending=[True, False]),
end="\n\n\n",
)
sns.barplot(data=eg_and_aus, x="food_category", y="co2_emission", hue="country")
plt.xticks(rotation=90)
plt.show()
print(
eg_and_aus.groupby("country")[["consumption", "co2_emission"]].agg(
[np.mean, np.median]
)
)
beef_consumption = df[df["food_category"] == "beef"].sort_values(
"consumption", ascending=False
)
sns.histplot(data=beef_consumption, x="consumption")
plt.show()
print(beef_consumption.groupby("country")[["consumption", "co2_emission"]].agg(np.mean))
sns.barplot(data=beef_consumption.head(11), x="country", y="consumption")
plt.xticks(rotation=90)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960728.ipynb
|
food-consumption
|
ahmedashraf99
|
[{"Id": 129960728, "ScriptId": 37592805, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:14:16", "VersionNumber": 1.0, "Title": "Food_Consumption_Analysis", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 54.0, "LinesInsertedFromPrevious": 54.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186396721, "KernelVersionId": 129960728, "SourceDatasetVersionId": 5489673}]
|
[{"Id": 5489673, "DatasetId": 3168621, "DatasourceVersionId": 5564026, "CreatorUserId": 8742412, "LicenseName": "Unknown", "CreationDate": "04/22/2023 19:27:00", "VersionNumber": 1.0, "Title": "Food_Consumption", "Slug": "food-consumption", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3168621, "CreatorUserId": 8742412, "OwnerUserId": 8742412.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5489673.0, "CurrentDatasourceVersionId": 5564026.0, "ForumId": 3232838, "Type": 2, "CreationDate": "04/22/2023 19:27:00", "LastActivityDate": "04/22/2023", "TotalViews": 49, "TotalDownloads": 5, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 8742412, "UserName": "ahmedashraf99", "DisplayName": "Ahmed Ashraf 99", "RegisterDate": "10/29/2021", "PerformanceTier": 0}]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import iqr
import plotly.express as px
df = pd.read_csv("/kaggle/input/food-consumption/food_consumption.csv")
print(df.info())
df = df.drop("Unnamed: 0", axis=1)
print(df.head(11))
sns.barplot(data=df, x="food_category", y="co2_emission")
plt.xticks(rotation=90)
plt.show()
sns.barplot(data=df, x="food_category", y="consumption")
plt.xticks(rotation=90)
plt.show()
print(
df.groupby("food_category")[["consumption", "co2_emission"]]
.agg([np.mean, np.std])
.sort_values(("co2_emission", "mean"), ascending=False)
)
emissions_by_country = df.groupby("country")["co2_emission"].sum()
print(emissions_by_country.sort_values(ascending=False).head(11))
q1 = np.quantile(emissions_by_country, 0.25)
q3 = np.quantile(emissions_by_country, 0.75)
iqr = q3 - q1
lower = q1 - 1.5 * iqr
upper = q3 + 1.5 * iqr
outliers = emissions_by_country[
(emissions_by_country > upper) | (emissions_by_country < lower)
]
print(outliers)
sns.heatmap(df.corr(), annot=True)
plt.show()
eg_and_aus = df[(df["country"] == "Egypt") | (df["country"] == "Australia")]
print(
eg_and_aus.sort_values(["country", "co2_emission"], ascending=[True, False]),
end="\n\n\n",
)
sns.barplot(data=eg_and_aus, x="food_category", y="co2_emission", hue="country")
plt.xticks(rotation=90)
plt.show()
print(
eg_and_aus.groupby("country")[["consumption", "co2_emission"]].agg(
[np.mean, np.median]
)
)
beef_consumption = df[df["food_category"] == "beef"].sort_values(
"consumption", ascending=False
)
sns.histplot(data=beef_consumption, x="consumption")
plt.show()
print(beef_consumption.groupby("country")[["consumption", "co2_emission"]].agg(np.mean))
sns.barplot(data=beef_consumption.head(11), x="country", y="consumption")
plt.xticks(rotation=90)
plt.show()
| false | 1 | 705 | 0 | 726 | 705 |
||
129960919
|
<jupyter_start><jupyter_text>Temperatures_over_Time
Kaggle dataset identifier: temperatures-over-time
<jupyter_script>import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
df_1 = pd.read_csv("/kaggle/input/temperatures-over-time/temperatures.csv")
df_2 = pd.read_excel("/kaggle/input/country-abbr/Country_Codes.xlsx")
print(df_1.info(), end="\n\n")
print(df_1["country"].unique(), end="\n\n")
print(df_2.info())
df = df_1.merge(df_2, left_on="country", how="left", right_on="Country")
df.drop(columns=["Unnamed: 0_x", "Country"], inplace=True)
df.rename(columns={"Unnamed: 0_y": "code"}, inplace=True)
df = df.loc[:, ["country", "code", "Continent", "city", "date", "avg_temp_c"]]
df.set_index("country", inplace=True)
df.sort_index(inplace=True)
df["date"] = df["date"].astype("datetime64")
df["year"] = df["date"].dt.year
df["month"] = df["date"].dt.month
print(df.head(11), df.tail(11), sep="\n\n", end="\n\n")
print(df.isna().sum())
print(df.index[df["code"].isna()].value_counts())
df.loc["Egypt", "code"] = "EGY"
df.loc["Turkey", "code"] = "TUR"
df.loc["Russia", "code"] = "RUS"
df.loc["Côte D'Ivoire", "code"] = "CIV"
df.loc["Syria", "code"] = "SYR"
# df.loc["Congo (Democratic Republic Of The)", "code"] = "COG"
# df.loc["Iran", "code"] = "IRN"
# df.loc["Burma", "code"] = "MMR"
# df.loc["South Korea", "code"] = "KOR"
# df.loc["Taiwan", "code"] = "TWN"
print(df.index[df["Continent"].isna()].value_counts())
df.loc["Egypt", "Continent"] = "Africa"
df.loc["Turkey", "Continent"] = "Asia"
df.loc["Russia", "Continent"] = "Asia"
df.loc["Côte D'Ivoire", "Continent"] = "Africa"
df.loc["Syria", "Continent"] = "Asia"
df.loc["Congo (Democratic Republic Of The)", "Continent"] = "Africa"
df.loc["Iran", "Continent"] = "Asia"
df.loc["Burma", "Continent"] = "Asia"
df.loc["South Korea", "Continent"] = "Asia"
df.loc["Taiwan", "Continent"] = "Asia"
print(df.isna().sum(), end="\n\n")
print(df["date"][df["avg_temp_c"].isna()].value_counts())
print(df["date"][df["date"] == "2013-9-1"])
avg_temp_september = df["avg_temp_c"][df["month"] == 9].mean()
print(avg_temp_september)
df["avg_temp_c"].fillna(avg_temp_september, inplace=True)
# Dataset is now ready for Analyzing...
print(df.info())
sns.set_style("darkgrid")
sns.set_context("paper", font_scale=1)
sns.set_palette("Dark2")
g = sns.catplot(data=df, kind="bar", x="year", y="avg_temp_c")
g.fig.suptitle("Average Temperatures on Earth over years")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Years", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.xticks(rotation=60)
plt.show()
print("\n\n", df.groupby("year")["avg_temp_c"].agg("mean"))
g = sns.catplot(data=df, kind="bar", x="month", y="avg_temp_c")
g.fig.suptitle("Average Temperatures on Earth over months")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Months", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.show()
g = sns.catplot(data=df, kind="bar", x="Continent", y="avg_temp_c")
g.fig.suptitle("Average Temperatures on the Continents")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Continents", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.xticks(rotation=60)
plt.show()
fig = px.choropleth(
df,
locations="code",
color="avg_temp_c",
hover_name=df.index,
range_color=(5, 40),
color_continuous_scale=px.colors.sequential.Plasma,
title="Average Temperatures Map",
)
fig.show()
Hottest_Countries = (
df.groupby("country")["avg_temp_c"].agg("mean").sort_values(ascending=False).head(5)
)
print(Hottest_Countries)
Coldest_Countries = (
df.groupby("country")["avg_temp_c"].agg("mean").sort_values().head(5)
)
print(Coldest_Countries)
Extreme_Countries = pd.concat([Hottest_Countries, Coldest_Countries])
Extreme_Countries = pd.DataFrame(Extreme_Countries)
Extreme_Countries.reset_index(inplace=True)
g = sns.catplot(data=Extreme_Countries, kind="bar", x="country", y="avg_temp_c")
g.fig.suptitle("Hottest vs. Coldest Countries")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Countries", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.xticks(rotation=90)
plt.show()
# Let's focus on Egypt...
temp_eg = df[df["city"].isin(["Cairo", "Alexandria"])].sort_values("date")
print(temp_eg)
temp_eg_months = temp_eg.pivot_table(values="avg_temp_c", index="month", columns="city")
print(temp_eg_months)
print(temp_eg.pivot_table(values="avg_temp_c", index="year", columns="city"))
temp_eg_1 = temp_eg[temp_eg["month"] == 1]
print(temp_eg_1.pivot_table("avg_temp_c", index="year", columns="city"))
temp_eg_7 = temp_eg[temp_eg["month"] == 7]
print(temp_eg_7.pivot_table("avg_temp_c", index="year", columns="city"))
temp_eg_winter = temp_eg[temp_eg["month"].isin([12, 1, 2])]
print(temp_eg_winter.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Cairo & Alexandria over the Winter period...
print(temp_eg_months.loc[[12, 1, 2]].mean(axis="index"))
temp_eg_summer = temp_eg[temp_eg["month"].isin([6, 7, 8])]
print(temp_eg_summer.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Cairo & Alexandria over the Summer period...
print(temp_eg_months.loc[[6, 7, 8]].mean(axis="index"))
# Let's focus on Australia...
temp_au = df[df["city"].isin(["Sydney", "Melbourne"])].sort_values("date")
print(temp_au)
temp_au_months = temp_au.pivot_table(values="avg_temp_c", index="month", columns="city")
print(temp_au_months)
print(temp_au.pivot_table(values="avg_temp_c", index="year", columns="city"))
temp_au_1 = temp_au[temp_au["month"] == 1]
print(temp_au_1.pivot_table("avg_temp_c", index="year", columns="city"))
temp_au_7 = temp_au[temp_au["month"] == 7]
print(temp_au_7.pivot_table("avg_temp_c", index="year", columns="city"))
temp_au_summer = temp_au[temp_au["month"].isin([12, 1, 2])]
print(temp_au_summer.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Sydney & Melbourne over the Summer period...
print(temp_au_months.loc[[12, 1, 2]].mean(axis="index"))
temp_au_winter = temp_au[temp_au["month"].isin([6, 7, 8])]
print(temp_au_winter.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Sydney & Melbourne over the Winter period...
print(temp_au_months.loc[[6, 7, 8]].mean(axis="index"))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960919.ipynb
|
temperatures-over-time
|
ahmedashraf99
|
[{"Id": 129960919, "ScriptId": 37888060, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:16:01", "VersionNumber": 1.0, "Title": "Temperatures_World", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 182.0, "LinesInsertedFromPrevious": 182.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186396952, "KernelVersionId": 129960919, "SourceDatasetVersionId": 5552219}]
|
[{"Id": 5552219, "DatasetId": 3198645, "DatasourceVersionId": 5626975, "CreatorUserId": 8742412, "LicenseName": "Unknown", "CreationDate": "04/28/2023 21:25:48", "VersionNumber": 1.0, "Title": "Temperatures_over_Time", "Slug": "temperatures-over-time", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3198645, "CreatorUserId": 8742412, "OwnerUserId": 8742412.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5552219.0, "CurrentDatasourceVersionId": 5626975.0, "ForumId": 3263281, "Type": 2, "CreationDate": "04/28/2023 21:25:48", "LastActivityDate": "04/28/2023", "TotalViews": 31, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 8742412, "UserName": "ahmedashraf99", "DisplayName": "Ahmed Ashraf 99", "RegisterDate": "10/29/2021", "PerformanceTier": 0}]
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
df_1 = pd.read_csv("/kaggle/input/temperatures-over-time/temperatures.csv")
df_2 = pd.read_excel("/kaggle/input/country-abbr/Country_Codes.xlsx")
print(df_1.info(), end="\n\n")
print(df_1["country"].unique(), end="\n\n")
print(df_2.info())
df = df_1.merge(df_2, left_on="country", how="left", right_on="Country")
df.drop(columns=["Unnamed: 0_x", "Country"], inplace=True)
df.rename(columns={"Unnamed: 0_y": "code"}, inplace=True)
df = df.loc[:, ["country", "code", "Continent", "city", "date", "avg_temp_c"]]
df.set_index("country", inplace=True)
df.sort_index(inplace=True)
df["date"] = df["date"].astype("datetime64")
df["year"] = df["date"].dt.year
df["month"] = df["date"].dt.month
print(df.head(11), df.tail(11), sep="\n\n", end="\n\n")
print(df.isna().sum())
print(df.index[df["code"].isna()].value_counts())
df.loc["Egypt", "code"] = "EGY"
df.loc["Turkey", "code"] = "TUR"
df.loc["Russia", "code"] = "RUS"
df.loc["Côte D'Ivoire", "code"] = "CIV"
df.loc["Syria", "code"] = "SYR"
# df.loc["Congo (Democratic Republic Of The)", "code"] = "COG"
# df.loc["Iran", "code"] = "IRN"
# df.loc["Burma", "code"] = "MMR"
# df.loc["South Korea", "code"] = "KOR"
# df.loc["Taiwan", "code"] = "TWN"
print(df.index[df["Continent"].isna()].value_counts())
df.loc["Egypt", "Continent"] = "Africa"
df.loc["Turkey", "Continent"] = "Asia"
df.loc["Russia", "Continent"] = "Asia"
df.loc["Côte D'Ivoire", "Continent"] = "Africa"
df.loc["Syria", "Continent"] = "Asia"
df.loc["Congo (Democratic Republic Of The)", "Continent"] = "Africa"
df.loc["Iran", "Continent"] = "Asia"
df.loc["Burma", "Continent"] = "Asia"
df.loc["South Korea", "Continent"] = "Asia"
df.loc["Taiwan", "Continent"] = "Asia"
print(df.isna().sum(), end="\n\n")
print(df["date"][df["avg_temp_c"].isna()].value_counts())
print(df["date"][df["date"] == "2013-9-1"])
avg_temp_september = df["avg_temp_c"][df["month"] == 9].mean()
print(avg_temp_september)
df["avg_temp_c"].fillna(avg_temp_september, inplace=True)
# Dataset is now ready for Analyzing...
print(df.info())
sns.set_style("darkgrid")
sns.set_context("paper", font_scale=1)
sns.set_palette("Dark2")
g = sns.catplot(data=df, kind="bar", x="year", y="avg_temp_c")
g.fig.suptitle("Average Temperatures on Earth over years")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Years", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.xticks(rotation=60)
plt.show()
print("\n\n", df.groupby("year")["avg_temp_c"].agg("mean"))
g = sns.catplot(data=df, kind="bar", x="month", y="avg_temp_c")
g.fig.suptitle("Average Temperatures on Earth over months")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Months", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.show()
g = sns.catplot(data=df, kind="bar", x="Continent", y="avg_temp_c")
g.fig.suptitle("Average Temperatures on the Continents")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Continents", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.xticks(rotation=60)
plt.show()
fig = px.choropleth(
df,
locations="code",
color="avg_temp_c",
hover_name=df.index,
range_color=(5, 40),
color_continuous_scale=px.colors.sequential.Plasma,
title="Average Temperatures Map",
)
fig.show()
Hottest_Countries = (
df.groupby("country")["avg_temp_c"].agg("mean").sort_values(ascending=False).head(5)
)
print(Hottest_Countries)
Coldest_Countries = (
df.groupby("country")["avg_temp_c"].agg("mean").sort_values().head(5)
)
print(Coldest_Countries)
Extreme_Countries = pd.concat([Hottest_Countries, Coldest_Countries])
Extreme_Countries = pd.DataFrame(Extreme_Countries)
Extreme_Countries.reset_index(inplace=True)
g = sns.catplot(data=Extreme_Countries, kind="bar", x="country", y="avg_temp_c")
g.fig.suptitle("Hottest vs. Coldest Countries")
plt.xticks(size=10, fontweight="bold")
plt.xlabel("Countries", fontsize=12)
plt.yticks(size=10, fontweight="bold")
plt.ylabel("Degrees", fontsize=12)
plt.xticks(rotation=90)
plt.show()
# Let's focus on Egypt...
temp_eg = df[df["city"].isin(["Cairo", "Alexandria"])].sort_values("date")
print(temp_eg)
temp_eg_months = temp_eg.pivot_table(values="avg_temp_c", index="month", columns="city")
print(temp_eg_months)
print(temp_eg.pivot_table(values="avg_temp_c", index="year", columns="city"))
temp_eg_1 = temp_eg[temp_eg["month"] == 1]
print(temp_eg_1.pivot_table("avg_temp_c", index="year", columns="city"))
temp_eg_7 = temp_eg[temp_eg["month"] == 7]
print(temp_eg_7.pivot_table("avg_temp_c", index="year", columns="city"))
temp_eg_winter = temp_eg[temp_eg["month"].isin([12, 1, 2])]
print(temp_eg_winter.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Cairo & Alexandria over the Winter period...
print(temp_eg_months.loc[[12, 1, 2]].mean(axis="index"))
temp_eg_summer = temp_eg[temp_eg["month"].isin([6, 7, 8])]
print(temp_eg_summer.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Cairo & Alexandria over the Summer period...
print(temp_eg_months.loc[[6, 7, 8]].mean(axis="index"))
# Let's focus on Australia...
temp_au = df[df["city"].isin(["Sydney", "Melbourne"])].sort_values("date")
print(temp_au)
temp_au_months = temp_au.pivot_table(values="avg_temp_c", index="month", columns="city")
print(temp_au_months)
print(temp_au.pivot_table(values="avg_temp_c", index="year", columns="city"))
temp_au_1 = temp_au[temp_au["month"] == 1]
print(temp_au_1.pivot_table("avg_temp_c", index="year", columns="city"))
temp_au_7 = temp_au[temp_au["month"] == 7]
print(temp_au_7.pivot_table("avg_temp_c", index="year", columns="city"))
temp_au_summer = temp_au[temp_au["month"].isin([12, 1, 2])]
print(temp_au_summer.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Sydney & Melbourne over the Summer period...
print(temp_au_months.loc[[12, 1, 2]].mean(axis="index"))
temp_au_winter = temp_au[temp_au["month"].isin([6, 7, 8])]
print(temp_au_winter.pivot_table("avg_temp_c", index="year", columns="city"))
# Average Temperature in Sydney & Melbourne over the Winter period...
print(temp_au_months.loc[[6, 7, 8]].mean(axis="index"))
| false | 1 | 2,406 | 0 | 2,432 | 2,406 |
||
129960157
|
<jupyter_start><jupyter_text>Alcohol Effects On Study
This dataset describes student achievement in secondary education at two Portuguese schools. The data attributes include student grades and demographic, social and school-related features, and were collected using school reports and questionnaires. Two datasets are provided regarding performance in two distinct subjects: Mathematics (mat) and Portuguese language (por). In [Cortez and Silva, 2008], the two datasets were modeled under binary/five-level classification and regression tasks. Important note: the target attribute G3 has a strong correlation with attributes G2 and G1. This occurs because G3 is the final year grade (issued at the 3rd period), while G1 and G2 correspond to the 1st and 2nd period grades. It is more difficult to predict G3 without G2 and G1, but such a prediction is much more useful (see the paper source for more details).
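As a quick, hypothetical illustration of the note above (the file name is taken from the attribute tables below; this snippet is not part of the dataset itself), the correlation between the period grades can be checked directly:

```python
import pandas as pd

maths = pd.read_csv("Maths.csv")  # the Math-course file described below
print(maths[["G1", "G2", "G3"]].corr())  # G1 and G2 correlate strongly with G3
```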
## Attributes for both Maths.csv (Math course) and Portuguese.csv (Portuguese language course) datasets:
| Columns | Description |
| --- | --- |
| school | student's school (binary: 'GP' - Gabriel Pereira or 'MS' - Mousinho da Silveira) |
| sex | student's sex (binary: 'F' - female or 'M' - male) |
| age | student's age (numeric: from 15 to 22) |
| address | student's home address type (binary: 'U' - urban or 'R' - rural) |
| famsize | family size (binary: 'LE3' - less or equal to 3 or 'GT3' - greater than 3) |
| Pstatus | parent's cohabitation status (binary: 'T' - living together or 'A' - apart) |
| Medu | mother's education (numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education) |
| Fedu | father's education (numeric: 0 - none, 1 - primary education (4th grade), 2 – 5th to 9th grade, 3 – secondary education or 4 – higher education) |
| Mjob | mother's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') |
| Fjob | father's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') |
| reason | reason to choose this school (nominal: close to 'home', school 'reputation', 'course' preference or 'other') |
| guardian | student's guardian (nominal: 'mother', 'father' or 'other') |
| traveltime | home to school travel time (numeric: 1 - <15 min., 2 - 15 to 30 min., 3 - 30 min. to 1 hour, or 4 - >1 hour) |
| studytime | weekly study time (numeric: 1 - <2 hours, 2 - 2 to 5 hours, 3 - 5 to 10 hours, or 4 - >10 hours) |
| failures | number of past class failures (numeric: n if 1<=n<3, else 4) |
| schoolsup | extra educational support (binary: yes or no) |
| famsup | family educational support (binary: yes or no) |
| paid | extra paid classes within the course subject (Math or Portuguese) (binary: yes or no) |
| activities | extra-curricular activities (binary: yes or no) |
| nursery | attended nursery school (binary: yes or no) |
| higher | wants to take higher education (binary: yes or no) |
| internet | Internet access at home (binary: yes or no) |
| romantic | with a romantic relationship (binary: yes or no) |
| famrel | quality of family relationships (numeric: from 1 - very bad to 5 - excellent) |
| freetime | free time after school (numeric: from 1 - very low to 5 - very high) |
| goout | going out with friends (numeric: from 1 - very low to 5 - very high) |
| Dalc | workday alcohol consumption (numeric: from 1 - very low to 5 - very high) |
| Walc | weekend alcohol consumption (numeric: from 1 - very low to 5 - very high) |
| health | current health status (numeric: from 1 - very bad to 5 - very good) |
| absences | number of school absences (numeric: from 0 to 93) |
## These grades are related with the course subject, Math or Portuguese:
| Grade | Description |
| --- | --- |
| G1 | first period grade (numeric: from 0 to 20) |
| G2 | second period grade (numeric: from 0 to 20) |
| G3 | final grade (numeric: from 0 to 20, output target) |
Kaggle dataset identifier: alcohol-effects-on-study
<jupyter_script># # Alcohol Effects on Study
# [alcohol-effects-on-study Data url on kaggle](https://www.kaggle.com/datasets/whenamancodes/alcohol-effects-on-study)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
df = pd.read_csv("/kaggle/input/alcohol-effects-on-study/Maths.csv")
df.head()
import seaborn as sns
sns.set_theme()
# # Correlation between alcohol consumption and Marks
# calculate the percentage of marks
df["GPerc"] = (df["G1"] + df["G2"] + df["G3"]) * 100 / 60
# calculate the sum of alcohol consumption
df["Alc"] = df["Dalc"] + df["Walc"]
sns.set_theme(style="ticks")
sns.jointplot(data=df, x="Dalc", y="GPerc", hue="sex")
plt.xlabel("Workday Alcohol Consumption")
plt.ylabel("Mark (in %)")
# the plot suggests that higher alcohol consumption is associated with lower marks, and that male students tend to consume more alcohol than female students
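# (Hypothetical follow-up, not in the original notebook.) To put a number on the visual
# impression above, compute the correlation of alcohol consumption with the mark
# percentage and compare mean consumption by sex.
print(df[["Dalc", "Walc", "Alc", "GPerc"]].corr()["GPerc"])
print(df.groupby("sex")["Alc"].mean())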
# # Correlation between alcohol consumption and freetime
sns.barplot(x="freetime", y="Alc", data=df)
plt.xlabel("Free Time")
plt.ylabel("Alcohol Consumption")
plt.title("Alcohol Consumption vs Free Time")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960157.ipynb
|
alcohol-effects-on-study
|
whenamancodes
|
[{"Id": 129960157, "ScriptId": 38658416, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14998089, "CreationDate": "05/17/2023 18:08:24", "VersionNumber": 1.0, "Title": "alcohol-effects-on-study", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 39.0, "LinesInsertedFromPrevious": 39.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186396002, "KernelVersionId": 129960157, "SourceDatasetVersionId": 4205955}]
|
[{"Id": 4205955, "DatasetId": 2479552, "DatasourceVersionId": 4262938, "CreatorUserId": 8676029, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "09/15/2022 03:21:04", "VersionNumber": 1.0, "Title": "Alcohol Effects On Study", "Slug": "alcohol-effects-on-study", "Subtitle": "Alcohol Effects On Study", "Description": "This data approach student achievement in secondary education of two Portuguese schools. The data attributes include student grades, demographic, social and school related features) and it was collected by using school reports and questionnaires. Two datasets are provided regarding the performance in two distinct subjects: Mathematics (mat) and Portuguese language (por). In [Cortez and Silva, 2008], the two datasets were modeled under binary/five-level classification and regression tasks. Important note: the target attribute G3 has a strong correlation with attributes G2 and G1. This occurs because G3 is the final year grade (issued at the 3rd period), while G1 and G2 correspond to the 1st and 2nd period grades. It is more difficult to predict G3 without G2 and G1, but such prediction is much more useful (see paper source for more details).\n\n## Attributes for both Maths.csv (Math course) and Portuguese.csv (Portuguese language course) datasets:\n| Columns | Description |\n| --- | --- |\n| school | student's school (binary: 'GP' - Gabriel Pereira or 'MS' - Mousinho da Silveira) |\n| sex | student's sex (binary: 'F' - female or 'M' - male) |\n| age | student's age (numeric: from 15 to 22) |\n| address | student's home address type (binary: 'U' - urban or 'R' - rural) |\n| famsize | family size (binary: 'LE3' - less or equal to 3 or 'GT3' - greater than 3) |\n| Pstatus | parent's cohabitation status (binary: 'T' - living together or 'A' - apart) |\n| Medu | mother's education (numeric: 0 - none, 1 - primary education (4th grade), 2 \u00e2\u20ac\u201c 5th to 9th grade, 3 \u00e2\u20ac\u201c secondary education or 4 \u00e2\u20ac\u201c higher education) |\n| Fedu | father's education (numeric: 0 - none, 1 - primary education (4th grade), 2 \u00e2\u20ac\u201c 5th to 9th grade, 3 \u00e2\u20ac\u201c secondary education or 4 \u00e2\u20ac\u201c higher education) |\n| Mjob | mother's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') |\n| Fjob | father's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') |\n| reason | reason to choose this school (nominal: close to 'home', school 'reputation', 'course' preference or 'other') |\n| guardian | student's guardian (nominal: 'mother', 'father' or 'other') |\n| traveltime | home to school travel time (numeric: 1 - <15 min., 2 - 15 to 30 min., 3 - 30 min. 
to 1 hour, or 4 - >1 hour) |\n| studytime | weekly study time (numeric: 1 - <2 hours, 2 - 2 to 5 hours, 3 - 5 to 10 hours, or 4 - >10 hours) |\n| failures | number of past class failures (numeric: n if 1<=n<3, else 4) |\n| schoolsup | extra educational support (binary: yes or no) |\n| famsup | family educational support (binary: yes or no) |\n| paid | extra paid classes within the course subject (Math or Portuguese) (binary: yes or no) |\n| activities | extra-curricular activities (binary: yes or no) |\n| nursery | attended nursery school (binary: yes or no) |\n| higher | wants to take higher education (binary: yes or no) |\n| internet | Internet access at home (binary: yes or no) |\n| romantic | with a romantic relationship (binary: yes or no) |\n| famrel | quality of family relationships (numeric: from 1 - very bad to 5 - excellent) |\n| freetime | free time after school (numeric: from 1 - very low to 5 - very high) |\n| goout | going out with friends (numeric: from 1 - very low to 5 - very high) |\n| Dalc | workday alcohol consumption (numeric: from 1 - very low to 5 - very high) |\n| Walc | weekend alcohol consumption (numeric: from 1 - very low to 5 - very high) |\n| health | current health status (numeric: from 1 - very bad to 5 - very good) |\n| absences | number of school absences (numeric: from 0 to 93) |\n\n## These grades are related with the course subject, Math or Portuguese:\n| Grade | Description |\n| --- | --- |\n| G1 | first period grade (numeric: from 0 to 20) |\n| G2 | second period grade (numeric: from 0 to 20) |\n| G3 | final grade (numeric: from 0 to 20, output target) |\n\n> More\n- Find More Exciting\ud83d\ude40 Datasets [Here](https://www.kaggle.com/whenamancodes/datasets)\n- An Upvote\ud83d\udc4d A Day\u1559(`\u25bf\u00b4)\u1557 , Keeps Aman Hurray Hurray..... \u0669(\u02d8\u25e1\u02d8)\u06f6Haha", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2479552, "CreatorUserId": 8676029, "OwnerUserId": 8676029.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4205955.0, "CurrentDatasourceVersionId": 4262938.0, "ForumId": 2507489, "Type": 2, "CreationDate": "09/15/2022 03:21:04", "LastActivityDate": "09/15/2022", "TotalViews": 106176, "TotalDownloads": 13346, "TotalVotes": 191, "TotalKernels": 22}]
|
[{"Id": 8676029, "UserName": "whenamancodes", "DisplayName": "Aman Chauhan", "RegisterDate": "10/22/2021", "PerformanceTier": 2}]
|
# # Alcohol Effects on Study
# [alcohol-effects-on-study Data url on kaggle](https://www.kaggle.com/datasets/whenamancodes/alcohol-effects-on-study)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
df = pd.read_csv("/kaggle/input/alcohol-effects-on-study/Maths.csv")
df.head()
import seaborn as sns
sns.set_theme()
# # Correlation between alcohol consumption and Marks
# calculate the percentage of marks
df["GPerc"] = (df["G1"] + df["G2"] + df["G3"]) * 100 / 60
# calculate the sum of alcohol consumption
df["Alc"] = df["Dalc"] + df["Walc"]
sns.set_theme(style="ticks")
sns.jointplot(data=df, x="Dalc", y="GPerc", hue="sex")
plt.xlabel("Workday Alcohol Consumption")
plt.ylabel("Mark (in %)")
# the plot suggests that higher alcohol consumption is associated with lower marks, and that male students tend to consume more alcohol than female students
# # Correlation between alcohol consumption and freetime
sns.barplot(x="freetime", y="Alc", data=df)
plt.xlabel("Free Time")
plt.ylabel("Alcohol Consumption")
plt.title("Alcohol Consumption vs Free Time")
plt.show()
| false | 1 | 395 | 0 | 1,755 | 395 |
||
129960537
|
<jupyter_start><jupyter_text>Impact of BMI on IOS measures on children
```
A longitudinal retrospective study was conducted to assess the impact of BMI on impulse oscillometry (IOS) estimates of airway resistance and reactance in children with sickle cell disease (C-SCD). The study encompassed the period from 2015 to 2020. Additionally, African-American children with asthma (C-Asthma) who underwent IOS testing during the same timeframe were included in the study to evaluate the influence of BMI on IOS estimates in this group. The association between BMI and IOS measures was estimated using a generalized linear mixed model (GLMM), which accounted for potential confounding factors. These factors included the diagnosis of asthma and the use of hydroxyurea in C-SCD, as well as gender and concurrent use of ICS +/-LABA for both study cohorts. Furthermore, a comparison was conducted between C-SCD and C-Asthma groups regarding age, BMI, and IOS estimates.
```
| Column | Description |
| --- | --- |
| Group | This column indicates the group to which the subject belongs. There are two groups in the study: children with sickle cell disease (C-SCD) and African-American children with asthma (C-Asthma). |
| Subject ID | Each subject in the study is assigned a unique identifier or ID, which is listed in this column. The ID is used to differentiate between individual participants. |
| Observation number | This column represents the number assigned to each observation or measurement taken for a particular subject. Since this is a longitudinal study, multiple observations may be recorded for each subject over time. |
| Hydroxyurea | This column indicates whether the subject with sickle cell disease (C-SCD) received hydroxyurea treatment. Hydroxyurea is a medication commonly used for the treatment of sickle cell disease. |
| Asthma | This column indicates whether the subject has a diagnosis of asthma. It distinguishes between children with sickle cell disease (C-SCD) and African-American children with asthma (C-Asthma). |
| ICS | This column indicates whether the subject is using inhaled corticosteroids (ICS). ICS is a type of medication commonly used for the treatment of asthma and certain other respiratory conditions. |
| LABA | This column indicates whether the subject is using a long-acting beta-agonist (LABA). LABA is a type of medication often used in combination with inhaled corticosteroids for the treatment of asthma. |
| Gender | This column represents the gender of the subject, indicating whether they are male or female. |
| Age | This column specifies the age of the subject at the time of the observation or measurement. Age is typically measured in months. |
| Height | This column represents the height of the subject, typically measured in a standard unit of length, such as centimeters or inches. Height is an important variable to consider in assessing the impact of BMI on respiratory measures. |
| Weight (Kg) | This column indicates the weight of the subject at the time of the observation or measurement. Weight is typically measured in kilograms (Kg) and is an important variable for calculating the body mass index (BMI). |
| BMI | Body Mass Index (BMI) is a measure that assesses body weight relative to height. It is calculated by dividing the weight of an individual (in kilograms) by the square of their height (in meters). The BMI column provides the calculated BMI value for each subject based on their weight and height measurements. BMI is commonly used as an indicator of overall body fatness and is often used to classify individuals into different weight categories (e.g., underweight, normal weight, overweight, obese). |
| R5Hz_PP | This column represents the estimate of airway resistance at 5 Hz using impulse oscillometry (IOS). Airway resistance is a measure of the impedance encountered by airflow during respiration. The R5Hz_PP value indicates the airway resistance at the frequency of 5 Hz and is obtained through the IOS testing. |
| R20Hz_PP | This column represents the estimate of airway resistance at 20 Hz using impulse oscillometry (IOS). Similar to R5Hz_PP, R20Hz_PP provides the measure of airway resistance at the frequency of 20 Hz based on the IOS testing. |
| X5Hz_PP | This column represents the estimate of airway reactance at 5 Hz using impulse oscillometry (IOS). Airway reactance is a measure of the elasticity and stiffness of the airway walls. The X5Hz_PP value indicates the airway reactance at the frequency of 5 Hz and is obtained through the IOS testing. |
| Fres_PP | This column represents the estimate of resonant frequency using impulse oscillometry (IOS). Resonant frequency is a measure of the point at which the reactance of the airways transitions from positive to negative during respiration. The Fres_PP value indicates the resonant frequency and is obtained through the IOS testing. |
```
These columns provide measurements and estimates related to airway resistance and reactance obtained using impulse oscillometry (IOS), which is a non-invasive method for assessing respiratory function. These parameters are valuable in understanding the impact of BMI on respiratory measures in children with sickle cell disease (C-SCD) and African-American children with asthma (C-Asthma) participating in the study.
```
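For reference, a minimal illustration of the BMI formula described in the table above (the numbers are made up for illustration and do not come from the dataset):

```python
weight_kg = 30.0                        # hypothetical weight
height_cm = 135.0                       # hypothetical height
bmi = weight_kg / (height_cm / 100) ** 2
print(round(bmi, 1))                    # ~16.5
```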
Kaggle dataset identifier: impact-of-bmi-on-ios-measures
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from matplotlib import pyplot as plt
import seaborn as sns
from autoviz.classify_method import data_cleaning_suggestions
from autoviz.AutoViz_Class import AutoViz_Class
AV = AutoViz_Class()
from scipy.stats import ttest_ind
# # Data Overview
df = pd.read_csv("/kaggle/input/impact-of-bmi-on-ios-measures/BMI_IOS_SCD_Asthma.csv")
df.head()
# # Shape
df.shape
# # Data Preprocessing
df["Fres_PP"].fillna(df["Fres_PP"].mean(), inplace=True)
# # Detecting Outliers
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(12, 4))
columns = ["Weight (Kg)", "BMI", "R5Hz_PP", "R20Hz_PP", "X5Hz_PP"]
for i, column in enumerate(columns):
df.plot(kind="box", column=column, ax=axes[i])
axes[i].set_title(column)
plt.tight_layout()
plt.show()
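# (Hypothetical complement, not in the original notebook.) The boxplots above flag
# outliers using the 1.5 * IQR rule; the same rule can be applied numerically to
# count outliers per column.
for column in columns:
    q1, q3 = df[column].quantile([0.25, 0.75])
    iqr_val = q3 - q1
    mask = (df[column] < q1 - 1.5 * iqr_val) | (df[column] > q3 + 1.5 * iqr_val)
    print(f"{column}: {int(mask.sum())} outliers")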
# # Frequency distribution of categorical variables and summary of numerical variables
numerical_vars = [
"Age (months)",
"Height (cm)",
"Weight (Kg)",
"BMI",
"R5Hz_PP",
"R20Hz_PP",
"X5Hz_PP",
"Fres_PP",
]
summary_stats = df[numerical_vars].describe()
print(summary_stats)
categorical_vars = ["Gender", "Asthma", "Hydroxyurea", "ICS", "LABA"]
for var in categorical_vars:
freq_dist = df[var].value_counts()
print(f"\n{var}:\n{freq_dist}")
# # Vizualization Numerical Values
sns.set_palette("Set2")
plt.figure(figsize=(15, 14))
numerical_vars = [
"Age (months)",
"Height (cm)",
"Weight (Kg)",
"BMI",
"R5Hz_PP",
"R20Hz_PP",
"X5Hz_PP",
"Fres_PP",
]
for i, var in enumerate(numerical_vars):
plt.subplot(6, 4, i + 1)
sns.histplot(df[var], bins=10, kde=True, edgecolor="black", alpha=0.7)
plt.xlabel(var)
plt.ylabel("Frequency")
plt.title(f"Histogram of {var}")
plt.tight_layout()
categorical_vars = ["Gender", "Asthma", "Hydroxyurea", "ICS", "LABA"]
for i, var in enumerate(categorical_vars):
plt.subplot(6, 4, len(numerical_vars) + i + 1)
value_counts = df[var].value_counts().reset_index()
sns.barplot(x="index", y=var, data=value_counts)
plt.xlabel(var)
plt.ylabel("Frequency")
plt.title(f"Bar Plot of {var}")
plt.tight_layout()
plt.subplots_adjust(hspace=1.5)
plt.show()
# # Explore relationships b/w variables
plt.figure(figsize=(15, 4))
plt.subplot(1, 4, 1)
sns.scatterplot(data=df, x="BMI", y="R5Hz_PP")
plt.xlabel("BMI")
plt.ylabel("R5Hz_PP")
plt.subplot(1, 4, 2)
sns.scatterplot(data=df, x="BMI", y="R20Hz_PP")
plt.xlabel("BMI")
plt.ylabel("R20Hz_PP")
plt.subplot(1, 4, 3)
sns.scatterplot(data=df, x="BMI", y="X5Hz_PP")
plt.xlabel("BMI")
plt.ylabel("X5Hz_PP")
plt.subplot(1, 4, 4)
sns.scatterplot(data=df, x="BMI", y="Fres_PP")
plt.xlabel("BMI")
plt.ylabel("Fres_PP")
plt.tight_layout()
plt.show()
correlation_matrix = df[["BMI", "R5Hz_PP", "R20Hz_PP", "X5Hz_PP", "Fres_PP"]].corr()
plt.figure(figsize=(8, 6))
sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm", vmin=-1, vmax=1)
plt.title("Correlation Matrix")
# # Comparing the C-SCD and C-Asthma groups
group_scd = df[df["Group"] == "C-SCD"]
group_asthma = df[df["Group"] == "C-Asthma"]
plt.figure()
sns.boxplot(x="Group", y="Age (months)", data=df)
plt.xlabel("Group")
plt.ylabel("Age (months)")
plt.title("Comparison of Age between C-SCD and C-Asthma")
plt.figure()
sns.boxplot(x="Group", y="BMI", data=df)
plt.xlabel("Group")
plt.ylabel("BMI")
plt.title("Comparison of BMI between C-SCD and C-Asthma")
ios_vars = ["R5Hz_PP", "R20Hz_PP", "X5Hz_PP", "Fres_PP"]
plt.figure(figsize=(12, 4))
for i, var in enumerate(ios_vars):
plt.subplot(1, 4, i + 1)
sns.boxplot(x="Group", y=var, data=df)
plt.xlabel("Group")
plt.ylabel(var)
plt.title(f"Comparison of {var} between C-SCD and C-Asthma")
plt.tight_layout()
age_ttest = ttest_ind(group_scd["Age (months)"], group_asthma["Age (months)"])
bmi_ttest = ttest_ind(group_scd["BMI"], group_asthma["BMI"])
print("T-Test Results:")
print(f"Age: t-value = {age_ttest.statistic:.2f}, p-value = {age_ttest.pvalue:.4f}")
print(f"BMI: t-value = {bmi_ttest.statistic:.2f}, p-value = {bmi_ttest.pvalue:.4f}")
subgroups = ["Hydroxyurea", "Asthma", "Gender", "ICS", "LABA"]
for factor in subgroups:
fig, axes = plt.subplots(1, 4, figsize=(15, 4))
for i, ios_measure in enumerate(ios_vars):
ax = axes[i]
grouped_data = df.groupby(factor)[[ios_measure, "BMI"]].mean()
for group, data in grouped_data.iterrows():
ax.scatter(data["BMI"], data[ios_measure], label=group)
ax.set_xlabel("BMI")
ax.set_ylabel(ios_measure)
ax.set_title(f"{ios_measure} vs BMI by {factor}")
ax.legend()
x_min = min(df["BMI"])
x_max = max(df["BMI"])
y_min = min(df[ios_measure])
y_max = max(df[ios_measure])
axis_min = min(x_min, y_min)
axis_max = max(x_max, y_max)
ax.set_xlim(axis_min, axis_max)
ax.set_ylim(axis_min, axis_max)
ax.tick_params(axis="x", rotation=45)
plt.tight_layout()
plt.subplots_adjust(top=0.8)
plt.show()
correlations = df.groupby(subgroups)[ios_vars + ["BMI"]].corr().iloc[0::2, -1]
print("Correlation between BMI and IOS measures by subgroup:")
print(correlations)
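# A small readability sketch (not part of the original analysis above): the same
# BMI-vs-IOS relationship can be summarised one factor at a time with plain Pearson
# correlations, which is often easier to read than the multi-key groupby result.
# It relies on df, subgroups and ios_vars defined in the preceding cells.
for factor in subgroups:
    for level, grp in df.groupby(factor):
        pair_corr = {var: grp["BMI"].corr(grp[var]) for var in ios_vars}
        print(
            f"{factor}={level}: "
            + ", ".join(f"{var}: {c:.2f}" for var, c in pair_corr.items())
        )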
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960537.ipynb
|
impact-of-bmi-on-ios-measures
|
utkarshx27
|
[{"Id": 129960537, "ScriptId": 38655764, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13364933, "CreationDate": "05/17/2023 18:12:19", "VersionNumber": 1.0, "Title": "Influence of BMI on IOS on children", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 193.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 186396475, "KernelVersionId": 129960537, "SourceDatasetVersionId": 5709766}]
|
[{"Id": 5709766, "DatasetId": 3279348, "DatasourceVersionId": 5785856, "CreatorUserId": 13364933, "LicenseName": "CC BY-SA 4.0", "CreationDate": "05/17/2023 16:32:01", "VersionNumber": 3.0, "Title": "Impact of BMI on IOS measures on children", "Slug": "impact-of-bmi-on-ios-measures", "Subtitle": "Influence of BMI on IOS measures on children with sickle cell disease", "Description": "```\nA longitudinal retrospective study was conducted to assess the impact of BMI on impulse oscillometry (IOS) estimates of airway resistance and reactance in children with sickle cell disease (C-SCD). The study encompassed the period from 2015 to 2020. Additionally, African-American children with asthma (C-Asthma) who underwent IOS testing during the same timeframe were included in the study to evaluate the influence of BMI on IOS estimates in this group. The association between BMI and IOS measures was estimated using a generalized linear mixed model (GLMM), which accounted for potential confounding factors. These factors included the diagnosis of asthma and the use of hydroxyurea in C-SCD, as well as gender and concurrent use of ICS +/-LABA for both study cohorts. Furthermore, a comparison was conducted between C-SCD and C-Asthma groups regarding age, BMI, and IOS estimates.\n```\n| Column | Description |\n| --- | --- |\n| Group | This column indicates the group to which the subject belongs. There are two groups in the study: children with sickle cell disease (C-SCD) and African-American children with asthma (C-Asthma). |\n| Subject ID | Each subject in the study is assigned a unique identifier or ID, which is listed in this column. The ID is used to differentiate between individual participants. |\n| Observation number | This column represents the number assigned to each observation or measurement taken for a particular subject. Since this is a longitudinal study, multiple observations may be recorded for each subject over time. |\n| Hydroxyurea | This column indicates whether the subject with sickle cell disease (C-SCD) received hydroxyurea treatment. Hydroxyurea is a medication commonly used for the treatment of sickle cell disease. |\n| Asthma | This column indicates whether the subject has a diagnosis of asthma. It distinguishes between children with sickle cell disease (C-SCD) and African-American children with asthma (C-Asthma). |\n| ICS | This column indicates whether the subject is using inhaled corticosteroids (ICS). ICS is a type of medication commonly used for the treatment of asthma and certain other respiratory conditions. |\n| LABA | This column indicates whether the subject is using a long-acting beta-agonist (LABA). LABA is a type of medication often used in combination with inhaled corticosteroids for the treatment of asthma. |\n| Gender | This column represents the gender of the subject, indicating whether they are male or female. |\n| Age | This column specifies the age of the subject at the time of the observation or measurement. Age is typically measured in months. |\n| Height | This column represents the height of the subject, typically measured in a standard unit of length, such as centimeters or inches. Height is an important variable to consider in assessing the impact of BMI on respiratory measures. |\n| Weight (Kg) | This column indicates the weight of the subject at the time of the observation or measurement. Weight is typically measured in kilograms (Kg) and is an important variable for calculating the body mass index (BMI). 
|\n| BMI | Body Mass Index (BMI) is a measure that assesses body weight relative to height. It is calculated by dividing the weight of an individual (in kilograms) by the square of their height (in meters). The BMI column provides the calculated BMI value for each subject based on their weight and height measurements. BMI is commonly used as an indicator of overall body fatness and is often used to classify individuals into different weight categories (e.g., underweight, normal weight, overweight, obese). |\n| R5Hz_PP | This column represents the estimate of airway resistance at 5 Hz using impulse oscillometry (IOS). Airway resistance is a measure of the impedance encountered by airflow during respiration. The R5Hz_PP value indicates the airway resistance at the frequency of 5 Hz and is obtained through the IOS testing. |\n| R20Hz_PP | This column represents the estimate of airway resistance at 20 Hz using impulse oscillometry (IOS). Similar to R5Hz_PP, R20Hz_PP provides the measure of airway resistance at the frequency of 20 Hz based on the IOS testing. |\n| X5Hz_PP | This column represents the estimate of airway reactance at 5 Hz using impulse oscillometry (IOS). Airway reactance is a measure of the elasticity and stiffness of the airway walls. The X5Hz_PP value indicates the airway reactance at the frequency of 5 Hz and is obtained through the IOS testing. |\n| Fres_PP | This column represents the estimate of resonant frequency using impulse oscillometry (IOS). Resonant frequency is a measure of the point at which the reactance of the airways transitions from positive to negative during respiration. The Fres_PP value indicates the resonant frequency and is obtained through the IOS testing. |\n```\nThese columns provide measurements and estimates related to airway resistance and reactance obtained using impulse oscillometry (IOS), which is a non-invasive method for assessing respiratory function. These parameters are valuable in understanding the impact of BMI on respiratory measures in children with sickle cell disease (C-SCD) and African-American children with asthma (C-Asthma) participating in the study.\n```", "VersionNotes": "Data Update 2023-05-17", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3279348, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5709766.0, "CurrentDatasourceVersionId": 5785856.0, "ForumId": 3345061, "Type": 2, "CreationDate": "05/17/2023 03:46:13", "LastActivityDate": "05/17/2023", "TotalViews": 903, "TotalDownloads": 113, "TotalVotes": 7, "TotalKernels": 2}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
| false | 1 | 2,147 | 2 | 3,455 | 2,147 |
||
129960520
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
data = pd.read_csv("/kaggle/input/d/ahmedashraf99/salaries/ds_salaries_clean.csv")
sal = pd.DataFrame(data)
print(sal.info())
data_science = "Data Scientist|NLP"
data_analyst = "Analyst|Analytics"
data_engineer = "Data Engineer|ETL|Architect|Infrastructure"
ml_engineer = "Machine Learning|ML|Big Data|AI"
manager = "Manager|Head|Director|Lead|Principal|Staff"
consultant = "Consultant|Freelance"
conditions = [
(sal["Designation"].str.contains(data_science)),
(sal["Designation"].str.contains(data_analyst)),
(sal["Designation"].str.contains(data_engineer)),
(sal["Designation"].str.contains(ml_engineer)),
(sal["Designation"].str.contains(manager)),
(sal["Designation"].str.contains(consultant)),
]
job_categories = [
"Data_Science",
"Data_Analytics",
"Data_Engineering",
"Machine_Learning",
"Managerial",
"Consultant",
]
sal["Job_Category"] = np.select(conditions, job_categories, default="Other")
print(sal["Job_Category"].value_counts())
sns.countplot(data=sal, y="Job_Category")
plt.show()
# Data correction for index 18: the recorded salary appears to be monthly, so multiply by 12 to annualise it.
sal.loc[18, "Salary_USD"] = sal.loc[18, "Salary_USD"] * 12
print(sal.loc[18, "Salary_USD"])
print(
pd.crosstab(
index=sal["Job_Category"],
columns=sal["Company_Size"],
values=sal["Salary_USD"] / 1000,
aggfunc="mean",
)
)
sns.heatmap(sal[["Remote_Working_Ratio", "Salary_USD"]].corr(), annot=True)
plt.show()
Twenty_Fifth = sal["Salary_USD"].quantile(0.25)
Median = sal["Salary_USD"].median()
Seventy_Fifth = sal["Salary_USD"].quantile(0.75)
bins = [0, Twenty_Fifth, Median, Seventy_Fifth, sal["Salary_USD"].max()]
labels = ["Low_Income", "UnderMid_Income", "OverMid_Income", "High_Income"]
sal["Income"] = pd.cut(x=sal["Salary_USD"], labels=labels, bins=bins)
print(sal["Income"].value_counts(), end="\n\n")
print(sal.info())
IQR = Seventy_Fifth - Twenty_Fifth
lower_limit = Twenty_Fifth - (1.5 * IQR)
upper_limit = Seventy_Fifth + (1.5 * IQR)
print(
f"Interquartile Range = {IQR:.2f}\n"
f"Lower Limit for Yearly Salaries in USD = {lower_limit:.2f}\n"
f"Upper Limit for Yearly Salaries in USD = {upper_limit:.2f}\n"
)
Outliers = sal[(sal["Salary_USD"] > upper_limit) | (sal["Salary_USD"] < lower_limit)]
print(Outliers.sort_values(by="Salary_USD", ascending=False))
sns.barplot(data=Outliers, x="Salary_USD", y="Designation", hue="Experience")
plt.show()
sal.loc[sal["Salary_USD"] > upper_limit, "Salary_USD"] = upper_limit
print(sal["Salary_USD"].sort_values(ascending=False))
sns.scatterplot(data=sal, x="Experience", y="Salary_USD", hue="Income")
plt.show()
Top_Locations = pd.DataFrame(sal["Employee_Location"].value_counts().head(5))
print(Top_Locations)
Top_Locations_Salaries = sal[sal["Employee_Location"].isin(Top_Locations.index)]
print(Top_Locations_Salaries)
sns.boxplot(data=Top_Locations_Salaries, x="Employee_Location", y="Salary_USD")
plt.show()
sns.barplot(
data=Top_Locations_Salaries,
x="Employee_Location",
y="Salary_USD",
hue="Working_Year",
)
plt.show()
sns.barplot(
data=Top_Locations_Salaries, x="Employee_Location", y="Salary_USD", hue="Experience"
)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960520.ipynb
| null | null |
[{"Id": 129960520, "ScriptId": 36906283, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8742412, "CreationDate": "05/17/2023 18:12:06", "VersionNumber": 1.0, "Title": "Data_Salaries", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 83.0, "LinesInsertedFromPrevious": 83.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,110 | 0 | 1,110 | 1,110 |
||
129960650
|
<jupyter_start><jupyter_text>Hotel Revenue Data Project
Kaggle dataset identifier: hotel-revenue-data-project
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
print(filename)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sb
import matplotlib.pyplot as plt
from scipy.stats import linregress
# **Data analysis using python and power BI on Hotel Revenue Dataset**
# Your manager wants a report to understand the relationship between average daily rate (ADR) for hotels and the lead time before booking. Your manager has asked you to make a Power BI Dashboard for the same.
# * you need to compile a collection of excel sheets into a single dataset and filter out cancellations
# * create a simple linear regression between these two variables
# **Anaconda settings for python environment in power BI**
# open anaconda command prompt
# create env using : conda create -n new_env_pbi2 python==3.7
# conda activate new_env_pbi2
# pip install pandas
# pip install seaborn
df1 = pd.read_excel(
"/kaggle/input/hotel-revenue-data-project/SQL Project 1. Hotel_Revenue_Historical_Full.xlsx",
sheet_name="2018",
)
df2 = pd.read_excel(
"/kaggle/input/hotel-revenue-data-project/SQL Project 1. Hotel_Revenue_Historical_Full.xlsx",
sheet_name="2019",
)
df3 = pd.read_excel(
"/kaggle/input/hotel-revenue-data-project/SQL Project 1. Hotel_Revenue_Historical_Full.xlsx",
sheet_name="2020",
)
complete_data = pd.concat([df1, df2, df3], ignore_index=True)
complete_data.sample(5)
complete_data.columns
# Our first step is to load data and filter out the canceled bookings
complete_data["is_canceled"].value_counts()
filtered_data = complete_data[complete_data["is_canceled"] != 1]
filtered_data.to_csv("filtered_hotel_dataset.csv")
# Univariate and bivariate analysis are two fundamental approaches used in data analysis to explore and understand relationships and patterns within a dataset.
# Univariate and bivariate analysis
sb.distplot(filtered_data["adr"])
# Violin plot
plt.figure(figsize=(5, 5))
sb.violinplot(data=filtered_data, y="lead_time", x="hotel")
resort_mean = filtered_data[filtered_data["hotel"] == "Resort Hotel"][
"lead_time"
].mean()
city_mean = filtered_data[filtered_data["hotel"] == "City Hotel"]["lead_time"].mean()
plt.title(
f"The mean is {filtered_data['lead_time'].mean():.2f} and Resort mean is {resort_mean :.2f} and City mean is {city_mean:.2f}"
)
sb.distplot(
filtered_data[filtered_data["hotel"] == "Resort Hotel"]["adr"], label="Resort Hotel"
)
sb.distplot(
filtered_data[filtered_data["hotel"] == "City Hotel"]["adr"], label="City Hotel"
)
resort_mean = filtered_data[filtered_data["hotel"] == "Resort Hotel"]["adr"].mean()
city_mean = filtered_data[filtered_data["hotel"] == "City Hotel"]["adr"].mean()
plt.axvline(resort_mean, color="black", linestyle="--", label="Resort Mean")
plt.axvline(city_mean, color="red", linestyle="--", label="City Mean")
plt.title(
f"The mean is {filtered_data['adr'].mean():.2f} and Resort mean is {resort_mean :.2f} and City mean is {city_mean:.2f}"
)
plt.legend()
# plt.savefig('ADR_distribution.png')
sb.regplot(data=filtered_data, y="adr", x="lead_time", line_kws={"color": "red"})
sb.jointplot(
data=filtered_data,
y="adr",
x="lead_time",
kind="reg",
joint_kws={"line_kws": {"color": "red"}},
)
sb.jointplot(data=filtered_data, y="adr", x="lead_time", kind="kde", hue="hotel")
# Linear Regression
linregress(filtered_data["lead_time"], filtered_data["adr"])
fit = linregress(filtered_data["lead_time"], filtered_data["adr"])
slope = fit.slope
intercept = fit.intercept
r2 = fit.rvalue**2  # coefficient of determination (rvalue is r, so square it)
regression_table = pd.DataFrame(
{"Name": ["slope", "intercept", "r2"], "values": [slope, intercept, r2]}
)
regression_table
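# A small usage sketch (an illustration, assuming the linear fit above is adequate):
# predicting ADR for a few hypothetical lead times from the fitted slope and intercept.
example_lead_times = np.array([7, 30, 90, 180])
predicted_adr = intercept + slope * example_lead_times
print(pd.DataFrame({"lead_time": example_lead_times, "predicted_adr": predicted_adr}))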
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/960/129960650.ipynb
|
hotel-revenue-data-project
|
ferranindata
|
[{"Id": 129960650, "ScriptId": 38641291, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1107042, "CreationDate": "05/17/2023 18:13:32", "VersionNumber": 1.0, "Title": "Hotel revenue analysis using python and power BI", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 91.0, "LinesInsertedFromPrevious": 91.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186396606, "KernelVersionId": 129960650, "SourceDatasetVersionId": 3212607}]
|
[{"Id": 3212607, "DatasetId": 1948843, "DatasourceVersionId": 3262472, "CreatorUserId": 8800728, "LicenseName": "Unknown", "CreationDate": "02/21/2022 15:47:54", "VersionNumber": 1.0, "Title": "Hotel Revenue Data Project", "Slug": "hotel-revenue-data-project", "Subtitle": "Historical hotel data for analysis purposes", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1948843, "CreatorUserId": 8800728, "OwnerUserId": 8800728.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3212607.0, "CurrentDatasourceVersionId": 3262472.0, "ForumId": 1972740, "Type": 2, "CreationDate": "02/21/2022 15:47:54", "LastActivityDate": "02/21/2022", "TotalViews": 1760, "TotalDownloads": 181, "TotalVotes": 2, "TotalKernels": 1}]
|
[{"Id": 8800728, "UserName": "ferranindata", "DisplayName": "Ferranindata", "RegisterDate": "11/05/2021", "PerformanceTier": 0}]
|
| false | 0 | 1,331 | 0 | 1,358 | 1,331 |
||
129917281
|
# Import the libraries needed for the rest of the analysis
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style("darkgrid")
import warnings
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn
import sklearn.linear_model as linear_model
import xgboost as xgb
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from IPython.display import HTML, display
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import scipy.stats as stats
from scipy import stats
from scipy.stats import norm, skew
pd.set_option("display.float_format", lambda x: "{:.3f}".format(x))
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Load the train and test data into DataFrames
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
quantitative = [f for f in train.columns if train.dtypes[f] != "object"]
quantitative.remove("SalePrice")
quantitative.remove("Id")
qualitative = [f for f in train.columns if train.dtypes[f] == "object"]
##First 10 rows of the training set
train.head(10)
##First 10 rows of the test set
test.head(10)
# **Overview:**
# There are 1460 training instances and 1459 test instances. The total number of attributes equals 81, of which 36 are quantitative and 43 categorical, plus Id and SalePrice.
# Quantitative: 1stFlrSF, 2ndFlrSF, 3SsnPorch, BedroomAbvGr, BsmtFinSF1, BsmtFinSF2, BsmtFullBath, BsmtHalfBath, BsmtUnfSF, EnclosedPorch, Fireplaces, FullBath, GarageArea, GarageCars, GarageYrBlt, GrLivArea, HalfBath, KitchenAbvGr, LotArea, LotFrontage, LowQualFinSF, MSSubClass, MasVnrArea, MiscVal, MoSold, OpenPorchSF, OverallCond, OverallQual, PoolArea, ScreenPorch, TotRmsAbvGrd, TotalBsmtSF, WoodDeckSF, YearBuilt, YearRemodAdd, YrSold
# Qualitative: Alley, BldgType, BsmtCond, BsmtExposure, BsmtFinType1, BsmtFinType2, BsmtQual, CentralAir, Condition1, Condition2, Electrical, ExterCond, ExterQual, Exterior1st, Exterior2nd, Fence, FireplaceQu, Foundation, Functional, GarageCond, GarageFinish, GarageQual, GarageType, Heating, HeatingQC, HouseStyle, KitchenQual, LandContour, LandSlope, LotConfig, LotShape, MSZoning, MasVnrType, MiscFeature, Neighborhood, PavedDrive, PoolQC, RoofMatl, RoofStyle, SaleCondition, SaleType, Street, Utilities
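# A quick sanity check of the counts quoted above (a sketch; it relies on train, test,
# quantitative and qualitative defined in the previous cells).
print("train shape:", train.shape, " test shape:", test.shape)
print("quantitative:", len(quantitative), " qualitative:", len(qualitative))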
missing = train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing.plot.bar()
# 19 attributes have missing values, 5 of them in more than 50% of all records. In most cases NaN means the absence of the feature described by the attribute, e.g. no pool, no fence, no garage, no basement.
import scipy.stats as st
y = train["SalePrice"]
plt.figure(1, figsize=(10, 6))
plt.title("Johnson SU")
sns.distplot(y, kde=False, fit=st.johnsonsu)
plt.figure(2, figsize=(10, 6))
plt.title("Normal distribution")
sns.distplot(y, kde=False, fit=st.norm)
plt.figure(3, figsize=(10, 6))
plt.title("Log-normal distribution")
sns.distplot(y, kde=False, fit=st.lognorm)
plt.show()
# Clearly SalePrice does not follow a normal distribution, so it has to be transformed before running a regression. Although a log transform gives fairly good results, the unbounded Johnson distribution fits best.
test_normality = lambda x: stats.shapiro(x.fillna(0))[1] < 0.01
normal = pd.DataFrame(train[quantitative])
normal = normal.apply(test_normality)
print(not normal.any())
# In addition, none of the quantitative variables is normally distributed, so they should be transformed as well.
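# For example (an illustrative sketch using GrLivArea): a log1p transform typically
# makes a right-skewed feature far more symmetric, which is what the normality checks
# above are getting at.
print("GrLivArea skew before log1p:", skew(train["GrLivArea"]))
print("GrLivArea skew after  log1p:", skew(np.log1p(train["GrLivArea"])))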
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
# Some independent variables look like good candidates for a log transform: TotalBsmtSF, KitchenAbvGr, LotFrontage, LotArea and others. At the same time, such a transform would smooth out irregularities that may be important, for example the large number of houses with 2ndFlrSF equal to 0. Such irregularities are good candidates for feature engineering.
# **Categorical data**
# For qualitative variables we can apply two approaches. The first is to inspect the distribution of SalePrice with respect to the variable's values and re-encode them; the second is to create a dummy variable for every possible category.
for c in qualitative:
train[c] = train[c].astype("category")
if train[c].isnull().any():
train[c] = train[c].cat.add_categories(["MISSING"])
train[c] = train[c].fillna("MISSING")
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
f = pd.melt(train, id_vars=["SalePrice"], value_vars=qualitative)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(boxplot, "value", "SalePrice")
# Some categories appear more heterogeneous with respect to sale price than others. Neighborhood has a big impact on house prices. The Partial sale condition seems to be the most expensive. Having a pool on the lot raises the price considerably. There are also differences in variability between category values.
def anova(frame):
anv = pd.DataFrame()
anv["feature"] = qualitative
pvals = []
for c in qualitative:
samples = []
for cls in frame[c].unique():
s = frame[frame[c] == cls]["SalePrice"].values
samples.append(s)
pval = stats.f_oneway(*samples)[1]
pvals.append(pval)
anv["pval"] = pvals
return anv.sort_values("pval")
a = anova(train)
a["disparity"] = np.log(1.0 / a["pval"].values)
sns.barplot(data=a, x="feature", y="disparity")
x = plt.xticks(rotation=90)
# Below is a quick estimate of the influence of each categorical variable on SalePrice. For every variable, SalePrices are split into separate sets based on the category values, and an ANOVA test then checks whether those sets have similar distributions. If a variable has little influence, the set means should be about the same; a decreasing pval is a sign of increasing diversity across the partitions.
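# To make the ANOVA idea concrete, a tiny synthetic example (not housing data): samples
# with clearly different means produce a small p-value, i.e. a large disparity above.
print(stats.f_oneway([1, 2, 3], [2, 3, 4], [10, 11, 12]))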
def encode(frame, feature):
ordering = pd.DataFrame()
ordering["val"] = frame[feature].unique()
ordering.index = ordering.val
ordering["spmean"] = (
frame[[feature, "SalePrice"]].groupby(feature).mean()["SalePrice"]
)
ordering = ordering.sort_values("spmean")
ordering["ordering"] = range(1, ordering.shape[0] + 1)
ordering = ordering["ordering"].to_dict()
for cat, o in ordering.items():
frame.loc[frame[feature] == cat, feature + "_E"] = o
qual_encoded = []
for q in qualitative:
encode(train, q)
qual_encoded.append(q + "_E")
print(qual_encoded)
# Qualitative variables are now encoded according to an ordering based on the mean SalePrice.
# Correlation
# As a rule, to reduce confusion, only variables that correlate with SalePrice but not with each other should be added to regression models.
def spearman(frame, features):
spr = pd.DataFrame()
spr["feature"] = features
spr["spearman"] = [frame[f].corr(frame["SalePrice"], "spearman") for f in features]
spr = spr.sort_values("spearman")
plt.figure(figsize=(6, 0.25 * len(features)))
sns.barplot(data=spr, y="feature", x="spearman", orient="h")
features = quantitative + qual_encoded
spearman(train, features)
# Spearman correlation suits this case better because it captures relationships between variables even when they are non-linear. OverallQual is the main criterion in determining a house price. Neighborhood has a big influence: partly it has some intrinsic value of its own, but houses in certain regions also tend to share the same characteristics (confounding), which leads to similar valuations.
plt.figure(1)
corr = train[quantitative + ["SalePrice"]].corr()
sns.heatmap(corr)
plt.figure(2)
corr = train[qual_encoded + ["SalePrice"]].corr()
sns.heatmap(corr)
plt.figure(3)
corr = pd.DataFrame(
np.zeros([len(quantitative) + 1, len(qual_encoded) + 1]),
index=quantitative + ["SalePrice"],
columns=qual_encoded + ["SalePrice"],
)
for q1 in quantitative + ["SalePrice"]:
for q2 in qual_encoded + ["SalePrice"]:
corr.loc[q1, q2] = train[q1].corr(train[q2])
sns.heatmap(corr)
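# A small sketch (not in the original) pulling the strongest SalePrice correlations out
# of the matrices above, using the quantitative and encoded qualitative columns.
sale_corr = (
    train[quantitative + qual_encoded + ["SalePrice"]].corr()["SalePrice"].drop("SalePrice")
)
print(sale_corr.reindex(sale_corr.abs().sort_values(ascending=False).index).head(10))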
# There are many strong correlations between variables. Garages seem to be built in the same year as the houses, basements tend to have the same area as the first floor, which is fairly obvious. Garage area correlates strongly with the number of cars. Neighborhood correlates with many other variables, which confirms the idea that houses in the same region share the same characteristics. Dwelling type correlates negatively with kitchen area above grade.
# Pair plots
# It is also useful to see how the sale price relates to each independent variable.
def pairplot(x, y, **kwargs):
ax = plt.gca()
ts = pd.DataFrame({"time": x, "val": y})
ts = ts.groupby("time").mean()
ts.plot(ax=ax)
plt.xticks(rotation=90)
f = pd.melt(train, id_vars=["SalePrice"], value_vars=quantitative + qual_encoded)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(pairplot, "value", "SalePrice")
# # Data processing
# Let's take a look at the outliers
#
fig, ax1 = plt.subplots()
ax1.scatter(x=train["GrLivArea"], y=train["SalePrice"])
plt.ylabel("SalePrice", fontsize=13)
plt.xlabel("GrLivArea", fontsize=13)
plt.show()
# At the bottom right we can see two points with extremely large GrLivArea values at a low price. These values are huge anomalies, so we can safely remove them.
train = train.drop(
train[(train["GrLivArea"] > 4000) & (train["SalePrice"] < 300000)].index
)
fig, ax1 = plt.subplots()
ax1.scatter(train["GrLivArea"], train["SalePrice"])
plt.ylabel("SalePrice", fontsize=13)
plt.xlabel("GrLivArea", fontsize=13)
plt.show()
# Removing outliers is not always safe. We decided to delete these two because they are very large and very bad (extremely large areas at very low prices).
# There are probably other outliers in the training data. However, removing them all could hurt our models if the test data also contained outliers. So instead of removing them all, we will simply make some of our models robust to them; you can refer to the modelling section of this notebook for that.
# ##Target variable
# **SalePrice** is the variable we need to predict, so let's analyse it first.
sns.distplot(train["SalePrice"], fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train["SalePrice"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
# Now plot the distribution
plt.legend(
["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Частота")
plt.title("Розподіл цін:")
# Отримаємо також QQ-графік
fig = plt.figure()
res = stats.probplot(train["SalePrice"], plot=plt)
plt.show()
# The target variable is right-skewed. Since (linear) models like normally distributed data, we need to transform this variable and make it more normally distributed.
# **Log-transformation of the target variable**
train["SalePrice"] = np.log1p(train["SalePrice"])
# Check the new distribution
sns.distplot(train["SalePrice"], fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train["SalePrice"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
# Plot the distribution
plt.legend(
["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Частота")
plt.title("Ціновий розподіл")
# Також QQ графік
fig = plt.figure()
res = stats.probplot(train["SalePrice"], plot=plt)
plt.show()
# # The skew now appears to be corrected and the data look more normally distributed.
# ##Features engineering
# first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(["SalePrice"], axis=1, inplace=True)
print("Весь розмір даних : {}".format(all_data.shape))
# ###Втрачені дані
# - **PoolQC** : В описі даних зазначено, що NA означає "Без басейну". Це має сенс, враховуючи величезний відсоток відсутніх значень (+99%) і те, що більшість будинків взагалі не мають басейну.
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
# - **MiscFeature** : в описі даних зазначено, що NA означає "немає помилки"
#
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
# - **Alley** : в описі даних зазначено, що NA означає "немає доступу до провулків"
all_data["Alley"] = all_data["Alley"].fillna("None")
# - **Fence** : в описі даних зазначено, що NA означає "без огорожі"
all_data["Fence"] = all_data["Fence"].fillna("None")
# - **FireplaceQu** : data description says NA means "no fireplace"
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# - **LotFrontage** : since the lot frontage of a house most likely has a similar value to the other houses in its neighborhood, we can **fill missing values with the median LotFrontage of the neighborhood**.
# Group by neighborhood and fill the missing values with the median LotFrontage of each neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
    lambda x: x.fillna(x.median())
)
# - **GarageType, GarageFinish, GarageQual and GarageCond** : replace missing data with None
for col in ("GarageType", "GarageFinish", "GarageQual", "GarageCond"):
    all_data[col] = all_data[col].fillna("None")
# - **GarageYrBlt, GarageArea and GarageCars** : replace missing data with 0 (since no garage means no cars in such a garage).
#
for col in ("GarageYrBlt", "GarageArea", "GarageCars"):
    all_data[col] = all_data[col].fillna(0)
# - **BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath** : missing values are likely zero since there is no basement
for col in (
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
):
all_data[col] = all_data[col].fillna(0)
# - **BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2** : for all these categorical basement-related features, NaN means that there is no basement.
#
for col in ("BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"):
    all_data[col] = all_data[col].fillna("None")
# - **MasVnrArea and MasVnrType** : NA most likely means no masonry veneer for these houses. We can fill in 0 for the area and None for the type.
#
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
# - **MSZoning (the general zoning classification)** : 'RL' is by far the most common value, so we can fill in missing values with 'RL'
#
all_data["MSZoning"] = all_data["MSZoning"].fillna(all_data["MSZoning"].mode()[0])
# - **Utilities** : for this categorical feature all records are "AllPub" except one "NoSeWa" and 2 NA. Since the house with 'NoSeWa' is in the training set, **this feature won't help in predictive modelling**, so we can safely drop it.
#
all_data = all_data.drop(["Utilities"], axis=1)
# - **Functional** : the data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# - **Electrical** : it has one NA value. Since this feature is mostly 'SBrkr', we can set that for the missing value.
#
all_data["Electrical"] = all_data["Electrical"].fillna(all_data["Electrical"].mode()[0])
# - **KitchenQual** : only one NA value; as with Electrical, we set 'TA' (the most frequent value) for the missing value in KitchenQual.
#
all_data["KitchenQual"] = all_data["KitchenQual"].fillna(
    all_data["KitchenQual"].mode()[0]
)
# - **Exterior1st and Exterior2nd** : again, both Exterior 1 and 2 have only one missing value each. We just substitute the most common string.
#
all_data["Exterior1st"] = all_data["Exterior1st"].fillna(
all_data["Exterior1st"].mode()[0]
)
all_data["Exterior2nd"] = all_data["Exterior2nd"].fillna(
all_data["Exterior2nd"].mode()[0]
)
# - **SaleType** : fill in again with the most frequent value, which is "WD"
all_data["SaleType"] = all_data["SaleType"].fillna(all_data["SaleType"].mode()[0])
# - **MSSubClass** : NA most likely means no building class. We can replace missing values with None
#
all_data["MSSubClass"] = all_data["MSSubClass"].fillna("None")
# ###Engineering more features
# **Transforming some numerical variables that are really categorical**
# MSSubClass = the building class
all_data["MSSubClass"] = all_data["MSSubClass"].apply(str)
# Convert OverallCond into a categorical variable
all_data["OverallCond"] = all_data["OverallCond"].astype(str)
# Year and month sold are transformed into categorical features.
all_data["YrSold"] = all_data["YrSold"].astype(str)
all_data["MoSold"] = all_data["MoSold"].astype(str)
# **Label encoding some categorical variables that may contain information in their ordering**
from sklearn.preprocessing import LabelEncoder
cols = (
"FireplaceQu",
"BsmtQual",
"BsmtCond",
"GarageQual",
"GarageCond",
"ExterQual",
"ExterCond",
"HeatingQC",
"PoolQC",
"KitchenQual",
"BsmtFinType1",
"BsmtFinType2",
"Functional",
"Fence",
"BsmtExposure",
"GarageFinish",
"LandSlope",
"LotShape",
"PavedDrive",
"Street",
"Alley",
"CentralAir",
"MSSubClass",
"OverallCond",
"YrSold",
"MoSold",
)
# process columns, apply LabelEncoder to categorical features
for c in cols:
    lbl = LabelEncoder()
    lbl.fit(list(all_data[c].values))
    all_data[c] = lbl.transform(list(all_data[c].values))
print("Shape of all_data: {}".format(all_data.shape))
# **Adding one more important feature**
# Since area-related features are very important for determining house prices, we add one more feature: the total area of the basement, first and second floors of each house
# Adding the total square-footage feature
all_data["TotalSF"] = (
all_data["TotalBsmtSF"] + all_data["1stFlrSF"] + all_data["2ndFlrSF"]
)
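# A quick check (a sketch) of how strongly the new TotalSF feature relates to the target;
# only the first ntrain rows of all_data have a known (log-scale) SalePrice in y_train.
print(np.corrcoef(all_data["TotalSF"].iloc[:ntrain], y_train)[0, 1])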
# **Skewed features**
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = (
all_data[numeric_feats]
.apply(lambda x: skew(x.dropna()))
.sort_values(ascending=False)
)
print("\nВикривлення у числових характеристиках: \n")
skewness = pd.DataFrame({"Викривлення": skewed_feats})
skewness.head(10)
# **Перетворення Бокса-Кокса (сильно) викривлених ознак**
skewness = skewness[abs(skewness) > 0.75]
print(
"Тут є {} відхилень(ня) числових характеристик до перетворення Кокса-Бокса".format(
skewness.shape[0]
)
)
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
# all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
# all_data[skewed_features] = np.log1p(all_data[skewed_features])
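# A quick sketch re-checking the skew after the Box-Cox transform; most values should now
# be much closer to zero than in the table above.
print(
    all_data[skewed_features]
    .apply(lambda x: skew(x.dropna()))
    .sort_values(ascending=False)
    .head(10)
)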
# **Getting dummy categorical features**
all_data = pd.get_dummies(all_data)
print(all_data.shape)
# Getting the new train and test sets.
train = all_data[:ntrain]
test = all_data[ntrain:]
# #Modelling
# **Import libraries**
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
# **Define a cross-validation strategy**
# We use the cross_val_score function of Sklearn. However, this function has no **shuffle** attribute, so we add one line of code in order to shuffle the dataset prior to cross-validation
# Validation function
n_folds = 5
def rmsle_cv(model):
    # pass the KFold object itself so that shuffling actually takes effect
    kf = KFold(n_folds, shuffle=True, random_state=42)
rmse = np.sqrt(
-cross_val_score(
model, train.values, y_train, scoring="neg_mean_squared_error", cv=kf
)
)
return rmse
# ##Base models
# **LASSO Regression** :
# This model may be very sensitive to outliers, so we need to make it more robust to them. For that we use sklearn's RobustScaler() method on the pipeline
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))
# - **Elastic Net Regression** :
# again made robust to outliers
ENet = make_pipeline(
RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=3)
)
# - **Kernel Ridge Regression** :
KRR = KernelRidge(alpha=0.6, kernel="polynomial", degree=2, coef0=2.5)
# - **Gradient Boosting Regression** :
# with **huber** loss, which makes it robust to outliers
#
GBoost = GradientBoostingRegressor(
n_estimators=3000,
learning_rate=0.05,
max_depth=4,
max_features="sqrt",
min_samples_leaf=15,
min_samples_split=10,
loss="huber",
random_state=5,
)
# - **XGBoost** :
model_xgb = xgb.XGBRegressor(
colsample_bytree=0.4603,
gamma=0.0468,
learning_rate=0.05,
max_depth=3,
min_child_weight=1.7817,
n_estimators=2200,
reg_alpha=0.4640,
reg_lambda=0.8571,
subsample=0.5213,
silent=1,
random_state=7,
nthread=-1,
)
# - **LightGBM** :
model_lgb = lgb.LGBMRegressor(
objective="regression",
num_leaves=5,
learning_rate=0.05,
n_estimators=720,
max_bin=55,
bagging_fraction=0.8,
bagging_freq=5,
feature_fraction=0.2319,
feature_fraction_seed=9,
bagging_seed=9,
min_data_in_leaf=6,
min_sum_hessian_in_leaf=11,
)
# ###Base model scores
# Let's see how these base models perform on the data by evaluating the cross-validation RMSE
score = rmsle_cv(lasso)
print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(KRR)
print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_xgb)
print("Xgboost score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_lgb)
print("LGBM score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/917/129917281.ipynb
| null | null |
[{"Id": 129917281, "ScriptId": 38407975, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6697926, "CreationDate": "05/17/2023 12:20:06", "VersionNumber": 5.0, "Title": "Predict4", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 577.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 563.0, "LinesInsertedFromFork": 296.0, "LinesDeletedFromFork": 428.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 281.0, "TotalVotes": 0}]
| null | null | null | null |
# Імпорт необхідних для подальшої роботи бібліотек
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style("darkgrid")
import warnings
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn
import sklearn.linear_model as linear_model
import xgboost as xgb
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from IPython.display import HTML, display
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import scipy.stats as stats
from scipy import stats
from scipy.stats import norm, skew
pd.set_option("display.float_format", lambda x: "{:.3f}".format(x))
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Імпорт тестових даних в фреймворк
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
quantitative = [f for f in train.columns if train.dtypes[f] != "object"]
quantitative.remove("SalePrice")
quantitative.remove("Id")
qualitative = [f for f in train.columns if train.dtypes[f] == "object"]
##Перші 10 рядків тренувального набору даних
train.head(10)
##Перші 10 рядків тестового набору даних
test.head(10)
# **Огляд:**
# Є 1460 екземплярів навчальних даних і 1460 тестових даних. Загальна кількість атрибутів дорівнює 81, з них 36 кількісних, 43 категоріальних + Id та SalePrice.
# Кількісні: 1stFlrSF, 2ndFlrSF, 3SsnPorch, BedroomAbvGr, BsmtFinSF1, BsmtFinSF2, BsmtFullBath, BsmtHalfBath, BsmtUnfSF, EnclosedPorch, Fireplaces, FullBath, GarageArea, GarageCars, GarageYrBlt, GrLivArea, HalfBath, KitchenAbvGr, LotArea, LotFrontage, LowQualFinSF, MSSubClass, MasVnrArea, MiscVal, MoSold, OpenPorchSF, OverallCond, OverallQual, PoolArea, ScreenPorch, TotRmsAbvGrd, TotalBsmtSF, WoodDeckSF, YearBuilt, YearRemodAdd, YrSold
# Якісні: Alley, BldgType, BsmtCond, BsmtExposure, BsmtFinType1, BsmtFinType2, BsmtQual, CentralAir, Condition1, Condition2, Electrical, ExterCond, ExterQual, Exterior1st, Exterior2nd, Fence, FireplaceQu, Foundation, Functional, GarageCond, GarageFinish, GarageQual, GarageType, Опалення, ОпаленняQC, HouseStyle, KitchenQual, LandContour, LandSlope, LotConfig, LotShape, MSZoning, MasVnrType, MiscFeature, Neighborhood, PavedDrive, PoolQC, RoofMatl, RoofStyle, SaleCondition, SaleType, Street, Utilities,
missing = train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing.plot.bar()
# 19 атрибутів мають пропущені значення, 5 з яких становлять понад 50% усіх даних. У більшості випадків NAN означає відсутність об'єкта, описаного атрибутом, наприклад, відсутній басейн, паркан, немає гаража та підвалу.
import scipy.stats as st
y = train["SalePrice"]
plt.figure(1, figsize=(10, 6))
plt.title("Johnson SU")
sns.distplot(y, kde=False, fit=st.johnsonsu)
plt.figure(2, figsize=(10, 6))
plt.title("Нормальний розподіл")
sns.distplot(y, kde=False, fit=st.norm)
plt.figure(3, figsize=(10, 6))
plt.title("Логарифмічно нормальний розподіл")
sns.distplot(y, kde=False, fit=st.lognorm)
plt.show()
# Очевидно, що SalePrice не підпорядковується нормальному розподілу, тому перед виконанням регресії її потрібно трансформувати. Хоча логарифмічне перетворення дає досить хороші результати, найкраще підходить необмежений розподіл Джонсона.
test_normality = lambda x: stats.shapiro(x.fillna(0))[1] < 0.01
normal = pd.DataFrame(train[quantitative])
normal = normal.apply(test_normality)
print(not normal.any())
# Крім того, жодна з кількісних змінних не має нормального розподілу, тому їх також слід трансформувати.
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
# Деякі незалежні змінні виглядають як хороші кандидати для лог-перетворення: TotalBsmtSF, KitchenAbvGr, LotFrontage, LotArea та інші. В той час як регресійне перетворення згладить деякі нерівності, які можуть бути важливими, наприклад, велику кількість будинків з 0 2ndFlrSF. Такі нерівності є хорошими кандидатами для побудови функцій.
# **Категоріальні дані**
# З якісними змінними ми можемо застосувати два методи. Перший - перевірити розподіл SalePrice відносно значень змінної та перерахувати їх. Другий - створити фіктивну змінну для кожної можливої категорії.
for c in qualitative:
train[c] = train[c].astype("category")
if train[c].isnull().any():
train[c] = train[c].cat.add_categories(["MISSING"])
train[c] = train[c].fillna("MISSING")
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
f = pd.melt(train, id_vars=["SalePrice"], value_vars=qualitative)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(boxplot, "value", "SalePrice")
# Some categories appear more diverse with respect to SalePrice than others. Neighborhood has a large impact on house prices. The Partial sale condition seems to be the most expensive. Having a pool on the property raises the price considerably. There are also differences in variability between category values.
def anova(frame):
anv = pd.DataFrame()
anv["feature"] = qualitative
pvals = []
for c in qualitative:
samples = []
for cls in frame[c].unique():
s = frame[frame[c] == cls]["SalePrice"].values
samples.append(s)
pval = stats.f_oneway(*samples)[1]
pvals.append(pval)
anv["pval"] = pvals
return anv.sort_values("pval")
a = anova(train)
a["disparity"] = np.log(1.0 / a["pval"].values)
sns.barplot(data=a, x="feature", y="disparity")
x = plt.xticks(rotation=90)
# This is a quick assessment of the influence of a categorical variable on SalePrice. For each variable, the SalePrice values are split into distinct sets based on the category values. An ANOVA test then checks whether those sets have similar distributions. If a variable has only a minor impact, the set means should be about equal. A decreasing p-value is a sign of increasing disparity between the partitions.
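# A toy illustration of the p-value behaviour (the numbers are made up): clearly separated groups give a tiny p-value, similar groups a large one.
print(stats.f_oneway([100, 110, 105], [300, 310, 305])[1])
print(stats.f_oneway([100, 110, 105], [101, 109, 104])[1])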
def encode(frame, feature):
ordering = pd.DataFrame()
ordering["val"] = frame[feature].unique()
ordering.index = ordering.val
ordering["spmean"] = (
frame[[feature, "SalePrice"]].groupby(feature).mean()["SalePrice"]
)
ordering = ordering.sort_values("spmean")
ordering["ordering"] = range(1, ordering.shape[0] + 1)
ordering = ordering["ordering"].to_dict()
for cat, o in ordering.items():
frame.loc[frame[feature] == cat, feature + "_E"] = o
qual_encoded = []
for q in qualitative:
encode(train, q)
qual_encoded.append(q + "_E")
print(qual_encoded)
# Qualitative variables are now encoded according to an ordering based on the mean of SalePrice.
# Correlation
# As a rule, to reduce confounding, only variables that are uncorrelated with each other (but correlated with SalePrice) should be added to regression models.
def spearman(frame, features):
spr = pd.DataFrame()
spr["feature"] = features
spr["spearman"] = [frame[f].corr(frame["SalePrice"], "spearman") for f in features]
spr = spr.sort_values("spearman")
plt.figure(figsize=(6, 0.25 * len(features)))
sns.barplot(data=spr, y="feature", x="spearman", orient="h")
features = quantitative + qual_encoded
spearman(train, features)
# Spearman correlation is better suited for this case because it picks up relationships between variables even when they are non-linear. OverallQual is the main criterion in setting a house price. Neighborhood has a large influence: partly it has some intrinsic value of its own, but houses in certain regions also tend to share the same characteristics (confounding), which leads to similar valuations.
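# A tiny synthetic check of why Spearman is preferred here: for a monotone but non-linear relationship Pearson understates the association while Spearman reports it as perfect.
xs = pd.Series(range(1, 21))
ys = xs**3
print(xs.corr(ys, method="pearson"), xs.corr(ys, method="spearman"))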
plt.figure(1)
corr = train[quantitative + ["SalePrice"]].corr()
sns.heatmap(corr)
plt.figure(2)
corr = train[qual_encoded + ["SalePrice"]].corr()
sns.heatmap(corr)
plt.figure(3)
corr = pd.DataFrame(
np.zeros([len(quantitative) + 1, len(qual_encoded) + 1]),
index=quantitative + ["SalePrice"],
columns=qual_encoded + ["SalePrice"],
)
for q1 in quantitative + ["SalePrice"]:
for q2 in qual_encoded + ["SalePrice"]:
corr.loc[q1, q2] = train[q1].corr(train[q2])
sns.heatmap(corr)
# There are many strong correlations between variables. Garages seem to be built in the same year as the houses, and basements tend to have the same area as the first floors, which is fairly obvious. Garage area strongly correlates with the number of cars. Neighborhood correlates with many other variables, which supports the idea that houses in the same region share the same characteristics. Dwelling type correlates negatively with KitchenAbvGr.
# Pairwise plots
# It would also be useful to see how the sale price relates to each independent variable.
def pairplot(x, y, **kwargs):
ax = plt.gca()
ts = pd.DataFrame({"time": x, "val": y})
ts = ts.groupby("time").mean()
ts.plot(ax=ax)
plt.xticks(rotation=90)
f = pd.melt(train, id_vars=["SalePrice"], value_vars=quantitative + qual_encoded)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(pairplot, "value", "SalePrice")
# #Data processing
# Let's take a look at the outliers
#
fig, ax1 = plt.subplots()
ax1.scatter(x=train["GrLivArea"], y=train["SalePrice"])
plt.ylabel("SalePrice", fontsize=13)
plt.xlabel("GrLivArea", fontsize=13)
plt.show()
# At the bottom right we can see two points with extremely large GrLivArea values that sold at a low price. These values are huge outliers, so we can safely delete them.
train = train.drop(
train[(train["GrLivArea"] > 4000) & (train["SalePrice"] < 300000)].index
)
fig, ax1 = plt.subplots()
ax1.scatter(train["GrLivArea"], train["SalePrice"])
plt.ylabel("SalePrice", fontsize=13)
plt.xlabel("GrLivArea", fontsize=13)
plt.show()
# Removing outliers is not always safe. We decided to delete these two because they are very large and very bad (extremely large areas for very low prices).
# There are probably other outliers in the training data. However, removing all of them may affect our models badly if there were also outliers in the test data. So, instead of removing them all, we will just make some of our models robust to them. You can refer to the modelling part of this notebook for that.
# ##Target variable
# **SalePrice** is the variable we need to predict, so let's do some analysis on this variable first.
sns.distplot(train["SalePrice"], fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train["SalePrice"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
# Now plot the distribution
plt.legend(
    ["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Frequency")
plt.title("SalePrice distribution")
# Also get the QQ-plot
fig = plt.figure()
res = stats.probplot(train["SalePrice"], plot=plt)
plt.show()
# The target variable is right-skewed. As (linear) models love normally distributed data, we need to transform this variable and make it more normally distributed.
# **Log-transformation of the target variable**
train["SalePrice"] = np.log1p(train["SalePrice"])
# Check the new distribution
sns.distplot(train["SalePrice"], fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train["SalePrice"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
# Plot the distribution
plt.legend(
    ["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Frequency")
plt.title("SalePrice distribution")
# Also the QQ plot
fig = plt.figure()
res = stats.probplot(train["SalePrice"], plot=plt)
plt.show()
# #The skew now seems corrected and the data appears more normally distributed.
# ##Features engineering
# first concatenate the train and test data into one dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(["SalePrice"], axis=1, inplace=True)
print("Full data size is : {}".format(all_data.shape))
# ###Missing data
# - **PoolQC** : The data description says NA means "No Pool". That makes sense, given the huge ratio of missing values (+99%) and the fact that the majority of houses have no pool at all.
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
# - **MiscFeature** : the data description says NA means "no misc feature"
#
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
# - **Alley** : the data description says NA means "no alley access"
all_data["Alley"] = all_data["Alley"].fillna("None")
# - **Fence** : the data description says NA means "no fence"
all_data["Fence"] = all_data["Fence"].fillna("None")
# - **FireplaceQu** : data description says NA means "no fireplace"
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# - **LotFrontage** : Since the area of each street connected to the house property most likely has a similar area to the other houses in its neighbourhood, we can **fill in missing values by the median LotFrontage of the neighbourhood**.
# Group by neighbourhood and fill in the missing value with the median LotFrontage of the neighbourhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median())
)
# - **GarageType, GarageFinish, GarageQual and GarageCond** : Replacing missing data with None
for col in ("GarageType", "GarageFinish", "GarageQual", "GarageCond"):
all_data[col] = all_data[col].fillna("None")
# - **GarageYrBlt, GarageArea and GarageCars** : Replacing missing data with 0 (since no garage means no cars in such a garage).
#
for col in ("GarageYrBlt", "GarageArea", "GarageCars"):
all_data[col] = all_data[col].fillna(0)
# - **BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath** : missing values are likely zero for having no basement
for col in (
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
):
all_data[col] = all_data[col].fillna(0)
# - **BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2** : For all these categorical basement-related features, NaN means that there is no basement.
#
for col in ("BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"):
all_data[col] = all_data[col].fillna("None")
# - **MasVnrArea and MasVnrType** : NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
#
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
# - **MSZoning (The general zoning classification)** : 'RL' is by far the most common value, so we can fill in missing values with 'RL'
#
all_data["MSZoning"] = all_data["MSZoning"].fillna(all_data["MSZoning"].mode()[0])
# - **Utilities** : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA. Since the house with "NoSewa" is in the training set, **this feature won't help in predictive modelling**, so we can safely remove it.
#
all_data = all_data.drop(["Utilities"], axis=1)
# - **Functional** : the data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# - **Electrical** : It has one NA value. Since this feature mostly has the value "SBrkr", we can set that for the missing value.
#
all_data["Electrical"] = all_data["Electrical"].fillna(all_data["Electrical"].mode()[0])
# - **KitchenQual**: Only one NA value, and same as Electrical, we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
#
all_data["KitchenQual"] = all_data["KitchenQual"].fillna(
all_data["KitchenQual"].mode()[0]
)
# - **Exterior1st and Exterior2nd** : Again, both Exterior 1 & 2 have only one missing value. We will just substitute the most common string
#
all_data["Exterior1st"] = all_data["Exterior1st"].fillna(
all_data["Exterior1st"].mode()[0]
)
all_data["Exterior2nd"] = all_data["Exterior2nd"].fillna(
all_data["Exterior2nd"].mode()[0]
)
# - **SaleType** : Fill in again with the most frequent value, which is 'WD'
all_data["SaleType"] = all_data["SaleType"].fillna(all_data["SaleType"].mode()[0])
# - **MSSubClass** : NA most likely means no building class. We can replace missing values with None
#
all_data["MSSubClass"] = all_data["MSSubClass"].fillna("None")
# ###Generating more features
# **Transforming some numerical variables that are really categorical**
# MSSubClass=The building class
all_data["MSSubClass"] = all_data["MSSubClass"].apply(str)
# Changing OverallCond into a categorical variable
all_data["OverallCond"] = all_data["OverallCond"].astype(str)
# Year and month sold are transformed into categorical features.
all_data["YrSold"] = all_data["YrSold"].astype(str)
all_data["MoSold"] = all_data["MoSold"].astype(str)
# **Label encoding some categorical variables that may contain information in their ordering**
from sklearn.preprocessing import LabelEncoder
cols = (
"FireplaceQu",
"BsmtQual",
"BsmtCond",
"GarageQual",
"GarageCond",
"ExterQual",
"ExterCond",
"HeatingQC",
"PoolQC",
"KitchenQual",
"BsmtFinType1",
"BsmtFinType2",
"Functional",
"Fence",
"BsmtExposure",
"GarageFinish",
"LandSlope",
"LotShape",
"PavedDrive",
"Street",
"Alley",
"CentralAir",
"MSSubClass",
"OverallCond",
"YrSold",
"MoSold",
)
# process columns, apply LabelEncoder to categorical features
for c in cols:
    lbl = LabelEncoder()
    lbl.fit(list(all_data[c].values))
    all_data[c] = lbl.transform(list(all_data[c].values))
print("Shape of all_data: {}".format(all_data.shape))
# **Adding one more important feature**
# Since area-related features are very important for determining house prices, we add one more feature: the total area of the basement, first and second floors of each house
# Adding total square footage feature
all_data["TotalSF"] = (
    all_data["TotalBsmtSF"] + all_data["1stFlrSF"] + all_data["2ndFlrSF"]
)
# **Skewed features**
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = (
all_data[numeric_feats]
.apply(lambda x: skew(x.dropna()))
.sort_values(ascending=False)
)
print("\nВикривлення у числових характеристиках: \n")
skewness = pd.DataFrame({"Викривлення": skewed_feats})
skewness.head(10)
# **Перетворення Бокса-Кокса (сильно) викривлених ознак**
skewness = skewness[abs(skewness) > 0.75]
print(
"Тут є {} відхилень(ня) числових характеристик до перетворення Кокса-Бокса".format(
skewness.shape[0]
)
)
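# For reference, boxcox1p(x, lam) computes ((1 + x)**lam - 1) / lam for lam != 0; a quick numeric sanity check (illustrative only):
from scipy.special import boxcox1p
x_demo = np.array([0.0, 1.0, 10.0])
print(boxcox1p(x_demo, 0.15))
print(((1 + x_demo) ** 0.15 - 1) / 0.15)  # same values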
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
# all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
# all_data[skewed_features] = np.log1p(all_data[skewed_features])
# **Getting dummy categorical features**
all_data = pd.get_dummies(all_data)
print(all_data.shape)
# Getting the new train and test sets.
train = all_data[:ntrain]
test = all_data[ntrain:]
# #Modelling
# **Import libraries**
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
# **Define a cross-validation strategy**
# We use the cross_val_score function of Sklearn. However this function has no **shuffle** attribute, so we add one line of code in order to shuffle the dataset prior to cross-validation
# Validation function
n_folds = 5
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(train.values)
rmse = np.sqrt(
-cross_val_score(
model, train.values, y_train, scoring="neg_mean_squared_error", cv=kf
)
)
return rmse
# ##Base models
# **LASSO Regression** :
# This model may be very sensitive to outliers, so we need to make it more robust to them. For that we use the sklearn RobustScaler() method on the pipeline
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))
# - **Elastic Net Regression** :
# again made robust to outliers
ENet = make_pipeline(
RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=0.9, random_state=3)
)
# - **Kernel Ridge Regression** :
KRR = KernelRidge(alpha=0.6, kernel="polynomial", degree=2, coef0=2.5)
# - **Gradient Boosting Regression** :
# With **huber** loss, which makes it robust to outliers
#
GBoost = GradientBoostingRegressor(
n_estimators=3000,
learning_rate=0.05,
max_depth=4,
max_features="sqrt",
min_samples_leaf=15,
min_samples_split=10,
loss="huber",
random_state=5,
)
# - **XGBoost** :
model_xgb = xgb.XGBRegressor(
colsample_bytree=0.4603,
gamma=0.0468,
learning_rate=0.05,
max_depth=3,
min_child_weight=1.7817,
n_estimators=2200,
reg_alpha=0.4640,
reg_lambda=0.8571,
subsample=0.5213,
silent=1,
random_state=7,
nthread=-1,
)
# - **LightGBM** :
model_lgb = lgb.LGBMRegressor(
objective="regression",
num_leaves=5,
learning_rate=0.05,
n_estimators=720,
max_bin=55,
bagging_fraction=0.8,
bagging_freq=5,
feature_fraction=0.2319,
feature_fraction_seed=9,
bagging_seed=9,
min_data_in_leaf=6,
min_sum_hessian_in_leaf=11,
)
# ###Base model scores
# Let's see how these base models perform on the data by evaluating the cross-validation RMSE
score = rmsle_cv(lasso)
print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(KRR)
print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_xgb)
print("Xgboost score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_lgb)
print("LGBM score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
| false | 0 | 10,086 | 0 | 10,086 | 10,086 |
||
129917638
|
<jupyter_start><jupyter_text>Students Performance in Exams
### Context
Marks secured by the students
### Content
This data set consists of the marks secured by the students in various subjects.
Kaggle dataset identifier: students-performance-in-exams
<jupyter_code>import pandas as pd
df = pd.read_csv('students-performance-in-exams/StudentsPerformance.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 gender 1000 non-null object
1 race/ethnicity 1000 non-null object
2 parental level of education 1000 non-null object
3 lunch 1000 non-null object
4 test preparation course 1000 non-null object
5 math score 1000 non-null int64
6 reading score 1000 non-null int64
7 writing score 1000 non-null int64
dtypes: int64(3), object(5)
memory usage: 62.6+ KB
<jupyter_text>Examples:
{
"gender": "female",
"race/ethnicity": "group B",
"parental level of education": "bachelor's degree",
"lunch": "standard",
"test preparation course": "none",
"math score": 72,
"reading score": 72,
"writing score": 74
}
{
"gender": "female",
"race/ethnicity": "group C",
"parental level of education": "some college",
"lunch": "standard",
"test preparation course": "completed",
"math score": 69,
"reading score": 90,
"writing score": 88
}
{
"gender": "female",
"race/ethnicity": "group B",
"parental level of education": "master's degree",
"lunch": "standard",
"test preparation course": "none",
"math score": 90,
"reading score": 95,
"writing score": 93
}
{
"gender": "male",
"race/ethnicity": "group A",
"parental level of education": "associate's degree",
"lunch": "free/reduced",
"test preparation course": "none",
"math score": 47,
"reading score": 57,
"writing score": 44
}
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# chi square
# parental
# test
student_perf_df = pd.read_csv(
"/kaggle/input/students-performance-in-exams/StudentsPerformance.csv"
)
student_perf_df.head()
# gender: gender of the student
# race/ethnicity: race of the student
# parental level of education: education level of student's parent
# test preparation course: did the student take a preparation course for the exam?
# math score: math exam score
# reading score: reading exam score
# writing score: writing exam score
# # Visualize and Analyze
male_df = student_perf_df[student_perf_df.gender == "male"].iloc[
:, lambda df: [0, 1, 2, 5, 6, 7]
]
male_df.head()
male_df.describe()
female_df = student_perf_df[student_perf_df.gender == "female"].iloc[
:, lambda df: [0, 1, 2, 5, 6, 7]
]
female_df.head()
female_df.describe()
import seaborn as sns
plt.subplots(figsize=(14, 8))
sns.heatmap(
male_df.iloc[:, 3:6].corr(),
annot=True,
linewidths=0.4,
linecolor="black",
fmt="1.2f",
cbar=False,
)
plt.title("Male Correlation", fontsize=30)
plt.xticks(rotation=35)
plt.show()
plt.subplots(figsize=(14, 8))
sns.heatmap(
female_df.iloc[:, 3:6].corr(),
annot=True,
linewidths=0.4,
linecolor="black",
fmt="1.2f",
cbar=False,
)
plt.title("Female Correlation", fontsize=30)
plt.xticks(rotation=35)
plt.show()
# We can see a high correlation between scores for both male and female
sns.histplot(male_df["math score"], label="male", kde=True)
sns.histplot(female_df["math score"], label="female", kde=True)
plt.legend()
plt.title("Female- Male Math Scores")
plt.show()
sns.histplot(male_df["reading score"], label="male", kde=True)
sns.histplot(female_df["reading score"], label="female", kde=True)
plt.legend()
plt.title("Female- Male Reading Scores")
plt.show()
sns.histplot(male_df["writing score"], label="male", kde=True)
sns.histplot(female_df["writing score"], label="female", kde=True)
plt.legend()
plt.title("Female- Male Writing Scores")
plt.show()
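# The header comments mention a chi-square test; a minimal sketch of how one could be run on two of the categorical columns (this is an assumption about the intended analysis, using scipy):
from scipy.stats import chi2_contingency

contingency = pd.crosstab(
    student_perf_df["parental level of education"],
    student_perf_df["test preparation course"],
)
chi2, p_value, dof, expected = chi2_contingency(contingency)
print("chi2 = {:.2f}, p-value = {:.4f}, dof = {}".format(chi2, p_value, dof))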
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/917/129917638.ipynb
|
students-performance-in-exams
|
spscientist
|
[{"Id": 129917638, "ScriptId": 38644219, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2079975, "CreationDate": "05/17/2023 12:22:31", "VersionNumber": 1.0, "Title": "ENF Chi Square", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 114.0, "LinesInsertedFromPrevious": 114.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186336221, "KernelVersionId": 129917638, "SourceDatasetVersionId": 169835}]
|
[{"Id": 169835, "DatasetId": 74977, "DatasourceVersionId": 180443, "CreatorUserId": 2094163, "LicenseName": "Unknown", "CreationDate": "11/09/2018 18:25:25", "VersionNumber": 1.0, "Title": "Students Performance in Exams", "Slug": "students-performance-in-exams", "Subtitle": "Marks secured by the students in various subjects", "Description": "### Context\n\nMarks secured by the students\n\n\n### Content\n\nThis data set consists of the marks secured by the students in various subjects. \n\n\n### Acknowledgements\n\nhttp://roycekimmons.com/tools/generated_data/exams\n\n\n### Inspiration\n\nTo understand the influence of the parents background, test preparation etc on students performance", "VersionNotes": "Initial release", "TotalCompressedBytes": 72036.0, "TotalUncompressedBytes": 72036.0}]
|
[{"Id": 74977, "CreatorUserId": 2094163, "OwnerUserId": 2094163.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 169835.0, "CurrentDatasourceVersionId": 180443.0, "ForumId": 84238, "Type": 2, "CreationDate": "11/09/2018 18:25:25", "LastActivityDate": "11/09/2018", "TotalViews": 1423654, "TotalDownloads": 235440, "TotalVotes": 3848, "TotalKernels": 1151}]
|
[{"Id": 2094163, "UserName": "spscientist", "DisplayName": "Jakki Seshapanpu", "RegisterDate": "07/24/2018", "PerformanceTier": 1}]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# chi square
# parental
# test
student_perf_df = pd.read_csv(
"/kaggle/input/students-performance-in-exams/StudentsPerformance.csv"
)
student_perf_df.head()
# gender: gender of the student
# race/ethnicity: race of the student
# parental level of education: education level of student's parent
# test preparation course: did the student take a preparation course for the exam?
# math score: math exam score
# reading score: reading exam score
# writing score: writing exam score
# # Visualize and Analyze
male_df = student_perf_df[student_perf_df.gender == "male"].iloc[
:, lambda df: [0, 1, 2, 5, 6, 7]
]
male_df.head()
male_df.describe()
female_df = student_perf_df[student_perf_df.gender == "female"].iloc[
:, lambda df: [0, 1, 2, 5, 6, 7]
]
female_df.head()
female_df.describe()
import seaborn as sns
plt.subplots(figsize=(14, 8))
sns.heatmap(
male_df.iloc[:, 3:6].corr(),
annot=True,
linewidths=0.4,
linecolor="black",
fmt="1.2f",
cbar=False,
)
plt.title("Male Correlation", fontsize=30)
plt.xticks(rotation=35)
plt.show()
plt.subplots(figsize=(14, 8))
sns.heatmap(
female_df.iloc[:, 3:6].corr(),
annot=True,
linewidths=0.4,
linecolor="black",
fmt="1.2f",
cbar=False,
)
plt.title("Female Correlation", fontsize=30)
plt.xticks(rotation=35)
plt.show()
# We can see a high correlation between scores for both male and female
sns.histplot(male_df["math score"], label="male", kde=True)
sns.histplot(female_df["math score"], label="female", kde=True)
plt.legend()
plt.title("Female- Male Math Scores")
plt.show()
sns.histplot(male_df["reading score"], label="male", kde=True)
sns.histplot(female_df["reading score"], label="female", kde=True)
plt.legend()
plt.title("Female- Male Reading Scores")
plt.show()
sns.histplot(male_df["writing score"], label="male", kde=True)
sns.histplot(female_df["writing score"], label="female", kde=True)
plt.legend()
plt.title("Female- Male Writing Scores")
plt.show()
|
[{"students-performance-in-exams/StudentsPerformance.csv": {"column_names": "[\"gender\", \"race/ethnicity\", \"parental level of education\", \"lunch\", \"test preparation course\", \"math score\", \"reading score\", \"writing score\"]", "column_data_types": "{\"gender\": \"object\", \"race/ethnicity\": \"object\", \"parental level of education\": \"object\", \"lunch\": \"object\", \"test preparation course\": \"object\", \"math score\": \"int64\", \"reading score\": \"int64\", \"writing score\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 gender 1000 non-null object\n 1 race/ethnicity 1000 non-null object\n 2 parental level of education 1000 non-null object\n 3 lunch 1000 non-null object\n 4 test preparation course 1000 non-null object\n 5 math score 1000 non-null int64 \n 6 reading score 1000 non-null int64 \n 7 writing score 1000 non-null int64 \ndtypes: int64(3), object(5)\nmemory usage: 62.6+ KB\n", "summary": "{\"math score\": {\"count\": 1000.0, \"mean\": 66.089, \"std\": 15.16308009600945, \"min\": 0.0, \"25%\": 57.0, \"50%\": 66.0, \"75%\": 77.0, \"max\": 100.0}, \"reading score\": {\"count\": 1000.0, \"mean\": 69.169, \"std\": 14.600191937252216, \"min\": 17.0, \"25%\": 59.0, \"50%\": 70.0, \"75%\": 79.0, \"max\": 100.0}, \"writing score\": {\"count\": 1000.0, \"mean\": 68.054, \"std\": 15.19565701086965, \"min\": 10.0, \"25%\": 57.75, \"50%\": 69.0, \"75%\": 79.0, \"max\": 100.0}}", "examples": "{\"gender\":{\"0\":\"female\",\"1\":\"female\",\"2\":\"female\",\"3\":\"male\"},\"race\\/ethnicity\":{\"0\":\"group B\",\"1\":\"group C\",\"2\":\"group B\",\"3\":\"group A\"},\"parental level of education\":{\"0\":\"bachelor's degree\",\"1\":\"some college\",\"2\":\"master's degree\",\"3\":\"associate's degree\"},\"lunch\":{\"0\":\"standard\",\"1\":\"standard\",\"2\":\"standard\",\"3\":\"free\\/reduced\"},\"test preparation course\":{\"0\":\"none\",\"1\":\"completed\",\"2\":\"none\",\"3\":\"none\"},\"math score\":{\"0\":72,\"1\":69,\"2\":90,\"3\":47},\"reading score\":{\"0\":72,\"1\":90,\"2\":95,\"3\":57},\"writing score\":{\"0\":74,\"1\":88,\"2\":93,\"3\":44}}"}}]
| true | 1 |
<start_data_description><data_path>students-performance-in-exams/StudentsPerformance.csv:
<column_names>
['gender', 'race/ethnicity', 'parental level of education', 'lunch', 'test preparation course', 'math score', 'reading score', 'writing score']
<column_types>
{'gender': 'object', 'race/ethnicity': 'object', 'parental level of education': 'object', 'lunch': 'object', 'test preparation course': 'object', 'math score': 'int64', 'reading score': 'int64', 'writing score': 'int64'}
<dataframe_Summary>
{'math score': {'count': 1000.0, 'mean': 66.089, 'std': 15.16308009600945, 'min': 0.0, '25%': 57.0, '50%': 66.0, '75%': 77.0, 'max': 100.0}, 'reading score': {'count': 1000.0, 'mean': 69.169, 'std': 14.600191937252216, 'min': 17.0, '25%': 59.0, '50%': 70.0, '75%': 79.0, 'max': 100.0}, 'writing score': {'count': 1000.0, 'mean': 68.054, 'std': 15.19565701086965, 'min': 10.0, '25%': 57.75, '50%': 69.0, '75%': 79.0, 'max': 100.0}}
<dataframe_info>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 gender 1000 non-null object
1 race/ethnicity 1000 non-null object
2 parental level of education 1000 non-null object
3 lunch 1000 non-null object
4 test preparation course 1000 non-null object
5 math score 1000 non-null int64
6 reading score 1000 non-null int64
7 writing score 1000 non-null int64
dtypes: int64(3), object(5)
memory usage: 62.6+ KB
<some_examples>
{'gender': {'0': 'female', '1': 'female', '2': 'female', '3': 'male'}, 'race/ethnicity': {'0': 'group B', '1': 'group C', '2': 'group B', '3': 'group A'}, 'parental level of education': {'0': "bachelor's degree", '1': 'some college', '2': "master's degree", '3': "associate's degree"}, 'lunch': {'0': 'standard', '1': 'standard', '2': 'standard', '3': 'free/reduced'}, 'test preparation course': {'0': 'none', '1': 'completed', '2': 'none', '3': 'none'}, 'math score': {'0': 72, '1': 69, '2': 90, '3': 47}, 'reading score': {'0': 72, '1': 90, '2': 95, '3': 57}, 'writing score': {'0': 74, '1': 88, '2': 93, '3': 44}}
<end_description>
| 736 | 0 | 1,373 | 736 |
129917379
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Making imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12.0, 9.0)
# Preprocessing Input data
data = pd.read_csv("/kaggle/input/linear-regression-dataset/example_data.csv")
X = data.iloc[:, 0]
Y = data.iloc[:, 1]
plt.scatter(X, Y)
plt.show()
# Building the model
X_mean = np.mean(X)
Y_mean = np.mean(Y)
num = 0
den = 0
for i in range(len(X)):
num += (X[i] - X_mean) * (Y[i] - Y_mean)
den += (X[i] - X_mean) ** 2
m = num / den
c = Y_mean - m * X_mean
print(m, c)
# Making predictions
Y_pred = m * X + c
plt.scatter(X, Y) # actual
plt.plot([min(X), max(X)], [min(Y_pred), max(Y_pred)], color="red") # predicted
plt.show()
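# Quick sanity check of the closed-form slope/intercept against numpy's least-squares fit (illustrative only):
slope, intercept = np.polyfit(X, Y, 1)
print(slope, intercept, np.isclose(slope, m), np.isclose(intercept, c))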
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/917/129917379.ipynb
| null | null |
[{"Id": 129917379, "ScriptId": 38644868, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12287728, "CreationDate": "05/17/2023 12:20:45", "VersionNumber": 1.0, "Title": "ML Lab-Linear regression", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Making imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12.0, 9.0)
# Preprocessing Input data
data = pd.read_csv("/kaggle/input/linear-regression-dataset/example_data.csv")
X = data.iloc[:, 0]
Y = data.iloc[:, 1]
plt.scatter(X, Y)
plt.show()
# Building the model
X_mean = np.mean(X)
Y_mean = np.mean(Y)
num = 0
den = 0
for i in range(len(X)):
num += (X[i] - X_mean) * (Y[i] - Y_mean)
den += (X[i] - X_mean) ** 2
m = num / den
c = Y_mean - m * X_mean
print(m, c)
# Making predictions
Y_pred = m * X + c
plt.scatter(X, Y) # actual
plt.plot([min(X), max(X)], [min(Y_pred), max(Y_pred)], color="red") # predicted
plt.show()
| false | 0 | 465 | 0 | 465 | 465 |
||
129917931
|
<jupyter_start><jupyter_text>eval-squad
Kaggle dataset identifier: eval-squad
<jupyter_script># # Answering System
# ## (1) process data.
# Use the official dev set as test set, and
# split the original training set into training set and validation set (5000 samples). Prepare
# the data according to the requirements of ML model training.
import pandas as pd
import json
def preprocess(file_name):
with open(file_name, "r", encoding="utf-8") as file:
squad_data = json.load(file)
data = []
for article in squad_data["data"]:
title = article["title"]
for paragraph in article["paragraphs"]:
context = paragraph["context"]
for qa in paragraph["qas"]:
question = qa["question"]
id = qa["id"]
for answer in qa["answers"]:
answer_text = answer["text"]
answer_start = answer["answer_start"]
entry = {
"id": id,
"title": title,
"context": context,
"question": question,
"answers": {
"text": [answer_text],
"answer_start": [answer_start],
},
}
data.append(entry)
return data
dev_data = preprocess("/kaggle/input/squad-20/dev-v2.0.json")
train_data = preprocess("/kaggle/input/squad-20/train-v2.0.json")
dev_data[0:2]
print("length of dev:", len(dev_data))
print("length of train:", len(train_data))
pd.DataFrame(train_data).groupby(["id"]).count().reset_index().sort_values(by="id")
pd.DataFrame(dev_data)
# - train_data: there are no cases where the same context and the same question map to different answers
# - dev_data: contains duplicates
# ### Drop duplicates
dev_data_1 = pd.DataFrame(dev_data).drop_duplicates(
subset=["id", "question", "context"]
)
dev_data_1
train_data_1 = pd.DataFrame(train_data).drop_duplicates(
subset=["id", "question", "context"]
)
train_data_1
print("after drop duplicates for same context, same question, same answer")
print("length of test set:", len(dev_data_1))
print("length of train set:", len(train_data_1))
# ### Dataset
import random
# Shuffle the training data first
random.shuffle(train_data)
validation_length = 5000
training_data = train_data[:-validation_length]
validation_data = train_data[-validation_length:]
print(len(training_data))  # print the length of the training set
print(len(validation_data))  # print the length of the validation set
train_data_ = pd.DataFrame(training_data)
valid_data_ = pd.DataFrame(validation_data)
import pandas as pd
from datasets import Dataset, DatasetDict
# Convert the Pandas DataFrames to Datasets
train_dataset = Dataset.from_pandas(train_data_)
validation_dataset = Dataset.from_pandas(valid_data_)
test_dataset = Dataset.from_pandas(dev_data_1)
# Combine the Datasets into a DatasetDict
data = DatasetDict(
    {"train": train_dataset, "test": test_dataset, "validation": validation_dataset}
)
# Print the converted dataset
print(data)
data["train"][0:3]
# ### tokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")  # keep in step with the distilbert-base-cased model loaded below
def preprocess_function(examples):
"""
examples: dataframe
return: a dataset that have features ['input_ids', 'attention_mask', 'start_positions', 'end_positions']
"""
    # Strip leading/trailing whitespace from each value under the "question" key and collect the questions in a list
    questions = [q.strip() for q in examples["question"]]
    # Encode the questions and contexts with the tokenizer to build the model inputs
    inputs = tokenizer(
        questions,
        examples["context"],
        max_length=384,
        truncation="only_second",
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Pop the "offset_mapping" entry out of inputs and keep it in offset_mapping
    offset_mapping = inputs.pop("offset_mapping")
    # Get the answers from the examples dict
    answers = examples["answers"]
    # Lists that will hold the start and end positions
    start_positions = []
    end_positions = []
    # Walk over offset_mapping and find the start and end position of each answer
    for i, offset in enumerate(offset_mapping):
        # Get the i-th answer
        answer = answers[i]
        # Character positions where the answer starts and ends
        start_char = answer["answer_start"][0]
        end_char = answer["answer_start"][0] + len(answer["text"][0])
        # Get the sequence IDs of the input
        sequence_ids = inputs.sequence_ids(
            i
        )  # for the i-th example, which sequence each token belongs to after tokenization
        # Find where the context starts and ends
        idx = 0
        while sequence_ids[idx] != 1:
            idx += 1
        context_start = idx
        while sequence_ids[idx] == 1:
            idx += 1
        context_end = idx - 1
        # If the answer is not fully inside the context, label it (0, 0)
        if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
            start_positions.append(0)
            end_positions.append(0)
        else:
            # Otherwise the start and end positions are the corresponding token positions
            idx = context_start
            while idx <= context_end and offset[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)
            idx = context_end
            while idx >= context_start and offset[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)
    # Store the start and end positions under the corresponding keys in inputs
    inputs["start_positions"] = start_positions
    inputs["end_positions"] = end_positions
    # Return the processed inputs dict
return inputs
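# To make the offset/sequence-id logic above concrete, here is a small standalone peek at one encoded example (a sketch for inspection only):
example = data["train"][0]
enc = tokenizer(
    example["question"],
    example["context"],
    max_length=384,
    truncation="only_second",
    return_offsets_mapping=True,
)
print(enc.sequence_ids()[:20])  # None = special token, 0 = question, 1 = context
print(enc["offset_mapping"][:10])  # (start_char, end_char) for each token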
tokenized_data = data.map(
preprocess_function, batched=True, remove_columns=data["train"].column_names
)
tokenized_data["validation"]
tokenized_data["train"]
tokenized_data.save_to_disk("/kaggle/working/tokenized_data")
from datasets import load_from_disk
reloaded_dataset = load_from_disk("/kaggle/working/tokenized_data")
# ## (2) Finetune
from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer
model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-cased")
from transformers import DefaultDataCollator
data_collator = DefaultDataCollator()
from huggingface_hub import notebook_login
notebook_login()
# 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
tokenized_data["train"]
tokenized_data["validation"]
from datasets import load_metric
metric = load_metric("/kaggle/input/eval-squad/squad.py")
training_args = TrainingArguments(
output_dir="qasystem_distilbert",
evaluation_strategy="epoch",
learning_rate=3e-5,
per_device_train_batch_size=24,
per_device_eval_batch_size=24,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True,
)
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=metric,
train_dataset=tokenized_data["train"],
eval_dataset=tokenized_data["validation"],
tokenizer=tokenizer,
data_collator=data_collator,
)
# trainer.train()
# # weight and bias API key: 9d4c36ec325757de4dc529602fb94a877f93a94a
# trainer.push_to_hub('qasystem_distilbert')
# ## Hyperparameters Search
args = TrainingArguments(
report_to="wandb",
output_dir="qasystem_distilbert",
evaluation_strategy="epoch",
learning_rate=3e-5,
per_device_train_batch_size=24,
per_device_eval_batch_size=24,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True,
)
def model_init(trial):
return AutoModelForQuestionAnswering.from_pretrained(
# model_args.model_name_or_path,
# from_tf=bool(".ckpt" in model_args.model_name_or_path),
# config=config,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
"distilbert-base-cased"
)
from datasets import load_metric
metric = load_metric("/kaggle/input/eval-squad/squad.py")
trainer = Trainer(
model=None,
args=args,
train_dataset=tokenized_data["train"],
eval_dataset=tokenized_data["validation"],
compute_metrics=metric,
tokenizer=tokenizer,
model_init=model_init,
data_collator=data_collator,
)
def wandb_hp_space(trial):
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"per_device_train_batch_size": {"values": [16, 32]},
},
}
best_trial = trainer.hyperparameter_search(
direction="maximize",
backend="wandb",
hp_space=wandb_hp_space,
n_trials=20,
# compute_objective=compute_objective,
)
# weight and bias API key: 9d4c36ec325757de4dc529602fb94a877f93a94a
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/917/129917931.ipynb
|
eval-squad
|
arlene025
|
[{"Id": 129917931, "ScriptId": 38475171, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11155395, "CreationDate": "05/17/2023 12:24:40", "VersionNumber": 2.0, "Title": "Question_Answer_System", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 311.0, "LinesInsertedFromPrevious": 109.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 202.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186336729, "KernelVersionId": 129917931, "SourceDatasetVersionId": 5707802}, {"Id": 186336728, "KernelVersionId": 129917931, "SourceDatasetVersionId": 160296}]
|
[{"Id": 5707802, "DatasetId": 3281373, "DatasourceVersionId": 5783870, "CreatorUserId": 11155395, "LicenseName": "Unknown", "CreationDate": "05/17/2023 12:08:59", "VersionNumber": 1.0, "Title": "eval-squad", "Slug": "eval-squad", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3281373, "CreatorUserId": 11155395, "OwnerUserId": 11155395.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5707802.0, "CurrentDatasourceVersionId": 5783870.0, "ForumId": 3347107, "Type": 2, "CreationDate": "05/17/2023 12:08:59", "LastActivityDate": "05/17/2023", "TotalViews": 4, "TotalDownloads": 4, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 11155395, "UserName": "arlene025", "DisplayName": "Yaqi Wang", "RegisterDate": "07/27/2022", "PerformanceTier": 1}]
|
# # Answering System
# ## (1) process data.
# Use the official dev set as test set, and
# split the original training set into training set and validation set (5000 samples). Prepare
# the data according to the requirements of ML model training.
import pandas as pd
import json
def preprocess(file_name):
with open(file_name, "r", encoding="utf-8") as file:
squad_data = json.load(file)
data = []
for article in squad_data["data"]:
title = article["title"]
for paragraph in article["paragraphs"]:
context = paragraph["context"]
for qa in paragraph["qas"]:
question = qa["question"]
id = qa["id"]
for answer in qa["answers"]:
answer_text = answer["text"]
answer_start = answer["answer_start"]
entry = {
"id": id,
"title": title,
"context": context,
"question": question,
"answers": {
"text": [answer_text],
"answer_start": [answer_start],
},
}
data.append(entry)
return data
dev_data = preprocess("/kaggle/input/squad-20/dev-v2.0.json")
train_data = preprocess("/kaggle/input/squad-20/train-v2.0.json")
dev_data[0:2]
print("length of dev:", len(dev_data))
print("length of train:", len(train_data))
pd.DataFrame(train_data).groupby(["id"]).count().reset_index().sort_values(by="id")
pd.DataFrame(dev_data)
# - train_data: there are no cases where the same context and the same question map to different answers
# - dev_data: contains duplicates
# ### Drop duplicates
dev_data_1 = pd.DataFrame(dev_data).drop_duplicates(
subset=["id", "question", "context"]
)
dev_data_1
train_data_1 = pd.DataFrame(train_data).drop_duplicates(
subset=["id", "question", "context"]
)
train_data_1
print("after drop duplicates for same context, same question, same answer")
print("length of test set:", len(dev_data_1))
print("length of train set:", len(train_data_1))
# ### Dataset
import random
# Shuffle the training data first
random.shuffle(train_data)
validation_length = 5000
training_data = train_data[:-validation_length]
validation_data = train_data[-validation_length:]
print(len(training_data))  # print the length of the training set
print(len(validation_data))  # print the length of the validation set
train_data_ = pd.DataFrame(training_data)
valid_data_ = pd.DataFrame(validation_data)
import pandas as pd
from datasets import Dataset, DatasetDict
# Convert the Pandas DataFrames to Datasets
train_dataset = Dataset.from_pandas(train_data_)
validation_dataset = Dataset.from_pandas(valid_data_)
test_dataset = Dataset.from_pandas(dev_data_1)
# Combine the Datasets into a DatasetDict
data = DatasetDict(
    {"train": train_dataset, "test": test_dataset, "validation": validation_dataset}
)
# Print the converted dataset
print(data)
data["train"][0:3]
# ### tokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")  # keep in step with the distilbert-base-cased model loaded below
def preprocess_function(examples):
"""
examples: dataframe
return: a dataset that have features ['input_ids', 'attention_mask', 'start_positions', 'end_positions']
"""
    # Strip leading/trailing whitespace from each value under the "question" key and collect the questions in a list
    questions = [q.strip() for q in examples["question"]]
    # Encode the questions and contexts with the tokenizer to build the model inputs
    inputs = tokenizer(
        questions,
        examples["context"],
        max_length=384,
        truncation="only_second",
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Pop the "offset_mapping" entry out of inputs and keep it in offset_mapping
    offset_mapping = inputs.pop("offset_mapping")
    # Get the answers from the examples dict
    answers = examples["answers"]
    # Lists that will hold the start and end positions
    start_positions = []
    end_positions = []
    # Walk over offset_mapping and find the start and end position of each answer
    for i, offset in enumerate(offset_mapping):
        # Get the i-th answer
        answer = answers[i]
        # Character positions where the answer starts and ends
        start_char = answer["answer_start"][0]
        end_char = answer["answer_start"][0] + len(answer["text"][0])
        # Get the sequence IDs of the input
        sequence_ids = inputs.sequence_ids(
            i
        )  # for the i-th example, which sequence each token belongs to after tokenization
        # Find where the context starts and ends
        idx = 0
        while sequence_ids[idx] != 1:
            idx += 1
        context_start = idx
        while sequence_ids[idx] == 1:
            idx += 1
        context_end = idx - 1
        # If the answer is not fully inside the context, label it (0, 0)
        if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
            start_positions.append(0)
            end_positions.append(0)
        else:
            # Otherwise the start and end positions are the corresponding token positions
            idx = context_start
            while idx <= context_end and offset[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)
            idx = context_end
            while idx >= context_start and offset[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)
    # Store the start and end positions under the corresponding keys in inputs
    inputs["start_positions"] = start_positions
    inputs["end_positions"] = end_positions
    # Return the processed inputs dict
return inputs
tokenized_data = data.map(
preprocess_function, batched=True, remove_columns=data["train"].column_names
)
tokenized_data["validation"]
tokenized_data["train"]
tokenized_data.save_to_disk("/kaggle/working/tokenized_data")
from datasets import load_from_disk
reloaded_dataset = load_from_disk("/kaggle/working/tokenized_data")
# ## (2) Finetune
from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer
model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-cased")
from transformers import DefaultDataCollator
data_collator = DefaultDataCollator()
from huggingface_hub import notebook_login
notebook_login()
# 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
tokenized_data["train"]
tokenized_data["validation"]
from datasets import load_metric
metric = load_metric("/kaggle/input/eval-squad/squad.py")
training_args = TrainingArguments(
output_dir="qasystem_distilbert",
evaluation_strategy="epoch",
learning_rate=3e-5,
per_device_train_batch_size=24,
per_device_eval_batch_size=24,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True,
)
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=metric,
train_dataset=tokenized_data["train"],
eval_dataset=tokenized_data["validation"],
tokenizer=tokenizer,
data_collator=data_collator,
)
# trainer.train()
# # weight and bias API key: 9d4c36ec325757de4dc529602fb94a877f93a94a
# trainer.push_to_hub('qasystem_distilbert')
# ## Hyperparameters Search
args = TrainingArguments(
report_to="wandb",
output_dir="qasystem_distilbert",
evaluation_strategy="epoch",
learning_rate=3e-5,
per_device_train_batch_size=24,
per_device_eval_batch_size=24,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True,
)
def model_init(trial):
return AutoModelForQuestionAnswering.from_pretrained(
# model_args.model_name_or_path,
# from_tf=bool(".ckpt" in model_args.model_name_or_path),
# config=config,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
"distilbert-base-cased"
)
from datasets import load_metric
metric = load_metric("/kaggle/input/eval-squad/squad.py")
trainer = Trainer(
model=None,
args=args,
train_dataset=tokenized_data["train"],
eval_dataset=tokenized_data["validation"],
compute_metrics=metric,
tokenizer=tokenizer,
model_init=model_init,
data_collator=data_collator,
)
def wandb_hp_space(trial):
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"per_device_train_batch_size": {"values": [16, 32]},
},
}
best_trial = trainer.hyperparameter_search(
direction="maximize",
backend="wandb",
hp_space=wandb_hp_space,
n_trials=20,
# compute_objective=compute_objective,
)
# weight and bias API key: 9d4c36ec325757de4dc529602fb94a877f93a94a
| false | 0 | 2,563 | 0 | 2,584 | 2,563 |
||
129917598
|
import pandas as pd
data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_4.json")
txt = list(data_may1["Text"])
import re
import string
from nltk.corpus import stopwords
stopwords = set(stopwords.words("english"))
def clean_text(text):
# Remove special characters
text = re.sub(r"[^a-zA-Z0-9\s]", "", text)
# Convert text to lowercase
text = text.lower()
# Change any white space to one space
text = re.sub("\s+", " ", text)
# Remove punctuation
text = text.translate(str.maketrans("", "", string.punctuation))
return text
# def filter_by_pos(text, pos_tags):
# # Tokenize the text into words
# tokens = text
# # Perform POS tagging
# tagged_tokens = pos_tag(tokens)
# # Filter tokens based on specified POS tags
# filtered_tokens = []
# # [token for token, pos in tagged_tokens if pos in pos_tags or pos.isdigit()]
# for token, pos in tagged_tokens:
# if pos in pos_tags or token.isdigit():
# filtered_tokens.append(token)
# # elif token.isdigit:
# # filtered_tokens.append(token)
# # else:
# # continue
# # Join the filtered tokens back into a string
# filtered_text = ' '.join(filtered_tokens)
# return filtered_text
import nltk
from nltk import pos_tag
from nltk.collocations import (
TrigramCollocationFinder,
BigramCollocationFinder,
QuadgramCollocationFinder,
)
from nltk.metrics import (
TrigramAssocMeasures,
BigramAssocMeasures,
QuadgramAssocMeasures,
)
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))
for i, line in enumerate(txt):
line = clean_text(line)
postag = ["NN", "NNS", "NNP", "NNPS", "JJ", "JJR", "JJS", "CD"]
txt[i] = [
x
for x in nltk.word_tokenize(line)
if (x not in stop_words and pos_tag([x])[0][1])
]
txt[i] = " ".join(txt[i])
# Tokenize the sentence
# tokens = nltk.word_tokenize(txt[0])
tokens = []
for i in txt:
for j in i.split(" "):
tokens.append(j)
# Create bigram, trigram and quadgram collocation finders
finder_b = BigramCollocationFinder.from_words(tokens)
finder_t = TrigramCollocationFinder.from_words(tokens)
finder_q = QuadgramCollocationFinder.from_words(tokens)
# Filter out common words and punctuation
finder_b.apply_freq_filter(1)
finder_t.apply_freq_filter(1)
finder_q.apply_freq_filter(1)
# Set the scoring metric for each n-gram order (raw_freq is equivalent across them)
scoring_measure_b = BigramAssocMeasures.raw_freq
scoring_measure_t = TrigramAssocMeasures.raw_freq
scoring_measure_q = QuadgramAssocMeasures.raw_freq
# Get the top n-grams of each order based on the scoring metric
top_trigrams = finder_t.nbest(scoring_measure_t, 10)
top_bigrams = finder_b.nbest(scoring_measure_b, 15)
top_quadgrams = finder_q.nbest(scoring_measure_q, 10)
trigrams = {}
bigrams = {}
quadgrams = {}
t = finder_t.ngram_fd.items()
b = finder_b.ngram_fd.items()
q = finder_q.ngram_fd.items()
for i in txt:
print(i)
print("---------------------")
uuseless = ["worth rs"]
for i in t:
if i[0] in top_trigrams:
trigrams[" ".join(i[0])] = i[1]
trigrams = {
k: v for k, v in sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in b:
if i[0] in top_bigrams:
bigrams[" ".join(i[0])] = i[1]
bigrams = {
k: v for k, v in sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in q:
if i[0] in top_quadgrams:
quadgrams[" ".join(i[0])] = i[1]
quadgrams = {
k: v for k, v in sorted(quadgrams.items(), key=lambda item: item[1], reverse=True)
}
for b, j in bigrams.items():
for t, i in trigrams.items():
if b in t:
bigrams[b] = None
break
for b, j in bigrams.items():
for t, i in quadgrams.items():
if b in t:
bigrams[b] = None
break
for b, j in trigrams.items():
for t, i in quadgrams.items():
if b in t:
trigrams[b] = None
break
ngrams = {}
for b, j in bigrams.items():
if j != None:
ngrams[b] = j
for b, j in trigrams.items():
if j != None:
ngrams[b] = j
for b, j in quadgrams.items():
if j != None:
ngrams[b] = j
pos_tag(["347682432 88 cash"])
ngrams = {
k: v for k, v in sorted(ngrams.items(), key=lambda item: item[1], reverse=True)
}
ngrams
{
"worth rs": 18,
"mumbai police": 15,
"police case": 15,
"police police": 15,
"nigerian national": 13,
"drugs rs 5 lakh": 13,
"police station": 12,
"police thursday": 12,
"lakh international market": 12,
"drugs psychotropic substance ndps": 12,
"psychotropic substance ndps act": 12,
"rs 20 crore": 10,
"income tax department": 9,
"case ndps act sent": 7,
"ndps act sent police": 7,
"act sent police custody": 7,
"police custody till 6": 7,
"drug peddler drugs rs": 7,
"peddler drugs rs 5": 7,
"5 lakh possession goregaon": 7,
}
print("Top Quadgrams:")
for trigram, j in quadgrams.items():
print(trigram, "------------------------", j)
print("Top Trigrams:")
for trigram, j in trigrams.items():
print(trigram, "------------------------", j)
print("-------------------------")
print("Top Bigrams:")
for trigram, j in bigrams.items():
print(trigram, "------------------------", j)
print("-------------------------")
k = set()
k.add(1)
k.add(2)
k.add(3)
k.add(1)
k.add(10)
k.add(2)
k.add(2)
k = list(k)
k
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/917/129917598.ipynb
| null | null |
[{"Id": 129917598, "ScriptId": 38536156, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14218315, "CreationDate": "05/17/2023 12:22:14", "VersionNumber": 5.0, "Title": "ngram_workspace", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 211.0, "LinesInsertedFromPrevious": 89.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 122.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_4.json")
txt = list(data_may1["Text"])
import re
import string
from nltk.corpus import stopwords
stopwords = set(stopwords.words("english"))
def clean_text(text):
# Remove special characters
text = re.sub(r"[^a-zA-Z0-9\s]", "", text)
# Convert text to lowercase
text = text.lower()
# Change any white space to one space
text = re.sub("\s+", " ", text)
# Remove punctuation
text = text.translate(str.maketrans("", "", string.punctuation))
return text
# def filter_by_pos(text, pos_tags):
# # Tokenize the text into words
# tokens = text
# # Perform POS tagging
# tagged_tokens = pos_tag(tokens)
# # Filter tokens based on specified POS tags
# filtered_tokens = []
# # [token for token, pos in tagged_tokens if pos in pos_tags or pos.isdigit()]
# for token, pos in tagged_tokens:
# if pos in pos_tags or token.isdigit():
# filtered_tokens.append(token)
# # elif token.isdigit:
# # filtered_tokens.append(token)
# # else:
# # continue
# # Join the filtered tokens back into a string
# filtered_text = ' '.join(filtered_tokens)
# return filtered_text
import nltk
from nltk import pos_tag
from nltk.collocations import (
TrigramCollocationFinder,
BigramCollocationFinder,
QuadgramCollocationFinder,
)
from nltk.metrics import (
TrigramAssocMeasures,
BigramAssocMeasures,
QuadgramAssocMeasures,
)
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))
for i, line in enumerate(txt):
line = clean_text(line)
postag = ["NN", "NNS", "NNP", "NNPS", "JJ", "JJR", "JJS", "CD"]
txt[i] = [
x
for x in nltk.word_tokenize(line)
        if (x not in stop_words and pos_tag([x])[0][1] in postag)
]
txt[i] = " ".join(txt[i])
# Tokenize the sentence
# tokens = nltk.word_tokenize(txt[0])
tokens = []
for i in txt:
for j in i.split(" "):
tokens.append(j)
# Create a trigram collocation finder
finder_b = BigramCollocationFinder.from_words(tokens)
finder_t = TrigramCollocationFinder.from_words(tokens)
finder_q = QuadgramCollocationFinder.from_words(tokens)
# Filter out common words and punctuation
finder_b.apply_freq_filter(1)
finder_t.apply_freq_filter(1)
finder_q.apply_freq_filter(1)
# Set the scoring metric
scoring_measure_b = BigramAssocMeasures.raw_freq
scoring_measure_t = TrigramAssocMeasures.raw_freq
scoring_measure_q = QuadgramAssocMeasures.raw_freq
# Get the top 10 trigrams based on the scoring metric
top_trigrams = finder_t.nbest(scoring_measure_t, 10)
top_bigrams = finder_b.nbest(scoring_measure_b, 15)
top_quadgrams = finder_q.nbest(scoring_measure_q, 10)
trigrams = {}
bigrams = {}
quadgrams = {}
t = finder_t.ngram_fd.items()
b = finder_b.ngram_fd.items()
q = finder_q.ngram_fd.items()
for i in txt:
print(i)
print("---------------------")
uuseless = ["worth rs"]
for i in t:
if i[0] in top_trigrams:
trigrams[" ".join(i[0])] = i[1]
trigrams = {
k: v for k, v in sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in b:
if i[0] in top_bigrams:
bigrams[" ".join(i[0])] = i[1]
bigrams = {
k: v for k, v in sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in q:
if i[0] in top_quadgrams:
quadgrams[" ".join(i[0])] = i[1]
quadgrams = {
k: v for k, v in sorted(quadgrams.items(), key=lambda item: item[1], reverse=True)
}
for b, j in bigrams.items():
for t, i in trigrams.items():
if b in t:
bigrams[b] = None
break
for b, j in bigrams.items():
for t, i in quadgrams.items():
if b in t:
bigrams[b] = None
break
for b, j in trigrams.items():
for t, i in quadgrams.items():
if b in t:
trigrams[b] = None
break
ngrams = {}
for b, j in bigrams.items():
if j != None:
ngrams[b] = j
for b, j in trigrams.items():
if j != None:
ngrams[b] = j
for b, j in quadgrams.items():
if j != None:
ngrams[b] = j
pos_tag(["347682432 88 cash"])
ngrams = {
k: v for k, v in sorted(ngrams.items(), key=lambda item: item[1], reverse=True)
}
ngrams
{
"worth rs": 18,
"mumbai police": 15,
"police case": 15,
"police police": 15,
"nigerian national": 13,
"drugs rs 5 lakh": 13,
"police station": 12,
"police thursday": 12,
"lakh international market": 12,
"drugs psychotropic substance ndps": 12,
"psychotropic substance ndps act": 12,
"rs 20 crore": 10,
"income tax department": 9,
"case ndps act sent": 7,
"ndps act sent police": 7,
"act sent police custody": 7,
"police custody till 6": 7,
"drug peddler drugs rs": 7,
"peddler drugs rs 5": 7,
"5 lakh possession goregaon": 7,
}
print("Top Quadgrams:")
for quadgram, j in quadgrams.items():
    print(quadgram, "------------------------", j)
print("Top Trigrams:")
for trigram, j in trigrams.items():
print(trigram, "------------------------", j)
print("-------------------------")
print("Top Bigrams:")
for bigram, j in bigrams.items():
    print(bigram, "------------------------", j)
print("-------------------------")
k = set()
k.add(1)
k.add(2)
k.add(3)
k.add(1)
k.add(10)
k.add(2)
k.add(2)
k = list(k)
k
| false | 0 | 1,879 | 0 | 1,879 | 1,879 |
||
129883540
|
import os
os.getcwd()
os.chdir("/kaggle/")
os.getcwd()
os.listdir("/kaggle/input/123456")
import pandas as pd
titanic_train = pd.read_csv("input/123456/ball.csv")
titanic_train.head(6)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/883/129883540.ipynb
| null | null |
[{"Id": 129883540, "ScriptId": 38631088, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15133349, "CreationDate": "05/17/2023 07:13:36", "VersionNumber": 1.0, "Title": "notebookbd9245e092", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 12.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import os
os.getcwd()
os.chdir("/kaggle/")
os.getcwd()
os.listdir("/kaggle/input/123456")
import pandas as pd
titanic_train = pd.read_csv("input/123456/ball.csv")
titanic_train.head(6)
| false | 0 | 82 | 0 | 82 | 82 |
||
129437594
|
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv", index_col="id")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv", index_col="id")
# # Overview
train.shape
train.head()
train.tail()
train.info()
train.describe()
train.dtypes
# # EDA
train.corr()
sns.heatmap(train.corr())
plt.show()
train.corr()["yield"].sort_values(ascending=False)
# # Feature Engineering
# train.drop(["osmia","bumbles","andrena","honeybee",
# "AverageOfUpperTRange","AverageOfLowerTRange",
# "MinOfLowerTRange","MinOfUpperTRange",
# "MaxOfUpperTRange","MaxOfLowerTRange"],axis = 1, inplace = True)
# # Model Selection
def scaling(feature):
global X_train, X_test
scaler = MinMaxScaler()
scaler.fit(X_train[feature].to_numpy().reshape(-1, 1))
X_train[feature] = scaler.transform(X_train[feature].to_numpy().reshape(-1, 1))
X_test[feature] = scaler.transform(X_test[feature].to_numpy().reshape(-1, 1))
RS = 13
X = train.drop(["yield"], axis=1)
y = train[["yield"]]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=RS
)
scale_needed_features = [
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
"seeds",
]
for feature in scale_needed_features:
scaling(feature)
xgb = XGBRegressor(random_state=RS, max_depth=3, n_estimators=100, eval_metric="mae")
xgb.fit(X_train, y_train)
xgb_prediction = xgb.predict(X_test)
mae_xgb = mean_absolute_error(y_test, xgb_prediction)
mae_xgb
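# GridSearchCV is imported above but unused; the sketch below shows one way it
# could tune the XGBoost settings. The grid values are illustrative assumptions,
# not parameters taken from this notebook.
param_grid = {"max_depth": [3, 4, 5], "n_estimators": [100, 200, 300]}
grid = GridSearchCV(
    XGBRegressor(random_state=RS, eval_metric="mae"),
    param_grid,
    scoring="neg_mean_absolute_error",
    cv=3,
)
grid.fit(X_train, y_train)
print(grid.best_params_, -grid.best_score_)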
# # Final Evaluation - XGB
X_train = train.drop(["yield"], axis=1)
y_train = train[["yield"]]
X_test = test.copy()
xgb_final = XGBRegressor(
random_state=RS, max_depth=3, n_estimators=100, eval_metric="mae"
)
xgb_final.fit(X_train, y_train)
xgb_final_prediction = xgb_final.predict(X_test)
# # Result
result = pd.DataFrame({"yield": xgb_final_prediction}).set_index(X_test.index)
result
result.to_csv("second_sub.csv")
# Author: amyrmahdy
# Date: 12 May 2023
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/437/129437594.ipynb
| null | null |
[{"Id": 129437594, "ScriptId": 38412702, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7867890, "CreationDate": "05/13/2023 19:57:32", "VersionNumber": 15.0, "Title": "playground-series-s3e14-wild-blueberry", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 105.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 101.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv", index_col="id")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv", index_col="id")
# # Overview
train.shape
train.head()
train.tail()
train.info()
train.describe()
train.dtypes
# # EDA
train.corr()
sns.heatmap(train.corr())
plt.show()
train.corr()["yield"].sort_values(ascending=False)
# # Feature Engineering
# train.drop(["osmia","bumbles","andrena","honeybee",
# "AverageOfUpperTRange","AverageOfLowerTRange",
# "MinOfLowerTRange","MinOfUpperTRange",
# "MaxOfUpperTRange","MaxOfLowerTRange"],axis = 1, inplace = True)
# # Model Selection
def scaling(feature):
global X_train, X_test
scaler = MinMaxScaler()
scaler.fit(X_train[feature].to_numpy().reshape(-1, 1))
X_train[feature] = scaler.transform(X_train[feature].to_numpy().reshape(-1, 1))
X_test[feature] = scaler.transform(X_test[feature].to_numpy().reshape(-1, 1))
RS = 13
X = train.drop(["yield"], axis=1)
y = train[["yield"]]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=RS
)
scale_needed_features = [
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
"seeds",
]
for feature in scale_needed_features:
scaling(feature)
xgb = XGBRegressor(random_state=RS, max_depth=3, n_estimators=100, eval_metric="mae")
xgb.fit(X_train, y_train)
xgb_prediction = xgb.predict(X_test)
mae_xgb = mean_absolute_error(y_test, xgb_prediction)
mae_xgb
# # Final Evaluation - XGB
X_train = train.drop(["yield"], axis=1)
y_train = train[["yield"]]
X_test = test.copy()
xgb_final = XGBRegressor(
random_state=RS, max_depth=3, n_estimators=100, eval_metric="mae"
)
xgb_final.fit(X_train, y_train)
xgb_final_prediction = xgb_final.predict(X_test)
# # Result
result = pd.DataFrame({"yield": xgb_final_prediction}).set_index(X_test.index)
result
result.to_csv("second_sub.csv")
# Author: amyrmahdy
# Date: 12 May 2023
| false | 0 | 862 | 0 | 862 | 862 |
||
129437256
|
<jupyter_start><jupyter_text>CrowS-Pairs (Social biases in MLMs)
_____
# CrowS-Pairs (Social biases in MLMs)
### CrowS-Pairs: A Challenge Dataset for Measuring Social Biases in Masked LM
By [[source]](https://github.com/nyu-mll/crows-pairs)
_____
### About this dataset
> The CrowS-Pairs dataset is a collection of 1,508 sentence pairs that cover nine types of biases: race/color, gender/gender identity, sexual orientation, religion, age, nationality, disability, physical appearance, and socioeconomic status. Each sentence pair is a minimal edit of the first sentence: The only words that change between them are those that identify the group. The first sentence can _demonstrate_ or _violate_ a stereotype. The other sentence is a minimal edit of the first sentence: The only words that change between them are those that identify the group. Each example has the following information:
>
> Columns: sent_more, sent_less, stereo_antistereo, bias_type, annotations, anon_writer, anon_annotators, prompt, source
>
> The CrowS-Pairs dataset is a collection of 1,508 sentence pairs that cover nine types of biases: race/color, gender/gender identity, sexual orientation, religion, age
### More Datasets
> For more datasets, click [here](https://www.kaggle.com/thedevastator/datasets).
### Featured Notebooks
> - 🚨 **Your notebook can be here!** 🚨!
### How to use the dataset
> The CrowS-Pairs dataset is a collection of 1,508 sentence pairs that cover nine types of biases: race/color, gender/gender identity, sexual orientation, religion, age, nationality, disability, physical appearance, and socioeconomic status. Each sentence pair is a minimal edit of the first sentence: The only words that change between them are those that identify the group. The first sentence can _demonstrate_ or _violate_ a stereotype. The other sentence is a minimal edit of the first sentence: The only words that change between them are those that identify the group. Each example has the following information:
>
> Columns: sent_more, sent_less, stereo_antistereo, bias_type, annotations, anon_writer, anon_annotators, prompt, source
>
> This dataset can be used to measure social biases in MLMs by training models on it and evaluating their performance
### Research Ideas
> - Measuring the ability of MLMs to identify and avoid social biases;
> - Developing new methods for reducing social biases in MLMs; and
> - Investigating the impact of social biases on downstream tasks such as reading comprehension or question answering
Kaggle dataset identifier: a-dataset-for-measuring-social-biases-in-mlms
<jupyter_script># ## Imports and installs
import pandas as pd
import numpy as np
from transformers import (
AutoTokenizer,
DataCollatorWithPadding,
BloomTokenizerFast,
BloomForTokenClassification,
BloomForSequenceClassification,
DataCollatorForTokenClassification,
AutoModelForTokenClassification,
TrainingArguments,
Trainer,
)
from datasets import load_dataset
import torch
import os
import evaluate
import random
# ## Dataset : CrowS-Pairs
dataset = pd.read_csv(
"/kaggle/input/a-dataset-for-measuring-social-biases-in-mlms/crows_pairs_anonymized.csv"
)
dataset = dataset[["sent_more", "stereo_antistereo"]]
dataset = dataset.rename(columns={"sent_more": "text", "stereo_antistereo": "policy"})
dataset = dataset.replace("stereo", 1)
dataset = dataset.replace("antistereo", 0)
dataset = dataset.rename(columns={"policy": "label"})
good = dataset[dataset["label"] == 0]
bad = dataset[dataset["label"] == 1]
print("Size of good : ", len(good))
print("Size of bad : ", len(bad))
remove = len(bad) - len(good)
to_remove = random.sample(range(len(bad)), remove)
bad = bad.drop(bad.index[to_remove])
dataset = pd.concat([good, bad], ignore_index=True)
dataset.to_csv("/kaggle/working/final.csv", index=False)
dataset = load_dataset(
"csv", data_files="/kaggle/working/final.csv", keep_default_na=False
)
dataset = dataset.shuffle(seed=42)
dataset = dataset["train"].train_test_split(test_size=0.005)
def preprocess_function(examples):
return tokenizer(examples["text"])
dataset
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return accuracy.compute(predictions=predictions, references=labels)
id2label = {1: "BROKE", 0: "KEPT"}
label2id = {"KEPT": 0, "BROKE": 1}
tokenizer = BloomTokenizerFast.from_pretrained(
f"bigscience/bloom-560m", add_prefix_space=True
)
accuracy = evaluate.load("accuracy")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
tokenized_dataset = dataset.map(preprocess_function, batched=True)
# ## Model : BLOOM-560M
model = BloomForSequenceClassification.from_pretrained(
f"bigscience/bloom-560m",
num_labels=2,
id2label=id2label,
label2id=label2id,
ignore_mismatched_sizes=True,
)
model
training_args = TrainingArguments(
output_dir="OutModelPolicy",
learning_rate=3e-05,
per_device_train_batch_size=35,
per_device_eval_batch_size=35,
num_train_epochs=5,
weight_decay=0.01,
evaluation_strategy="steps",
save_strategy="steps",
load_best_model_at_end=True,
save_steps=50000,
eval_steps=50000,
fp16=True,
save_total_limit=2,
push_to_hub=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_dataset["train"],
eval_dataset=tokenized_dataset["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
os.environ["WANDB_API_KEY"] = "174605229344dbcc2c90f595394111e3396b2b8b"
trainer.train()
trainer.save_model("final_model")
import shutil
shutil.make_archive("/kaggle/working/final_model", "zip", "/kaggle/working")
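# Quick sanity check of the saved classifier. The example sentence is made up,
# and this assumes the tokenizer files were written into "final_model" as well.
from transformers import pipeline

clf = pipeline("text-classification", model="final_model", tokenizer="final_model")
print(clf("Elderly people can never figure out new technology."))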
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/437/129437256.ipynb
|
a-dataset-for-measuring-social-biases-in-mlms
|
thedevastator
|
[{"Id": 129437256, "ScriptId": 38485309, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10021937, "CreationDate": "05/13/2023 19:52:20", "VersionNumber": 1.0, "Title": "CrowS-Pairs", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 107.0, "LinesInsertedFromPrevious": 107.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 185485528, "KernelVersionId": 129437256, "SourceDatasetVersionId": 4601549}]
|
[{"Id": 4601549, "DatasetId": 2680265, "DatasourceVersionId": 4662968, "CreatorUserId": 10654180, "LicenseName": "CC0: Public Domain", "CreationDate": "11/27/2022 15:59:57", "VersionNumber": 2.0, "Title": "CrowS-Pairs (Social biases in MLMs)", "Slug": "a-dataset-for-measuring-social-biases-in-mlms", "Subtitle": "CrowS-Pairs: A Challenge Dataset for Measuring Social Biases in Masked LM", "Description": "_____\n# CrowS-Pairs (Social biases in MLMs)\n### CrowS-Pairs: A Challenge Dataset for Measuring Social Biases in Masked LM\nBy [[source]](https://github.com/nyu-mll/crows-pairs)\n_____\n\n### About this dataset\n> The CrowS-Pairs dataset is a collection of 1,508 sentence pairs that cover nine types of biases: race/color, gender/gender identity, sexual orientation, religion, age, nationality, disability, physical appearance, and socioeconomic status. Each sentence pair is a minimal edit of the first sentence: The only words that change between them are those that identify the group. The first sentence can _demonstrate_ or _violate_ a stereotype. The other sentence is a minimal edit of the first sentence: The only words that change between them are those that identify the group. Each example has the following information:\n> \n> Columns:,**sent_more**,sent_less,**stereo_antistereo**,bias_type,**annotations**,,anon_writer,,anon_annotators,,prompt,,source\n> \n> The CrowS-Pairs dataset is a collection of 1,508 sentence pairs that cover nine types of biases: race/color, gender/gender identity, sexual orientation, religion, age\n\n### More Datasets\n> For more datasets, click [here](https://www.kaggle.com/thedevastator/datasets).\n\n### Featured Notebooks\n> - \ud83d\udea8 **Your notebook can be here!** \ud83d\udea8! \n\n### How to use the dataset\n> The CrowS-Pairs dataset is a collection of 1,508 sentence pairs that cover nine types of biases: race/color, gender/gender identity, sexual orientation, religion, age, nationality, disability, physical appearance, and socioeconomic status. Each sentence pair is a minimal edit of the first sentence: The only words that change between them are those that identify the group. The first sentence can _demonstrate_ or _violate_ a stereotype. The other sentence is a minimal edit of the first sentence: The only words that change between them are those that identify the group. Each example has the following information:\n> \n> Columns:,**sent_less**sent_more,,stereo_antistereo,,bias_type,,annotations,,anon_writer,,anon_annotators,,,,prompt,,source\n> \n> This dataset can be used to measure social biases in MLMs by training models on it and evaluating their performance\n\n### Research Ideas\n> - Measuring the ability of MLMs to identify and avoid social biases;\n> - Developing new methods for reducing social biases in MLMs; and \n> - Investigating the impact of social biases on downstream tasks such as reading comprehension or question answering\n\n### Acknowledgements\n> If you use this dataset in your research, please credit the original authors.\n\n> [Data Source](https://github.com/nyu-mll/crows-pairs)\n> \n> \n> ### License\n> \n> \n> **License: [CC0 1.0 Universal (CC0 1.0) - Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/)**\n> No Copyright - You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission. 
[See Other Information](https://creativecommons.org/publicdomain/zero/1.0/).\n\n### Columns\n\n**File: crows_pairs_anonymized.csv**\n| Column name | Description |\n|:----------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| **sent_more** | The first sentence in the pair, which can demonstrate or violate a stereotype. (String) |\n| **sent_less** | The second sentence in the pair, which is a minimal edit of the first sentence. The only words that change between them are those that identify the group. (String) |\n| **stereo_antistereo** | Whether the first sentence demonstrates or violates a stereotype. (String) |\n| **bias_type** | The type of bias represented in the sentence pair. (String) |\n| **annotations** | The annotations made by the crowdworkers on the sentence pair. (String) |\n| **anon_writer** | The anonymous writer of the sentence pair. (String) |\n| **anon_annotators** | The anonymous annotators of the sentence pair. (String) |\n\n_____\n\n**File: prompts.csv**\n| Column name | Description |\n|:--------------|:----------------------------------------------------------------------------------------|\n| **sent_more** | The first sentence in the pair, which can demonstrate or violate a stereotype. (String) |\n| **prompt** | The prompt for the sentence pair. (String) |\n| **source** | The source of the sentence pair. (String) |\n\n### Acknowledgements\n> If you use this dataset in your research, please credit the original authors.\n> If you use this dataset in your research, please credit [](https://github.com/nyu-mll/crows-pairs).", "VersionNotes": "version update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2680265, "CreatorUserId": 10654180, "OwnerUserId": 10654180.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4601549.0, "CurrentDatasourceVersionId": 4662968.0, "ForumId": 2712256, "Type": 2, "CreationDate": "11/27/2022 15:59:51", "LastActivityDate": "11/27/2022", "TotalViews": 602, "TotalDownloads": 38, "TotalVotes": 2, "TotalKernels": 10}]
|
[{"Id": 10654180, "UserName": "thedevastator", "DisplayName": "The Devastator", "RegisterDate": "05/26/2022", "PerformanceTier": 4}]
|
# ## Imports and installs
import pandas as pd
import numpy as np
from transformers import (
AutoTokenizer,
DataCollatorWithPadding,
BloomTokenizerFast,
BloomForTokenClassification,
BloomForSequenceClassification,
DataCollatorForTokenClassification,
AutoModelForTokenClassification,
TrainingArguments,
Trainer,
)
from datasets import load_dataset
import torch
import os
import evaluate
import random
# ## Dataset : CrowS-Pairs
dataset = pd.read_csv(
"/kaggle/input/a-dataset-for-measuring-social-biases-in-mlms/crows_pairs_anonymized.csv"
)
dataset = dataset[["sent_more", "stereo_antistereo"]]
dataset = dataset.rename(columns={"sent_more": "text", "stereo_antistereo": "policy"})
dataset = dataset.replace("stereo", 1)
dataset = dataset.replace("antistereo", 0)
dataset = dataset.rename(columns={"policy": "label"})
good = dataset[dataset["label"] == 0]
bad = dataset[dataset["label"] == 1]
print("Size of good : ", len(good))
print("Size of bad : ", len(bad))
remove = len(bad) - len(good)
to_remove = random.sample(range(len(bad)), remove)
bad = bad.drop(bad.index[to_remove])
dataset = pd.concat([good, bad], ignore_index=True)
dataset.to_csv("/kaggle/working/final.csv", index=False)
dataset = load_dataset(
"csv", data_files="/kaggle/working/final.csv", keep_default_na=False
)
dataset = dataset.shuffle(seed=42)
dataset = dataset["train"].train_test_split(test_size=0.005)
def preprocess_function(examples):
return tokenizer(examples["text"])
dataset
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return accuracy.compute(predictions=predictions, references=labels)
id2label = {1: "BROKE", 0: "KEPT"}
label2id = {"KEPT": 0, "BROKE": 1}
tokenizer = BloomTokenizerFast.from_pretrained(
f"bigscience/bloom-560m", add_prefix_space=True
)
accuracy = evaluate.load("accuracy")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
tokenized_dataset = dataset.map(preprocess_function, batched=True)
# ## Model : BLOOM-560M
model = BloomForSequenceClassification.from_pretrained(
f"bigscience/bloom-560m",
num_labels=2,
id2label=id2label,
label2id=label2id,
ignore_mismatched_sizes=True,
)
model
training_args = TrainingArguments(
output_dir="OutModelPolicy",
learning_rate=3e-05,
per_device_train_batch_size=35,
per_device_eval_batch_size=35,
num_train_epochs=5,
weight_decay=0.01,
evaluation_strategy="steps",
save_strategy="steps",
load_best_model_at_end=True,
save_steps=50000,
eval_steps=50000,
fp16=True,
save_total_limit=2,
push_to_hub=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_dataset["train"],
eval_dataset=tokenized_dataset["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
os.environ["WANDB_API_KEY"] = "174605229344dbcc2c90f595394111e3396b2b8b"
trainer.train()
trainer.save_model("final_model")
import shutil
shutil.make_archive("/kaggle/working/final_model", "zip", "/kaggle/working")
| false | 1 | 1,025 | 1 | 1,744 | 1,025 |
||
129437261
|
<jupyter_start><jupyter_text>countries by intentional homicide rate
The Countries by Intentional Homicide Rate dataset provides information on the intentional homicide rate in countries around the world.
The dataset contains information on more than 150 countries and territories, including both developed and developing nations. It provides a comprehensive overview of the variation in homicide rates across different regions and countries around the world.
The dataset can be used for a variety of research purposes, including exploring the relationship between homicide rates and other social and economic indicators, identifying trends and patterns in homicide rates over time, and comparing homicide rates across different countries and regions.
Overall, the Countries by Intentional Homicide Rate dataset is a valuable resource for anyone interested in studying crime and violence, and in understanding the social and economic factors that underlie these phenomena....
**Description**: ChatGPT
Kaggle dataset identifier: countries-by-intentional-homicide-rate
<jupyter_script># importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# loading dataset
df = pd.read_csv(
"../input/countries-by-intentional-homicide-rate/countries-by-intentional-homicide-rate.csv"
)
# checking if there is any null values in the dataframe
df.isnull().sum()
# let's check what is in the head
df.head(10)
# Now we have a bigger picture of what the data looks like
# Let's note down what is in the dataset
# * we have locations (countries)
# * we have Regions (continents)
# * we have count (I assume it is the number of deaths; summarized just below)
# * we have year
# * we have rate %
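# A quick numeric summary to back that reading of "Count" (the column names
# are the ones used in the plots below)
df[["Count", "Rate", "Year"]].describe()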
# Let's Define Tasks
# * ### I will try to visualize the total homicide rate by continent and country
# * ### we will calculate the total number of deaths and then visualize it
# * ### we will calculate the number of deaths by continent (see the sketch at the end of this notebook)
# Task 1 | visualization by countries
# Homicide rate by Asian Countries
#
# Here we have separated all the Asian countries.
asia_df = df[df["Region"] == "Asia"]
fig = px.scatter(
asia_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in Asian Countries")
fig.show()
# Homicide rate by European Countries
#
# Here we have separated all the European countries.
Euro_df = df[df["Region"] == "Europe"]
fig = px.scatter(
Euro_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in European Countries")
fig.show()
# Homicide rate by American Countries
#
# Here we have separated all the American countries.
amr_df = df[df["Region"] == "Americas"]
fig = px.scatter(
amr_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in American Countries")
fig.show()
# Homicide rate by African Countries
#
# Here we have separated all the African countries.
africa_df = df[df["Region"] == "Africa"]
fig = px.scatter(
africa_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in African Countries")
fig.show()
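# Tasks 2 and 3 from the plan above are not covered by the scatter plots, so here
# is a minimal sketch (assuming "Count" holds the number of reported homicides)
total_deaths = df["Count"].sum()
print("Total reported homicides:", total_deaths)
deaths_by_region = df.groupby("Region")["Count"].sum().sort_values(ascending=False)
fig = px.bar(
    x=deaths_by_region.index,
    y=deaths_by_region.values,
    title="Total Homicide Count by Continent",
)
fig.show()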
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/437/129437261.ipynb
|
countries-by-intentional-homicide-rate
|
bilalwaseer
|
[{"Id": 129437261, "ScriptId": 38228903, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9940952, "CreationDate": "05/13/2023 19:52:24", "VersionNumber": 3.0, "Title": "Mastering Intentional Homicide Data Analysis", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 98.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185485533, "KernelVersionId": 129437261, "SourceDatasetVersionId": 5627892}]
|
[{"Id": 5627892, "DatasetId": 3209333, "DatasourceVersionId": 5703124, "CreatorUserId": 9940952, "LicenseName": "CC0: Public Domain", "CreationDate": "05/07/2023 20:28:31", "VersionNumber": 3.0, "Title": "countries by intentional homicide rate", "Slug": "countries-by-intentional-homicide-rate", "Subtitle": "comprehensive dataset for studying crime and violence.", "Description": "The Countries by Intentional Homicide Rate dataset provides information on the intentional homicide rate in countries around the world. \n\nThe dataset contains information on more than 150 countries and territories, including both developed and developing nations. It provides a comprehensive overview of the variation in homicide rates across different regions and countries around the world.\n\nThe dataset can be used for a variety of research purposes, including exploring the relationship between homicide rates and other social and economic indicators, identifying trends and patterns in homicide rates over time, and comparing homicide rates across different countries and regions.\n\n\nOverall, the Countries by Intentional Homicide Rate dataset is a valuable resource for anyone interested in studying crime and violence, and in understanding the social and economic factors that underlie these phenomena....\n\n\n\n**Description**: ChatGPT", "VersionNotes": "Data Update 2023-05-07", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3209333, "CreatorUserId": 9940952, "OwnerUserId": 9940952.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5627892.0, "CurrentDatasourceVersionId": 5703124.0, "ForumId": 3274046, "Type": 2, "CreationDate": "05/01/2023 16:30:31", "LastActivityDate": "05/01/2023", "TotalViews": 6919, "TotalDownloads": 1207, "TotalVotes": 27, "TotalKernels": 5}]
|
[{"Id": 9940952, "UserName": "bilalwaseer", "DisplayName": "Muhammad Bilal Hussain", "RegisterDate": "03/15/2022", "PerformanceTier": 2}]
|
# importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# loading dataset
df = pd.read_csv(
"../input/countries-by-intentional-homicide-rate/countries-by-intentional-homicide-rate.csv"
)
# checking if there is any null values in the dataframe
df.isnull().sum()
# let's check what is in the head
df.head(10)
# Now we have a bigger picture of what the data looks like
# Let's note down what is in the dataset
# * we have locations (countries)
# * we have Regions (continents)
# * we have count (I assume it is the number of deaths)
# * we have year
# * we have rate %
# Let's Define Tasks
# * ### I will try to visualize the total homicide rate by continent and country
# * ### we will calculate the total number of deaths and then visualize it
# * ### we will calculate the number of deaths by continent
# Task 1 | visualization by countries
# Homicide rate by Asian Countries
#
# Here we have separated all the Asian countries.
asia_df = df[df["Region"] == "Asia"]
fig = px.scatter(
asia_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in Asian Countries")
fig.show()
# Homicide rate by European Countries
#
# Here we have separated all the European countries.
Euro_df = df[df["Region"] == "Europe"]
fig = px.scatter(
Euro_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in European Countries")
fig.show()
# Homicide rate by American Countries
#
# Here we have separated all the American countries.
amr_df = df[df["Region"] == "Americas"]
fig = px.scatter(
amr_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in American Countries")
fig.show()
# Homicide rate by African Countries
#
# Here we have separated all the African countries.
africa_df = df[df["Region"] == "Africa"]
fig = px.scatter(
africa_df,
x="Count",
y="Year",
size="Rate",
color="Location",
hover_name="Location",
log_x=True,
size_max=60,
)
fig.update_layout(title="Homicide Rate in African Countries")
fig.show()
| false | 1 | 779 | 0 | 1,006 | 779 |
||
129437077
|
import tensorflow as tf
# Necessary libraries
import numpy as np
from datetime import datetime
from tqdm import tqdm
from scipy.interpolate import interp1d
from math import pi
from scipy.linalg import solve_banded
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.integrate import simps
import matplotlib.cm as cm
import time
# Coefficients
k = 176.1 # permeability coef
drho = 814.4 # oil-gas pressure difference
g = 9.80665 # acceleration of gravity
mu = 18.76 # dynamic viscosity
phi = 0.266 # effective porosity
alpha = k * drho * g / (mu * phi) / 3000
h_0 = 1 / 2 * 7**2 # initial height
W = 150 # half of the drain length
L = 1171.3 # well length
dw = 0.089 # well diameter
zw1 = h_0 * 0.8
zw2 = h_0 * 0.75
cube_to_kg = 800 # coefficient to translate m^3 to kg
beta = 0.191794288875527
# beta = 1
Bo = 1.069
Bg = 0.00935
Rs = 300
gamma = 119
def diff_coef(z_nm, dt, dx):
return dt * alpha * (2 * z_nm) ** (1 / 2) / (dx**2)
def K(y):
return (beta - 1) * y / L + 1
def well_flow_rate(y, q0, prev_y, dy):
delta = np.minimum(np.maximum((np.full(prev_y.shape, zw1) - prev_y), 0) / dw, 1)
ny = prev_y.shape[0]
y_length = L * (y / (ny - 1))
rate_in_point = (1 - delta[y] ** 2) * K(y_length)
all_ys = np.linspace(0, L, ny)
rate_overall = [(1 - delta[pt] ** 2) * K(y_len) for pt, y_len in enumerate(all_ys)]
int_rate_overall = simps(y=rate_overall, dx=dy)
if int_rate_overall <= 0:
return 0
return max(q0 * (rate_in_point / int_rate_overall), 0)
def psi(gnc, dy):
all_ys = np.linspace(0, L, gnc.shape[0])
delta = np.minimum(np.maximum((np.full(gnc.shape, zw2) - gnc), 0) / dw, 1)
gas_diff = [(delta[pt] ** 2) * K(y_len) for pt, y_len in enumerate(all_ys)]
oil_diff = [(1 - delta[pt] ** 2) * K(y_len) for pt, y_len in enumerate(all_ys)]
gas_rate = simps(y=gas_diff, dx=dy)
oil_rate = simps(y=oil_diff, dx=dy)
return gas_rate / oil_rate
def plot(sol):
t = len(sol)
n_x = len(sol[0]) - 1
fig, ax = plt.subplots()
ax.axis([0, W, (2 * zw2) ** (1 / 2) - 1, (2 * h_0) ** (1 / 2)])
(l,) = ax.plot([], [], label="GOC")
def animate(i):
l.set_data(np.linspace(0, W, n_x + 1), sol[i])
ani = animation.FuncAnimation(fig, animate, frames=t)
plt.plot(
np.linspace(0, W, n_x + 1), [zw2 for _ in range(n_x + 1)], label="Well bottom"
)
plt.plot(
np.linspace(0, W, n_x + 1), [zw1 for _ in range(n_x + 1)], label="Well top"
)
plt.legend()
plt.show()
def solve(path, nt, nx, ny):
debits_rate = {}
with open(path, "r") as f:
dat = f.read().split()
# save gas and oil values
days = []
gas = []
oil = []
for i in range(len(dat) // 3):
days.append(int(dat[i * 3]))
oil.append(float(dat[i * 3 + 1]) * cube_to_kg)
gas.append(float(dat[i * 3 + 2]))
# interpolate oil
debit = interp1d(days, oil, fill_value="extrapolate")
Rs = gas[0] / oil[0]
# to predict gas
gas_init = gas[0]
gas_pred = []
dx = W / nx
dt = max(days) / nt
dy = L / ny
sol = np.zeros((nt + 1, nx, ny))
# initial condition
sol[0] = np.full((nx, ny), h_0)
for t, curr_t in enumerate(tqdm(np.linspace(0, int(max(days)), nt))):
for y, curr_y in enumerate(np.linspace(0, L, ny)):
A = np.zeros((nx, nx))
b = np.zeros(nx)
# left boundary condition
q_o = well_flow_rate(y, debit(curr_t), sol[t][0][:], dy) / (2 * alpha * phi)
debits_rate[(curr_t, curr_y)] = q_o
b[0] = -sol[t][1][y] + diff_coef(sol[t][1][y], dt, dx) * (q_o * dx) / (
alpha * phi
)
A[0][1] = -(1 - 2 * diff_coef(sol[t][1][y], dt, dx))
A[0][0] = -2 * diff_coef(sol[t][1][y], dt, dx)
# finite diff scheme
for i in range(1, nx - 1):
b[i] = -sol[t][i][y]
A[i][i - 1] = diff_coef(sol[t][i][y], dt, dx)
A[i][i] = -1 - 2 * diff_coef(sol[t][i][y], dt, dx)
A[i][i + 1] = diff_coef(sol[t][i][y], dt, dx)
# right boundary condition
b[nx - 1] = -sol[t][nx - 2][y]
A[nx - 1][nx - 2] = -(1 - 2 * diff_coef(sol[t][nx - 1][y], dt, dx))
A[nx - 1][nx - 1] = -2 * diff_coef(sol[t][nx - 1][y], dt, dx)
# x = tridiagonal_solution(A, b)
Ab = np.zeros((3, nx))
Ab[0, 1:] = A.diagonal(1)
Ab[1, :] = A.diagonal()
Ab[2, :-1] = A.diagonal(-1)
x = solve_banded((1, 1), Ab, b)
sol[t + 1, :, y] = x
# overall_volume.append(simps(simps(sol[t] - zw2, axis=0, dx=dx), axis=0, dx=dy))
# counting gas
psi_coef = psi(sol[t, 0, :], dy)
Qg = debit(curr_t) / cube_to_kg * (psi_coef * gamma / Bg + Rs / Bo)
qg = Bg * (Qg - debit(curr_t) / (Bo * cube_to_kg))
gas_pred.append(qg)
return sol, debits_rate
if __name__ == "__main__":
path = "/kaggle/input/1501-dat/1501.dat"
nx = 100
nt = 169
ny = 100
surface, well_rates = solve(path, nt, nx, ny)
# create one-dimensional coordinate arrays
t = np.linspace(0, 169, surface.shape[0])
x = np.linspace(0, W, surface.shape[1])
y = np.linspace(0, L, surface.shape[2])
# build a mesh grid from the three arrays
T, X, Y = np.meshgrid(t, x, y, indexing="ij")
# flatten array A into a one-dimensional array
A_flat = surface.flatten()
# use A_flat as the target values
Z = A_flat
X_input = np.stack([T, X, Y], axis=-1).reshape(-1, 3)
df = np.concatenate([X_input, Z.reshape(-1, 1)], axis=1)
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df, test_size=0.2, shuffle=True)
np.savetxt("./df_train.dat", df_train)
np.savetxt("./df_test.dat", df_test)
import deepxde as dde
data = dde.data.DataSet(
fname_train="./df_train.dat",
fname_test="./df_test.dat",
col_x=(0, 1, 2),
col_y=(3,),
standardize=True,
)
layer_size = [3] + [50] * 8 + [1]
activation = "tanh"
initializer = "Glorot normal"
net = dde.nn.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
model.compile("adam", lr=0.001, metrics=["l2 relative error"])
losshistory, train_state = model.train(iterations=50000)
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
# create one-dimensional coordinate arrays for the prediction grid
# (the ~±1.71 bounds look like standardized coordinates, matching the
# standardize=True option used when building the DeepXDE DataSet above)
ntp = 77
nxp = 77
nyp = 77
t = np.linspace(-1.7149, 1.714654, ntp)
x = np.linspace(-1.714971, 1.7142838, nxp)
y = np.linspace(-1.714554, 1.7149646, nyp)
# build a mesh grid from the three arrays
T, X, Y = np.meshgrid(t, x, y, indexing="ij")
X_input = np.stack([T, X, Y], axis=-1).reshape(-1, 3)
surf = model.predict(X_input).reshape(ntp, nxp, nyp)
cone = np.concatenate([surf[:, ::-1, :], surf], axis=1)
cone = (2 * cone) ** (1 / 2)
def update_plot(frame_number):
ax.clear()
ax.set_zlim((2 * zw2) ** (1 / 2) - 0.5, 7)
z = np.swapaxes(cone[frame_number, :, :], 0, 1)
surface_plot = ax.plot_surface(
X, Y, z, cmap="coolwarm", vmin=np.min(np.abs(z)), vmax=np.max(np.abs(z))
)
surface_plot.set_facecolor((0, 0, 0, 0))
ax.auto_scale_xyz(
[X.min(), X.max()], [Y.min(), Y.max()], [(2 * zw2) ** (1 / 2) - 0.5, 7]
)
well_level = np.ones_like(
np.array([np.linspace(-W, W, 3), np.linspace(0, L, 3)])
) * (2 * zw2) ** (1 / 2)
well_level_plot = ax.plot_surface(
np.linspace(-W, W, 3),
np.linspace(0, L, 3),
well_level,
alpha=0.7,
color="grey",
label="well_level",
)
well_level_plot.set_facecolor((0, 0, 0, 0))
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
X = np.linspace(-150, 150, 2 * nxp)
Y = np.linspace(0, L, nyp)
X, Y = np.meshgrid(X, Y)
z = np.swapaxes(cone[0, :, :], 0, 1)
cmap = plt.cm.get_cmap("coolwarm")
plot = [
ax.plot_surface(
X,
Y,
z,
color="0.75",
rstride=1,
cstride=1,
cmap=cmap,
vmin=np.min(np.abs(z)),
vmax=np.max(np.abs(z)),
)
]
ax.set_zlim((2 * zw2) ** (1 / 2) - 0.5, 7)
ani = animation.FuncAnimation(fig, update_plot, ntp, interval=100)
from IPython.display import HTML
HTML(ani.to_jshtml())
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/437/129437077.ipynb
| null | null |
[{"Id": 129437077, "ScriptId": 38369212, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8300400, "CreationDate": "05/13/2023 19:49:45", "VersionNumber": 1.0, "Title": "METAMODELLING", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 260.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import tensorflow as tf
# Necessary libraries
import numpy as np
from datetime import datetime
from tqdm import tqdm
from scipy.interpolate import interp1d
from math import pi
from scipy.linalg import solve_banded
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.integrate import simps
import matplotlib.cm as cm
import time
# Coefficients
k = 176.1 # permeability coef
drho = 814.4 # oil-gas pressure difference
g = 9.80665 # acceleration of gravity
mu = 18.76 # dynamic viscosity
phi = 0.266 # effective porosity
alpha = k * drho * g / (mu * phi) / 3000
h_0 = 1 / 2 * 7**2 # initial height
W = 150 # half of the drain length
L = 1171.3 # well length
dw = 0.089 # well diameter
zw1 = h_0 * 0.8
zw2 = h_0 * 0.75
cube_to_kg = 800 # coefficient to translate m^3 to kg
beta = 0.191794288875527
# beta = 1
Bo = 1.069
Bg = 0.00935
Rs = 300
gamma = 119
def diff_coef(z_nm, dt, dx):
return dt * alpha * (2 * z_nm) ** (1 / 2) / (dx**2)
def K(y):
return (beta - 1) * y / L + 1
def well_flow_rate(y, q0, prev_y, dy):
delta = np.minimum(np.maximum((np.full(prev_y.shape, zw1) - prev_y), 0) / dw, 1)
ny = prev_y.shape[0]
y_length = L * (y / (ny - 1))
rate_in_point = (1 - delta[y] ** 2) * K(y_length)
all_ys = np.linspace(0, L, ny)
rate_overall = [(1 - delta[pt] ** 2) * K(y_len) for pt, y_len in enumerate(all_ys)]
int_rate_overall = simps(y=rate_overall, dx=dy)
if int_rate_overall <= 0:
return 0
return max(q0 * (rate_in_point / int_rate_overall), 0)
def psi(gnc, dy):
all_ys = np.linspace(0, L, gnc.shape[0])
delta = np.minimum(np.maximum((np.full(gnc.shape, zw2) - gnc), 0) / dw, 1)
gas_diff = [(delta[pt] ** 2) * K(y_len) for pt, y_len in enumerate(all_ys)]
oil_diff = [(1 - delta[pt] ** 2) * K(y_len) for pt, y_len in enumerate(all_ys)]
gas_rate = simps(y=gas_diff, dx=dy)
oil_rate = simps(y=oil_diff, dx=dy)
return gas_rate / oil_rate
def plot(sol):
t = len(sol)
n_x = len(sol[0]) - 1
fig, ax = plt.subplots()
ax.axis([0, W, (2 * zw2) ** (1 / 2) - 1, (2 * h_0) ** (1 / 2)])
(l,) = ax.plot([], [], label="GOC")
def animate(i):
l.set_data(np.linspace(0, W, n_x + 1), sol[i])
ani = animation.FuncAnimation(fig, animate, frames=t)
plt.plot(
np.linspace(0, W, n_x + 1), [zw2 for _ in range(n_x + 1)], label="Well bottom"
)
plt.plot(
np.linspace(0, W, n_x + 1), [zw1 for _ in range(n_x + 1)], label="Well top"
)
plt.legend()
plt.show()
def solve(path, nt, nx, ny):
debits_rate = {}
with open(path, "r") as f:
dat = f.read().split()
# save gas and oil values
days = []
gas = []
oil = []
for i in range(len(dat) // 3):
days.append(int(dat[i * 3]))
oil.append(float(dat[i * 3 + 1]) * cube_to_kg)
gas.append(float(dat[i * 3 + 2]))
# interpolate oil
debit = interp1d(days, oil, fill_value="extrapolate")
Rs = gas[0] / oil[0]
# to predict gas
gas_init = gas[0]
gas_pred = []
dx = W / nx
dt = max(days) / nt
dy = L / ny
sol = np.zeros((nt + 1, nx, ny))
# initial condition
sol[0] = np.full((nx, ny), h_0)
for t, curr_t in enumerate(tqdm(np.linspace(0, int(max(days)), nt))):
for y, curr_y in enumerate(np.linspace(0, L, ny)):
A = np.zeros((nx, nx))
b = np.zeros(nx)
# left boundary condition
q_o = well_flow_rate(y, debit(curr_t), sol[t][0][:], dy) / (2 * alpha * phi)
debits_rate[(curr_t, curr_y)] = q_o
b[0] = -sol[t][1][y] + diff_coef(sol[t][1][y], dt, dx) * (q_o * dx) / (
alpha * phi
)
A[0][1] = -(1 - 2 * diff_coef(sol[t][1][y], dt, dx))
A[0][0] = -2 * diff_coef(sol[t][1][y], dt, dx)
# finite diff scheme
for i in range(1, nx - 1):
b[i] = -sol[t][i][y]
A[i][i - 1] = diff_coef(sol[t][i][y], dt, dx)
A[i][i] = -1 - 2 * diff_coef(sol[t][i][y], dt, dx)
A[i][i + 1] = diff_coef(sol[t][i][y], dt, dx)
# right boundary condition
b[nx - 1] = -sol[t][nx - 2][y]
A[nx - 1][nx - 2] = -(1 - 2 * diff_coef(sol[t][nx - 1][y], dt, dx))
A[nx - 1][nx - 1] = -2 * diff_coef(sol[t][nx - 1][y], dt, dx)
# x = tridiagonal_solution(A, b)
Ab = np.zeros((3, nx))
Ab[0, 1:] = A.diagonal(1)
Ab[1, :] = A.diagonal()
Ab[2, :-1] = A.diagonal(-1)
x = solve_banded((1, 1), Ab, b)
sol[t + 1, :, y] = x
# overall_volume.append(simps(simps(sol[t] - zw2, axis=0, dx=dx), axis=0, dx=dy))
# counting gas
psi_coef = psi(sol[t, 0, :], dy)
Qg = debit(curr_t) / cube_to_kg * (psi_coef * gamma / Bg + Rs / Bo)
qg = Bg * (Qg - debit(curr_t) / (Bo * cube_to_kg))
gas_pred.append(qg)
return sol, debits_rate
if __name__ == "__main__":
path = "/kaggle/input/1501-dat/1501.dat"
nx = 100
nt = 169
ny = 100
surface, well_rates = solve(path, nt, nx, ny)
# create one-dimensional coordinate arrays
t = np.linspace(0, 169, surface.shape[0])
x = np.linspace(0, W, surface.shape[1])
y = np.linspace(0, L, surface.shape[2])
# build a mesh grid from the three arrays
T, X, Y = np.meshgrid(t, x, y, indexing="ij")
# flatten array A into a one-dimensional array
A_flat = surface.flatten()
# use A_flat as the target values
Z = A_flat
X_input = np.stack([T, X, Y], axis=-1).reshape(-1, 3)
df = np.concatenate([X_input, Z.reshape(-1, 1)], axis=1)
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df, test_size=0.2, shuffle=True)
np.savetxt("./df_train.dat", df_train)
np.savetxt("./df_test.dat", df_test)
import deepxde as dde
data = dde.data.DataSet(
fname_train="./df_train.dat",
fname_test="./df_test.dat",
col_x=(0, 1, 2),
col_y=(3,),
standardize=True,
)
layer_size = [3] + [50] * 8 + [1]
activation = "tanh"
initializer = "Glorot normal"
net = dde.nn.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
model.compile("adam", lr=0.001, metrics=["l2 relative error"])
losshistory, train_state = model.train(iterations=50000)
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
# create one-dimensional coordinate arrays for the prediction grid
ntp = 77
nxp = 77
nyp = 77
t = np.linspace(-1.7149, 1.714654, ntp)
x = np.linspace(-1.714971, 1.7142838, nxp)
y = np.linspace(-1.714554, 1.7149646, nyp)
# build a mesh grid from the three arrays
T, X, Y = np.meshgrid(t, x, y, indexing="ij")
X_input = np.stack([T, X, Y], axis=-1).reshape(-1, 3)
surf = model.predict(X_input).reshape(ntp, nxp, nyp)
cone = np.concatenate([surf[:, ::-1, :], surf], axis=1)
cone = (2 * cone) ** (1 / 2)
def update_plot(frame_number):
ax.clear()
ax.set_zlim((2 * zw2) ** (1 / 2) - 0.5, 7)
z = np.swapaxes(cone[frame_number, :, :], 0, 1)
surface_plot = ax.plot_surface(
X, Y, z, cmap="coolwarm", vmin=np.min(np.abs(z)), vmax=np.max(np.abs(z))
)
surface_plot.set_facecolor((0, 0, 0, 0))
ax.auto_scale_xyz(
[X.min(), X.max()], [Y.min(), Y.max()], [(2 * zw2) ** (1 / 2) - 0.5, 7]
)
well_level = np.ones_like(
np.array([np.linspace(-W, W, 3), np.linspace(0, L, 3)])
) * (2 * zw2) ** (1 / 2)
well_level_plot = ax.plot_surface(
np.linspace(-W, W, 3),
np.linspace(0, L, 3),
well_level,
alpha=0.7,
color="grey",
label="well_level",
)
well_level_plot.set_facecolor((0, 0, 0, 0))
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
X = np.linspace(-150, 150, 2 * nxp)
Y = np.linspace(0, L, nyp)
X, Y = np.meshgrid(X, Y)
z = np.swapaxes(cone[0, :, :], 0, 1)
cmap = plt.cm.get_cmap("coolwarm")
plot = [
ax.plot_surface(
X,
Y,
z,
color="0.75",
rstride=1,
cstride=1,
cmap=cmap,
vmin=np.min(np.abs(z)),
vmax=np.max(np.abs(z)),
)
]
ax.set_zlim((2 * zw2) ** (1 / 2) - 0.5, 7)
ani = animation.FuncAnimation(fig, update_plot, ntp, interval=100)
from IPython.display import HTML
HTML(ani.to_jshtml())
| false | 0 | 3,398 | 0 | 3,398 | 3,398 |
||
129440675
|
# Here's an end-to-end data analytics project that includes codes, visualizations, and some insightful analysis. In this project, we'll be working with a dataset containing information about online retail transactions. We'll perform data cleaning, exploratory data analysis, and generate meaningful insights from the data.
# **Step 1: Import Required Libraries**
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
## The import keyword is used to import libraries or modules in Python.
## pandas, numpy, matplotlib.pyplot, and seaborn are the libraries being imported.
## pd, np, plt, and sns are aliases or shorthand names given to these libraries to make it easier to refer to them in the code.
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# **Step 2: Load the Data**
data = pd.read_csv("../input/retail-dataset/end2endretail.csv")
data.head(10)  # first 10 rows
# We load the dataset into a pandas DataFrame using the read_csv() function. Make sure to provide the correct file path and encoding if needed.
# **Step 3: Data Cleaning and Preprocessing**
# Remove missing values and duplicates
data = data.dropna()
data = data.drop_duplicates()
# Convert the invoice date to datetime format
data["InvoiceDate"] = pd.to_datetime(data["InvoiceDate"])
# Extract additional features from the invoice date
data["Year"] = data["InvoiceDate"].dt.year
data["Month"] = data["InvoiceDate"].dt.month
data["Day"] = data["InvoiceDate"].dt.day
data["Hour"] = data["InvoiceDate"].dt.hour
# Filter out negative quantity and price values
data = data[(data["Quantity"] > 0) & (data["UnitPrice"] > 0)]
# In this step, we perform data cleaning and preprocessing operations to ensure the data is in a suitable format for analysis. This includes removing missing values and duplicates, converting the invoice date to datetime format, extracting additional features from the date (e.g., year, month, day, hour), and filtering out negative quantity and price values.
# **Step 4: Exploratory Data Analysis and Visualizations**
# Top 10 countries with the highest number of transactions
top_countries = data["Country"].value_counts().head(10)
plt.figure(figsize=(12, 6))
sns.barplot(x=top_countries.index, y=top_countries.values)
plt.title("Top 10 Countries with the Highest Number of Transactions")
plt.xlabel("Country")
plt.ylabel("Number of Transactions")
plt.xticks(rotation=45)
plt.show()
# This visualization presents a count plot that illustrates the number of repeat customers based on the number of invoices they have made.
# It helps you understand the distribution of repeat customers and the frequency of their purchases.
repeat_customers = data.groupby("CustomerID")["InvoiceNo"].nunique().reset_index()
repeat_customers = repeat_customers[repeat_customers["InvoiceNo"] > 1]
plt.figure(figsize=(8, 6))
sns.countplot(data=repeat_customers, x="InvoiceNo")
plt.title("Number of Repeat Customers")
plt.xlabel("Number of Invoices")
plt.ylabel("Count")
plt.show()
# In this step, we explore the data and gain insights through visualizations. In the provided example, we demonstrate two types of visualizations:
# **Step 5: Generate Insights**
# Total quantity sold
total_quantity = data["Quantity"].sum()
print("Total Quantity:", total_quantity)
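# A revenue figure is also worth reporting (assuming revenue = Quantity * UnitPrice)
total_revenue = (data["Quantity"] * data["UnitPrice"]).sum()
print("Total Revenue:", total_revenue)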
# Average quantity per month
# build monthly totals first, then average them by calendar month
monthly_quantity = data.groupby(["Year", "Month"])["Quantity"].sum().reset_index()
average_quantity_per_month = monthly_quantity.groupby("Month")["Quantity"].mean()
plt.figure(figsize=(10, 6))
sns.barplot(x=average_quantity_per_month.index, y=average_quantity_per_month.values)
plt.title("Average Quantity per Month")
plt.xlabel("Month")
plt.ylabel("Average Quantity")
plt.show()
# Quantity Distribution by Country
quantity_by_country = (
data.groupby("Country")["Quantity"].sum().sort_values(ascending=False)
)
print("Quantity Distribution by Country:\n", quantity_by_country)
quantity_by_country = (
data.groupby("Country")["Quantity"].sum().sort_values(ascending=False)
)
plt.figure(figsize=(12, 6))
sns.barplot(x=quantity_by_country.index, y=quantity_by_country.values)
plt.title("Quantity Distribution by Country")
plt.xlabel("Country")
plt.ylabel("Quantity")
plt.xticks(rotation=45)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/440/129440675.ipynb
| null | null |
[{"Id": 129440675, "ScriptId": 38484716, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6455124, "CreationDate": "05/13/2023 20:45:42", "VersionNumber": 1.0, "Title": "retail dataset analysis", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 108.0, "LinesInsertedFromPrevious": 108.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Here's an end-to-end data analytics project that includes codes, visualizations, and some insightful analysis. In this project, we'll be working with a dataset containing information about online retail transactions. We'll perform data cleaning, exploratory data analysis, and generate meaningful insights from the data.
# **Step 1: Import Required Libraries**
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
## The import keyword is used to import libraries or modules in Python.
## pandas, numpy, matplotlib.pyplot, and seaborn are the libraries being imported.
## pd, np, plt, and sns are aliases or shorthand names given to these libraries to make it easier to refer to them in the code.
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# **Step 2: Load the Data**
data = pd.read_csv("../input/retail-dataset/end2endretail.csv")
data.head(10)  # first 10 rows
# We load the dataset into a pandas DataFrame using the read_csv() function. Make sure to provide the correct file path and encoding if needed.
# **Step 3: Data Cleaning and Preprocessing**
# Remove missing values and duplicates
data = data.dropna()
data = data.drop_duplicates()
# Convert the invoice date to datetime format
data["InvoiceDate"] = pd.to_datetime(data["InvoiceDate"])
# Extract additional features from the invoice date
data["Year"] = data["InvoiceDate"].dt.year
data["Month"] = data["InvoiceDate"].dt.month
data["Day"] = data["InvoiceDate"].dt.day
data["Hour"] = data["InvoiceDate"].dt.hour
# Filter out negative quantity and price values
data = data[(data["Quantity"] > 0) & (data["UnitPrice"] > 0)]
# In this step, we perform data cleaning and preprocessing operations to ensure the data is in a suitable format for analysis. This includes removing missing values and duplicates, converting the invoice date to datetime format, extracting additional features from the date (e.g., year, month, day, hour), and filtering out negative quantity and price values.
# **Step 4: Exploratory Data Analysis and Visualizations**
# Top 10 countries with the highest number of transactions
top_countries = data["Country"].value_counts().head(10)
plt.figure(figsize=(12, 6))
sns.barplot(x=top_countries.index, y=top_countries.values)
plt.title("Top 10 Countries with the Highest Number of Transactions")
plt.xlabel("Country")
plt.ylabel("Number of Transactions")
plt.xticks(rotation=45)
plt.show()
# This visualization presents a count plot that illustrates the number of repeat customers based on the number of invoices they have made.
# It helps you understand the distribution of repeat customers and the frequency of their purchases.
repeat_customers = data.groupby("CustomerID")["InvoiceNo"].nunique().reset_index()
repeat_customers = repeat_customers[repeat_customers["InvoiceNo"] > 1]
plt.figure(figsize=(8, 6))
sns.countplot(data=repeat_customers, x="InvoiceNo")
plt.title("Number of Repeat Customers")
plt.xlabel("Number of Invoices")
plt.ylabel("Count")
plt.show()
# In this step, we explore the data and gain insights through visualizations. In the provided example, we demonstrate two types of visualizations:
# **Step 5: Generate Insights**
# Total quantity sold
total_quantity = data["Quantity"].sum()
print("Total Quantity:", total_quantity)
# Average quantity per month
# build monthly totals first, then average them by calendar month
monthly_quantity = data.groupby(["Year", "Month"])["Quantity"].sum().reset_index()
average_quantity_per_month = monthly_quantity.groupby("Month")["Quantity"].mean()
plt.figure(figsize=(10, 6))
sns.barplot(x=average_quantity_per_month.index, y=average_quantity_per_month.values)
plt.title("Average Quantity per Month")
plt.xlabel("Month")
plt.ylabel("Average Quantity")
plt.show()
# Quantity Distribution by Country
quantity_by_country = (
data.groupby("Country")["Quantity"].sum().sort_values(ascending=False)
)
print("Quantity Distribution by Country:\n", quantity_by_country)
quantity_by_country = (
data.groupby("Country")["Quantity"].sum().sort_values(ascending=False)
)
plt.figure(figsize=(12, 6))
sns.barplot(x=quantity_by_country.index, y=quantity_by_country.values)
plt.title("Quantity Distribution by Country")
plt.xlabel("Country")
plt.ylabel("Quantity")
plt.xticks(rotation=45)
plt.show()
| false | 0 | 1,129 | 0 | 1,129 | 1,129 |
||
129440104
|
<jupyter_start><jupyter_text>cardata
Kaggle dataset identifier: cardata
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
data = pd.read_csv("../input/cardata/cardata.csv")
df
df = pd.DataFrame(data)
df
import pandas_profiling
pandas_profiling.ProfileReport(df)
# # **Now Analyse Data to find Noises**
plt.figure(figsize=(10, 8))
plt.scatter(df["Year"], df["Selling_Price"])
plt.xlabel("Year", fontsize=15)
plt.ylabel("Selling_Price", fontsize=15)
plt.xticks(np.arange(2002, 2019, 1))
plt.yticks(np.arange(0, 36, 5))
plt.title("Car selling price", fontsize=15)
plt.grid()
plt.show()
# # **Can we drop it as noise?**
# **NO! based on real world, it would be reasonable**
df[df["Selling_Price"] > 25]
plt.figure(figsize=(10, 8))
plt.scatter(df["Selling_Price"], df["Present_Price"])
plt.xlabel("Selling Price", fontsize=15)
plt.ylabel("Present Price", fontsize=15)
plt.title("Selling and Present Car Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Kms_Driven"], df["Selling_Price"])
plt.xlabel("Kms_Driven", fontsize=15)
plt.ylabel("Selling_Price", fontsize=15)
plt.title("Kms Driven VS Selling Price", fontsize=15)
plt.grid()
plt.show()
# # Is it OK to keep a data so far like '500000 km' driven?
# **it's ok based on real world! 500,000 kilometer driven during almost 10 years would be reasonable as well!**
df[df["Kms_Driven"] > 300000]
plt.figure(figsize=(10, 8))
plt.scatter(df["Fuel_Type"], df["Selling_Price"])
plt.xlabel("Fuel Type", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Fuel_Type VS Selling Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Seller_Type"], df["Selling_Price"])
plt.xlabel("Seller Type", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Seller Type VS Selling Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Transmission"], df["Selling_Price"])
plt.xlabel("Transmission", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Transmission VS Selling Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Owner"], df["Selling_Price"])
plt.xlabel("Owner", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Owner VS Selling Price", fontsize=15)
plt.grid()
plt.show()
# # So I decided to keep the whole data
Age = 2019 - df["Year"]
Age = pd.DataFrame(Age)
df.insert(9, "Age", Age)
df
df.drop(columns=["Car_Name", "Year"], inplace=True)
df
# # The next step is to convert string features into numerical features so that we can create a model
# *Important : we won't use [0,1] numbers because of their negative effects on regression equation*
df["Seller_Type"].replace("Dealer", 2, inplace=True)
df["Seller_Type"].replace("Individual", 3, inplace=True)
# ______________________________________________________#
df["Transmission"].replace("Manual", 2, inplace=True)
df["Transmission"].replace("Automatic", 3, inplace=True)
# ______________________________________________________#
df["Fuel_Type"].replace("Petrol", 2, inplace=True)
df["Fuel_Type"].replace("Diesel", 3, inplace=True)
df["Fuel_Type"].replace("CNG", 4, inplace=True)
df
df.corr()
# # Best correlation between 'Present Price' and 'Selling Price'
# # Time for creating Linear Model
X = pd.DataFrame(
df,
columns=[
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Improving Model
model_new = LinearRegression()
kFold_validation = KFold(10, shuffle=True)
results = cross_val_score(model_new, X, y, cv=kFold_validation)
print(results)
print(np.mean(results))
Present_Price2 = df.Present_Price**2
Present_Price2
df.insert(1, "Present_Price2", Present_Price2)
df
X = pd.DataFrame(
df,
columns=[
"Present_Price",
"Present_Price2",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Fuel_Type2 = df.Fuel_Type**2
Fuel_Type2
df.insert(1, "Fuel_Type2", Fuel_Type2)
df
X = pd.DataFrame(
df,
columns=[
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Kms_Driven2 = df.Kms_Driven**2
Kms_Driven2
df.insert(1, "Kms_Driven2", Kms_Driven2)
df
X = pd.DataFrame(
df,
columns=[
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Seller_Type2 = df.Seller_Type**2
df.insert(1, "Seller_Type2", Seller_Type2)
df
X = pd.DataFrame(
df,
columns=[
"Seller_Type2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Hyper dimension for 'Seller Type' feature didn't have a good effect so it's better to drop it.
df.drop("Seller_Type2", axis=1, inplace=True)
df
Transmission2 = df.Transmission**2
df.insert(1, "Transmission2", Transmission2)
df
X = pd.DataFrame(
df,
columns=[
"Transmission2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Hyper dimension for 'Transmission' feature didn't have a good effect too.
df.drop("Transmission2", axis=1, inplace=True)
Owner2 = df.Owner**2
df.insert(1, "Owner2", Owner2)
df
X = pd.DataFrame(
df,
columns=[
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Age2 = df.Age**2
df.insert(1, "Age2", Age2)
df
X = pd.DataFrame(
df,
columns=[
"Age2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Wow hyper dimention for 'Age' feature had a negative effect! DROP!
df.drop("Age2", axis=1, inplace=True)
# # Trying Higher Dimentions
Present_Price3 = df.Present_Price**3
df.insert(1, "Present_Price3", Present_Price3)
df
X = pd.DataFrame(
df,
columns=[
"Present_Price3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Not good!
df.drop("Present_Price3", axis=1, inplace=True)
Kms_Driven3 = df.Kms_Driven**3
df.insert(1, "Kms_Driven3", Kms_Driven3)
df
X = pd.DataFrame(
df,
columns=[
"Kms_Driven3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Not good again!
df.drop("Kms_Driven3", axis=1, inplace=True)
Fuel_Type3 = df.Fuel_Type**3
df.insert(1, "Fuel_Type3", Fuel_Type3)
df
X = pd.DataFrame(
df,
columns=[
"Fuel_Type3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Owner3 = df.Owner**3
df.insert(1, "Owner3", Owner3)
df
X = pd.DataFrame(
df,
columns=[
"Owner3",
"Fuel_Type3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # No significant improvement was observed!
df.drop("Owner3", axis=1, inplace=True)
df.drop("Fuel_Type3", axis=1, inplace=True)
df
# # Integration of features!
df.corr()
# Present_Price = PP
PP_kms = df["Present_Price"] * df["Kms_Driven"]
df.insert(1, "Present_Kms", PP_kms)
X = pd.DataFrame(
df,
columns=[
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
PP_Kms2 = df["Present_Price"] * df["Kms_Driven2"]
df.insert(1, "Present_Kms2", PP_Kms2)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
PP_Kms3 = df["Present_Price2"] * df["Kms_Driven2"]
df.insert(1, "Present_Kms3", PP_Kms3)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Kms3",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Present Price2 * Kms Driven2 is not sufficient due to its r2 score
df.drop("Present_Kms3", axis=1, inplace=True)
PP_Kms4 = df["Present_Price"] * df["Kms_Driven2"]
df.insert(1, "Present_Kms4", PP_Kms4)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Kms4",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Kms4", axis=1, inplace=True)
PP_Fuel = df["Present_Price"] * df["Fuel_Type"]
df.insert(1, "Present_Fuel", PP_Fuel)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel", axis=1, inplace=True)
PP_Fuel2 = df["Present_Price"] * df["Fuel_Type2"]
df.insert(1, "Present_Fuel2", PP_Fuel2)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel2",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel2", axis=1, inplace=True)
PP_Fuel3 = df["Present_Price2"] * df["Fuel_Type2"]
df.insert(1, "Present_Fuel3", PP_Fuel3)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel3",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel3", axis=1, inplace=True)
PP_Fuel4 = df["Present_Price"] * df["Fuel_Type2"]
df.insert(1, "Present_Fuel4", PP_Fuel4)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel4",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel4", axis=1, inplace=True)
PP_Owner = df["Present_Price"] * df["Owner"]
df.insert(1, "Present_Owner", PP_Owner)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
PP_Owner2 = df["Present_Price2"] * df["Owner2"]
df.insert(1, "Present_Owner2", PP_Owner2)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner2",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Owner2", axis=1, inplace=True)
PP_Owner3 = df["Present_Price2"] * df["Owner"]
df.insert(1, "Present_Owner3", PP_Owner3)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner3",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Owner3", axis=1, inplace=True)
PP_Owner4 = df["Present_Price"] * df["Owner2"]
df.insert(1, "Present_Owner4", PP_Owner4)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner4",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Owner4", axis=1, inplace=True)
X
model_new3 = LinearRegression()
kFold_validation = KFold(7, shuffle=True)
results = cross_val_score(model_new3, X, y, cv=kFold_validation)
print(results)
print(np.mean(results))
# # Normalizing
df
Scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
Norm1 = Scaler.fit_transform(df)
Norm_df = pd.DataFrame(
Norm1,
columns=[
"Selling_Price",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
Norm_df.describe()
Norm_df
X_norm = pd.DataFrame(
Norm_df,
columns=[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y_norm = Norm_df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(
X_norm, y_norm, test_size=0.2, random_state=0
)
model_1 = LinearRegression()
model_1.fit(X_train, y_train)
y_pred = model_1.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
compare = pd.DataFrame({"Actual": y_test.flatten(), "Prediction": y_pred.flatten()})
compare
X_test = pd.DataFrame(X_test)
X_test.insert(0, "y_test1", y_test)
X_test.insert(0, "y_pred1", y_pred)
X_test.sort_values(by="Present_Price", inplace=True)
a = X_test.Present_Price
b = X_test.y_test1
c = X_test.Present_Price
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(a, b, color="grey")
plt.plot(c, d, color="orange")
plt.xlabel("Present Price")
plt.ylabel("Selling Price")
plt.grid()
plt.show()
X_test.sort_values(by="Kms_Driven", inplace=True)
a = X_test.Kms_Driven
b = X_test.y_test1
c = X_test.Kms_Driven
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(a, b, color="grey")
plt.plot(c, d, color="orange")
plt.xlabel("Kms_Driven")
plt.ylabel("Selling Price")
plt.grid()
plt.show()
X_test.sort_values(by="Present_Kms", inplace=True)
a = X_test.Present_Kms
b = X_test.y_test1
c = X_test.Present_Kms
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(a, b, color="grey")
plt.plot(c, d, color="orange")
plt.xlabel("Present_Kms")
plt.ylabel("Selling Price")
plt.grid()
plt.show()
a = X_test.Present_Kms
b = X_test.y_test1
c = X_test.Present_Kms
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(b, d, color="grey")
# plt.plot(c,d, color='orange')
plt.xlabel("Actual", fontsize=12)
plt.ylabel("Prediction", fontsize=12)
plt.grid()
plt.show()
Norm_df
# # Time for adding new data and Normalize again
Age = 10
Present_Price = 11.23
Kms_driven = 42000
Fuel_Type = 2
Seller_Type = 2
Transmission = 2
Owner = 1
df2 = pd.DataFrame(
{
"Selling_Price": [5],
"Present_Owner": [Present_Price * Owner],
"Present_Kms2": [Present_Price * (Kms_driven**2)],
"Present_Kms": [Present_Price * Kms_driven],
"Owner2": [Owner**2],
"Kms_Driven2": [Kms_driven**2],
"Fuel_Type2": [Fuel_Type**2],
"Present_Price2": [Present_Price**2],
"Present_Price": [Present_Price],
"Kms_Driven": [Kms_driven],
"Fuel_Type": [Fuel_Type],
"Seller_Type": [Seller_Type],
"Transmission": [Transmission],
"Owner": [Owner],
"Age": [Age],
}
)
df2
df3 = df.append(df2)
Scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
Norm1 = Scaler.fit_transform(
df3[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
]
)
Norm_df = pd.DataFrame(
Norm1,
columns=[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
Norm_df
x = Norm_df[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][:301]
y = df3["Selling_Price"][:301]
x_test = Norm_df[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][301:]
x_test
regressor = LinearRegression()
regressor.fit(x, y)
y_pred = regressor.predict(x_test)
y_pred
x = df3[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][:301]
y = df3["Selling_Price"][:301]
x_test = df3[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][301:]
y_test = df3["Selling_Price"][301:]
regressor = LinearRegression()
regressor.fit(x, y)
y_pred = regressor.predict(x_test)
y_pred
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/440/129440104.ipynb
|
cardata
|
alifarajnia
|
[{"Id": 129440104, "ScriptId": 38487263, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12969510, "CreationDate": "05/13/2023 20:36:41", "VersionNumber": 3.0, "Title": "Car project", "EvaluationDate": "05/13/2023", "IsChange": false, "TotalLines": 778.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 778.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185491307, "KernelVersionId": 129440104, "SourceDatasetVersionId": 3962880}]
|
[{"Id": 3962880, "DatasetId": 2351942, "DatasourceVersionId": 4018438, "CreatorUserId": 8788103, "LicenseName": "Unknown", "CreationDate": "07/20/2022 00:39:31", "VersionNumber": 1.0, "Title": "cardata", "Slug": "cardata", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2351942, "CreatorUserId": 8788103, "OwnerUserId": 8788103.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3962880.0, "CurrentDatasourceVersionId": 4018438.0, "ForumId": 2378962, "Type": 2, "CreationDate": "07/20/2022 00:39:31", "LastActivityDate": "07/20/2022", "TotalViews": 785, "TotalDownloads": 127, "TotalVotes": 44, "TotalKernels": 10}]
|
[{"Id": 8788103, "UserName": "alifarajnia", "DisplayName": "Ali Farajnia", "RegisterDate": "11/04/2021", "PerformanceTier": 3}]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
data = pd.read_csv("../input/cardata/cardata.csv")
df
df = pd.DataFrame(data)
df
import pandas_profiling
pandas_profiling.ProfileReport(df)
# # **Now Analyse Data to find Noises**
plt.figure(figsize=(10, 8))
plt.scatter(df["Year"], df["Selling_Price"])
plt.xlabel("Year", fontsize=15)
plt.ylabel("Selling_Price", fontsize=15)
plt.xticks(np.arange(2002, 2019, 1))
plt.yticks(np.arange(0, 36, 5))
plt.title("Car selling price", fontsize=15)
plt.grid()
plt.show()
# # **Can we drop it as noise?**
# **NO! based on real world, it would be reasonable**
df[df["Selling_Price"] > 25]
plt.figure(figsize=(10, 8))
plt.scatter(df["Selling_Price"], df["Present_Price"])
plt.xlabel("Selling Price", fontsize=15)
plt.ylabel("Present Price", fontsize=15)
plt.title("Selling and Present Car Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Kms_Driven"], df["Selling_Price"])
plt.xlabel("Kms_Driven", fontsize=15)
plt.ylabel("Selling_Price", fontsize=15)
plt.title("Kms Driven VS Selling Price", fontsize=15)
plt.grid()
plt.show()
# # Is it OK to keep a data so far like '500000 km' driven?
# **it's ok based on real world! 500,000 kilometer driven during almost 10 years would be reasonable as well!**
df[df["Kms_Driven"] > 300000]
plt.figure(figsize=(10, 8))
plt.scatter(df["Fuel_Type"], df["Selling_Price"])
plt.xlabel("Fuel Type", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Fuel_Type VS Selling Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Seller_Type"], df["Selling_Price"])
plt.xlabel("Seller Type", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Seller Type VS Selling Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Transmission"], df["Selling_Price"])
plt.xlabel("Transmission", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Transmission VS Selling Price", fontsize=15)
plt.grid()
plt.show()
plt.figure(figsize=(10, 8))
plt.scatter(df["Owner"], df["Selling_Price"])
plt.xlabel("Owner", fontsize=15)
plt.ylabel("Selling Price", fontsize=15)
plt.title("Owner VS Selling Price", fontsize=15)
plt.grid()
plt.show()
# # So I decided to keep the whole data
Age = 2019 - df["Year"]
Age = pd.DataFrame(Age)
df.insert(9, "Age", Age)
df
df.drop(columns=["Car_Name", "Year"], inplace=True)
df
# # The next step is to convert string features into numerical features so that we can create a model
# *Important : we won't use [0,1] numbers because of their negative effects on regression equation*
df["Seller_Type"].replace("Dealer", 2, inplace=True)
df["Seller_Type"].replace("Individual", 3, inplace=True)
# ______________________________________________________#
df["Transmission"].replace("Manual", 2, inplace=True)
df["Transmission"].replace("Automatic", 3, inplace=True)
# ______________________________________________________#
df["Fuel_Type"].replace("Petrol", 2, inplace=True)
df["Fuel_Type"].replace("Diesel", 3, inplace=True)
df["Fuel_Type"].replace("CNG", 4, inplace=True)
df
df.corr()
# # Best correlation between 'Present Price' and 'Selling Price'
# # Time for creating Linear Model
X = pd.DataFrame(
df,
columns=[
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Improving Model
model_new = LinearRegression()
kFold_validation = KFold(10, shuffle=True)
results = cross_val_score(model_new, X, y, cv=kFold_validation)
print(results)
print(np.mean(results))
Present_Price2 = df.Present_Price**2
Present_Price2
df.insert(1, "Present_Price2", Present_Price2)
df
X = pd.DataFrame(
df,
columns=[
"Present_Price",
"Present_Price2",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Fuel_Type2 = df.Fuel_Type**2
Fuel_Type2
df.insert(1, "Fuel_Type2", Fuel_Type2)
df
X = pd.DataFrame(
df,
columns=[
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Kms_Driven2 = df.Kms_Driven**2
Kms_Driven2
df.insert(1, "Kms_Driven2", Kms_Driven2)
df
X = pd.DataFrame(
df,
columns=[
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Seller_Type2 = df.Seller_Type**2
df.insert(1, "Seller_Type2", Seller_Type2)
df
X = pd.DataFrame(
df,
columns=[
"Seller_Type2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Hyper dimension for 'Seller Type' feature didn't have a good effect so it's better to drop it.
df.drop("Seller_Type2", axis=1, inplace=True)
df
Transmission2 = df.Transmission**2
df.insert(1, "Transmission2", Transmission2)
df
X = pd.DataFrame(
df,
columns=[
"Transmission2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Hyper dimension for 'Transmission' feature didn't have a good effect too.
df.drop("Transmission2", axis=1, inplace=True)
Owner2 = df.Owner**2
df.insert(1, "Owner2", Owner2)
df
X = pd.DataFrame(
df,
columns=[
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Age2 = df.Age**2
df.insert(1, "Age2", Age2)
df
X = pd.DataFrame(
df,
columns=[
"Age2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Wow hyper dimention for 'Age' feature had a negative effect! DROP!
df.drop("Age2", axis=1, inplace=True)
# # Trying Higher Dimentions
Present_Price3 = df.Present_Price**3
df.insert(1, "Present_Price3", Present_Price3)
df
X = pd.DataFrame(
df,
columns=[
"Present_Price3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Not good!
df.drop("Present_Price3", axis=1, inplace=True)
Kms_Driven3 = df.Kms_Driven**3
df.insert(1, "Kms_Driven3", Kms_Driven3)
df
X = pd.DataFrame(
df,
columns=[
"Kms_Driven3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Not good again!
df.drop("Kms_Driven3", axis=1, inplace=True)
Fuel_Type3 = df.Fuel_Type**3
df.insert(1, "Fuel_Type3", Fuel_Type3)
df
X = pd.DataFrame(
df,
columns=[
"Fuel_Type3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
Owner3 = df.Owner**3
df.insert(1, "Owner3", Owner3)
df
X = pd.DataFrame(
df,
columns=[
"Owner3",
"Fuel_Type3",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # No significant improvement was observed!
df.drop("Owner3", axis=1, inplace=True)
df.drop("Fuel_Type3", axis=1, inplace=True)
df
# # Integration of features!
df.corr()
# Present_Price = PP
PP_kms = df["Present_Price"] * df["Kms_Driven"]
df.insert(1, "Present_Kms", PP_kms)
X = pd.DataFrame(
df,
columns=[
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
PP_Kms2 = df["Present_Price"] * df["Kms_Driven2"]
df.insert(1, "Present_Kms2", PP_Kms2)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
PP_Kms3 = df["Present_Price2"] * df["Kms_Driven2"]
df.insert(1, "Present_Kms3", PP_Kms3)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Kms3",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
# # Present Price2 * Kms Driven2 is not sufficient due to its r2 score
df.drop("Present_Kms3", axis=1, inplace=True)
PP_Kms4 = df["Present_Price"] * df["Kms_Driven2"]
df.insert(1, "Present_Kms4", PP_Kms4)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Kms4",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Kms4", axis=1, inplace=True)
PP_Fuel = df["Present_Price"] * df["Fuel_Type"]
df.insert(1, "Present_Fuel", PP_Fuel)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel", axis=1, inplace=True)
PP_Fuel2 = df["Present_Price"] * df["Fuel_Type2"]
df.insert(1, "Present_Fuel2", PP_Fuel2)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel2",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel2", axis=1, inplace=True)
PP_Fuel3 = df["Present_Price2"] * df["Fuel_Type2"]
df.insert(1, "Present_Fuel3", PP_Fuel3)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel3",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel3", axis=1, inplace=True)
PP_Fuel4 = df["Present_Price"] * df["Fuel_Type2"]
df.insert(1, "Present_Fuel4", PP_Fuel4)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Fuel4",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Fuel4", axis=1, inplace=True)
PP_Owner = df["Present_Price"] * df["Owner"]
df.insert(1, "Present_Owner", PP_Owner)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
PP_Owner2 = df["Present_Price2"] * df["Owner2"]
df.insert(1, "Present_Owner2", PP_Owner2)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner2",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Owner2", axis=1, inplace=True)
PP_Owner3 = df["Present_Price2"] * df["Owner"]
df.insert(1, "Present_Owner3", PP_Owner3)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner3",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Owner3", axis=1, inplace=True)
PP_Owner4 = df["Present_Price"] * df["Owner2"]
df.insert(1, "Present_Owner4", PP_Owner4)
# _________________________________________
X = pd.DataFrame(
df,
columns=[
"Present_Owner4",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y = df["Selling_Price"].values.reshape(-1, 1)
# _________________________________________
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# _________________________________________
print("R2 score: ", metrics.r2_score(y_test, y_pred))
df.drop("Present_Owner4", axis=1, inplace=True)
X
model_new3 = LinearRegression()
kFold_validation = KFold(7, shuffle=True)
results = cross_val_score(model_new3, X, y, cv=kFold_validation)
print(results)
print(np.mean(results))
# # Normalizing
df
Scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
Norm1 = Scaler.fit_transform(df)
Norm_df = pd.DataFrame(
Norm1,
columns=[
"Selling_Price",
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
Norm_df.describe()
Norm_df
X_norm = pd.DataFrame(
Norm_df,
columns=[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
y_norm = Norm_df["Selling_Price"].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(
X_norm, y_norm, test_size=0.2, random_state=0
)
model_1 = LinearRegression()
model_1.fit(X_train, y_train)
y_pred = model_1.predict(X_test)
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 score: ", metrics.r2_score(y_test, y_pred))
compare = pd.DataFrame({"Actual": y_test.flatten(), "Prediction": y_pred.flatten()})
compare
X_test = pd.DataFrame(X_test)
X_test.insert(0, "y_test1", y_test)
X_test.insert(0, "y_pred1", y_pred)
X_test.sort_values(by="Present_Price", inplace=True)
a = X_test.Present_Price
b = X_test.y_test1
c = X_test.Present_Price
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(a, b, color="grey")
plt.plot(c, d, color="orange")
plt.xlabel("Present Price")
plt.ylabel("Selling Price")
plt.grid()
plt.show()
X_test.sort_values(by="Kms_Driven", inplace=True)
a = X_test.Kms_Driven
b = X_test.y_test1
c = X_test.Kms_Driven
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(a, b, color="grey")
plt.plot(c, d, color="orange")
plt.xlabel("Kms_Driven")
plt.ylabel("Selling Price")
plt.grid()
plt.show()
X_test.sort_values(by="Present_Kms", inplace=True)
a = X_test.Present_Kms
b = X_test.y_test1
c = X_test.Present_Kms
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(a, b, color="grey")
plt.plot(c, d, color="orange")
plt.xlabel("Present_Kms")
plt.ylabel("Selling Price")
plt.grid()
plt.show()
a = X_test.Present_Kms
b = X_test.y_test1
c = X_test.Present_Kms
d = X_test.y_pred1
plt.figure(figsize=(10, 10))
plt.scatter(b, d, color="grey")
# plt.plot(c,d, color='orange')
plt.xlabel("Actual", fontsize=12)
plt.ylabel("Prediction", fontsize=12)
plt.grid()
plt.show()
Norm_df
# # Time for adding new data and Normalize again
Age = 10
Present_Price = 11.23
Kms_driven = 42000
Fuel_Type = 2
Seller_Type = 2
Transmission = 2
Owner = 1
df2 = pd.DataFrame(
{
"Selling_Price": [5],
"Present_Owner": [Present_Price * Owner],
"Present_Kms2": [Present_Price * (Kms_driven**2)],
"Present_Kms": [Present_Price * Kms_driven],
"Owner2": [Owner**2],
"Kms_Driven2": [Kms_driven**2],
"Fuel_Type2": [Fuel_Type**2],
"Present_Price2": [Present_Price**2],
"Present_Price": [Present_Price],
"Kms_Driven": [Kms_driven],
"Fuel_Type": [Fuel_Type],
"Seller_Type": [Seller_Type],
"Transmission": [Transmission],
"Owner": [Owner],
"Age": [Age],
}
)
df2
df3 = df.append(df2)
Scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
Norm1 = Scaler.fit_transform(
df3[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
]
)
Norm_df = pd.DataFrame(
Norm1,
columns=[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
],
)
Norm_df
x = Norm_df[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][:301]
y = df3["Selling_Price"][:301]
x_test = Norm_df[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][301:]
x_test
regressor = LinearRegression()
regressor.fit(x, y)
y_pred = regressor.predict(x_test)
y_pred
x = df3[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][:301]
y = df3["Selling_Price"][:301]
x_test = df3[
[
"Present_Owner",
"Present_Kms2",
"Present_Kms",
"Owner2",
"Kms_Driven2",
"Fuel_Type2",
"Present_Price2",
"Present_Price",
"Kms_Driven",
"Fuel_Type",
"Seller_Type",
"Transmission",
"Owner",
"Age",
]
][301:]
y_test = df3["Selling_Price"][301:]
regressor = LinearRegression()
regressor.fit(x, y)
y_pred = regressor.predict(x_test)
y_pred
| false | 1 | 11,013 | 0 | 11,031 | 11,013 |
||
129440850
|
# # Imports
from keras.datasets import mnist
import numpy as np
from matplotlib.pyplot import plot as plt
import sys
# # Standardization
def standardize(x):
return (x - np.mean(x)) / np.std(x)
# # Prediction
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def predict(X, W, B):
pred = sigmoid(np.dot(X, W) + B)
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
return pred
# # Cost Function
def cost(y, y_hat):
return np.mean(
-y * np.log(y_hat + sys.float_info.min)
- (1 - y) * np.log(1 - y_hat + sys.float_info.min)
)
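# Quick illustrative check of the helpers above on hand-made values (toy numbers,
# not part of the MNIST data):
X_demo = np.array([[0.5, -1.0], [2.0, 1.0], [-1.5, 0.5]])
W_demo = np.array([1.0, -0.5])
B_demo = 0.1
print(sigmoid(np.dot(X_demo, W_demo) + B_demo))  # raw probabilities
print(predict(X_demo, W_demo, B_demo))           # thresholded 0/1 labels
print(cost(np.array([1, 1, 0]), predict(X_demo, W_demo, B_demo)))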
# # Train
def train(X, y, learning_rate, epochs):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
db = np.mean(y_hat - y)
W -= learning_rate * dW
B -= learning_rate * db
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Train with L1 Regularization
def train_L1(X, y, learning_rate, epochs, Lambda):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
db = np.mean(y_hat - y)
W -= learning_rate * (dW + Lambda * np.sign(W))
B -= learning_rate * db
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Mini_Batch Gradient Descent
def train_mini_batch(X, y, learning_rate, epochs, batch_size):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
for i in range(epochs):
for j in range(0, len(X), batch_size):
X_batch = X[j : j + batch_size]
y_batch = y[j : j + batch_size]
y_hat = predict(X_batch, W, B)
dW = np.dot(X_batch.T, (y_hat - y_batch)) / len(X_batch)
db = np.mean(y_hat - y_batch)
W -= learning_rate * dW
B -= learning_rate * db
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
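# Illustrative usage of the mini-batch trainer on a small synthetic problem
# (toy data, only to show the expected call signature and returned values):
rng = np.random.default_rng(0)
X_toy = rng.normal(size=(200, 5))
y_toy = (X_toy[:, 0] > 0).astype(float)
W_toy, B_toy, costs_toy = train_mini_batch(
    X_toy, y_toy, learning_rate=0.1, epochs=200, batch_size=32
)
print("Toy accuracy:", np.mean(predict(X_toy, W_toy, B_toy) == y_toy))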
# # RMS Prop
def train_rms_prop(X, y, learning_rate, epochs, beta, epsilon=sys.float_info.min):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
vW = np.zeros(X.shape[1])
vB = 0
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
dB = np.mean(y_hat - y)
vW = beta * vW + (1 - beta) * dW**2
vB = beta * vB + (1 - beta) * dB**2
W -= learning_rate * dW / (np.sqrt(vW) + epsilon)
B -= learning_rate * dB / (np.sqrt(vB) + epsilon)
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Adam
def train_adam(X, y, learning_rate, epochs, beta1, beta2, epsilon=sys.float_info.min):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
vW = np.zeros(X.shape[1])
vB = 0
sW = np.zeros(X.shape[1])
sB = 0
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
dB = np.mean(y_hat - y)
vW = beta1 * vW + (1 - beta1) * dW
vB = beta1 * vB + (1 - beta1) * dB
sW = beta2 * sW + (1 - beta2) * dW**2
sB = beta2 * sB + (1 - beta2) * dB**2
vW_corrected = vW / (1 - beta1 ** (i + 1))
vB_corrected = vB / (1 - beta1 ** (i + 1))
sW_corrected = sW / (1 - beta2 ** (i + 1))
sB_corrected = sB / (1 - beta2 ** (i + 1))
W -= learning_rate * vW_corrected / (np.sqrt(sW_corrected) + epsilon)
B -= learning_rate * vB_corrected / (np.sqrt(sB_corrected) + epsilon)
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Test
def test(X, y, W, b):
y_hat = predict(X, W, b)
return accuracy(y, y_hat)
# # Accuracy
def accuracy(y, y_hat):
return np.mean(y == y_hat)
# # Loading the data
trainSet, testSet = mnist.load_data()
# # Get class 0 and class 1
xTrain = trainSet[0].astype("float32")
yTrain = trainSet[1].astype("int32")
xTest = testSet[0].astype("float32")
yTest = testSet[1].astype("int32")
xTrain = xTrain.reshape(xTrain.shape[0], -1)
xTest = xTest.reshape(xTest.shape[0], -1)
xTrain = np.concatenate([xTrain[yTrain == 0], xTrain[yTrain == 1]])
yTrain = np.concatenate([yTrain[yTrain == 0], yTrain[yTrain == 1]])
xTest = np.concatenate([xTest[yTest == 0], xTest[yTest == 1]])
yTest = np.concatenate([yTest[yTest == 0], yTest[yTest == 1]])
xTrain = standardize(xTrain)
xTest = standardize(xTest)
p = np.random.permutation(len(xTrain))
p2 = np.random.permutation(len(xTest))
xTrain = xTrain[p]
yTrain = yTrain[p]
xTest = xTest[p2]
yTest = yTest[p2]
k = 10
iterations = 1000
# # K-Fold Cross-Validation
def k_fold_cross_validation(X, y, K, lr, epochs):
fold_size = len(X) // K
accuracies = []
for i in range(K):
# Divide the data into training and testing sets
X_train = np.concatenate([X[: i * fold_size], X[(i + 1) * fold_size :]])
Y_train = np.concatenate([y[: i * fold_size], y[(i + 1) * fold_size :]])
x_valid = X[i * fold_size : (i + 1) * fold_size]
y_valid = y[i * fold_size : (i + 1) * fold_size]
# Train the model
print(f"\nTraining at Iteration {i + 1} of {K}")
w, b, Costs = train(X_train, Y_train, lr, epochs)
# Test the model
print(f"\nTesting at Iteration {i + 1} of {K}")
acc = test(x_valid, y_valid, w, b)
print(f"Accuracy at Iteration {i + 1} of {K}: {acc}")
accuracies.append(acc)
return np.mean(accuracies), Costs, w, b
# # Running Code
# ## At Learning Rate 0.1
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.1----------")
average_accuracy, c, _, _ = k_fold_cross_validation(xTrain, yTrain, k, 0.1, iterations)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
# ## At Learning Rate 0.01
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.01----------")
average_accuracy, c, _, _ = k_fold_cross_validation(xTrain, yTrain, k, 0.01, iterations)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
# ## At Learning Rate 0.001
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.001----------")
average_accuracy, c, _, _ = k_fold_cross_validation(
xTrain, yTrain, k, 0.001, iterations
)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
# ## At Learning Rate 0.0001
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.0001----------")
average_accuracy, c, _, _ = k_fold_cross_validation(
xTrain, yTrain, k, 0.0001, iterations
)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/440/129440850.ipynb
| null | null |
[{"Id": 129440850, "ScriptId": 37234559, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11261889, "CreationDate": "05/13/2023 20:48:26", "VersionNumber": 4.0, "Title": "logistic_regression", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 234.0, "LinesInsertedFromPrevious": 118.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 116.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
# # Imports
from keras.datasets import mnist
import numpy as np
from matplotlib.pyplot import plot as plt
import sys
# # Standardization
def standardize(x):
return (x - np.mean(x)) / np.std(x)
# # Prediction
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def predict(X, W, B):
pred = sigmoid(np.dot(X, W) + B)
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
return pred
# # Cost Function
def cost(y, y_hat):
return np.mean(
-y * np.log(y_hat + sys.float_info.min)
- (1 - y) * np.log(1 - y_hat + sys.float_info.min)
)
# # Train
def train(X, y, learning_rate, epochs):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
db = np.mean(y_hat - y)
W -= learning_rate * dW
B -= learning_rate * db
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Train with L1 Regularization
def train_L1(X, y, learning_rate, epochs, Lambda):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
db = np.mean(y_hat - y)
W -= learning_rate * (dW + Lambda * np.sign(W))
B -= learning_rate * db
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Mini_Batch Gradient Descent
def train_mini_batch(X, y, learning_rate, epochs, batch_size):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
for i in range(epochs):
for j in range(0, len(X), batch_size):
X_batch = X[j : j + batch_size]
y_batch = y[j : j + batch_size]
y_hat = predict(X_batch, W, B)
dW = np.dot(X_batch.T, (y_hat - y_batch)) / len(X_batch)
db = np.mean(y_hat - y_batch)
W -= learning_rate * dW
B -= learning_rate * db
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # RMS Prop
def train_rms_prop(X, y, learning_rate, epochs, beta, epsilon=sys.float_info.min):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
vW = np.zeros(X.shape[1])
vB = 0
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
dB = np.mean(y_hat - y)
vW = beta * vW + (1 - beta) * dW**2
vB = beta * vB + (1 - beta) * dB**2
W -= learning_rate * dW / (np.sqrt(vW) + epsilon)
B -= learning_rate * dB / (np.sqrt(vB) + epsilon)
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Adam
def train_adam(X, y, learning_rate, epochs, beta1, beta2, epsilon=sys.float_info.min):
W = np.random.random(X.shape[1])
B = np.random.random()
costs = np.array([])
vW = np.zeros(X.shape[1])
vB = 0
sW = np.zeros(X.shape[1])
sB = 0
for i in range(epochs):
y_hat = predict(X, W, B)
dW = np.dot(X.T, (y_hat - y)) / len(X)
dB = np.mean(y_hat - y)
vW = beta1 * vW + (1 - beta1) * dW
vB = beta1 * vB + (1 - beta1) * dB
sW = beta2 * sW + (1 - beta2) * dW**2
sB = beta2 * sB + (1 - beta2) * dB**2
vW_corrected = vW / (1 - beta1 ** (i + 1))
vB_corrected = vB / (1 - beta1 ** (i + 1))
sW_corrected = sW / (1 - beta2 ** (i + 1))
sB_corrected = sB / (1 - beta2 ** (i + 1))
W -= learning_rate * vW_corrected / (np.sqrt(sW_corrected) + epsilon)
B -= learning_rate * vB_corrected / (np.sqrt(sB_corrected) + epsilon)
if i % 100 == 0:
costValue: float = cost(y, y_hat)
costs = np.append(costs, costValue)
print(f"Cost at epoch {i}: {costValue}")
return W, B, costs
# # Test
def test(X, y, W, b):
y_hat = predict(X, W, b)
return accuracy(y, y_hat)
# # Accuracy
def accuracy(y, y_hat):
return np.mean(y == y_hat)
# # Loading the data
trainSet, testSet = mnist.load_data()
# # Get class 0 and class 1
xTrain = trainSet[0].astype("float32")
yTrain = trainSet[1].astype("int32")
xTest = testSet[0].astype("float32")
yTest = testSet[1].astype("int32")
xTrain = xTrain.reshape(xTrain.shape[0], -1)
xTest = xTest.reshape(xTest.shape[0], -1)
xTrain = np.concatenate([xTrain[yTrain == 0], xTrain[yTrain == 1]])
yTrain = np.concatenate([yTrain[yTrain == 0], yTrain[yTrain == 1]])
xTest = np.concatenate([xTest[yTest == 0], xTest[yTest == 1]])
yTest = np.concatenate([yTest[yTest == 0], yTest[yTest == 1]])
xTrain = standardize(xTrain)
xTest = standardize(xTest)
p = np.random.permutation(len(xTrain))
p2 = np.random.permutation(len(xTest))
xTrain = xTrain[p]
yTrain = yTrain[p]
xTest = xTest[p2]
yTest = yTest[p2]
k = 10
iterations = 1000
# # K-Fold Cross-Validation
def k_fold_cross_validation(X, y, K, lr, epochs):
fold_size = len(X) // K
accuracies = []
for i in range(K):
# Divide the data into training and testing sets
X_train = np.concatenate([X[: i * fold_size], X[(i + 1) * fold_size :]])
Y_train = np.concatenate([y[: i * fold_size], y[(i + 1) * fold_size :]])
x_valid = X[i * fold_size : (i + 1) * fold_size]
y_valid = y[i * fold_size : (i + 1) * fold_size]
# Train the model
print(f"\nTraining at Iteration {i + 1} of {K}")
w, b, Costs = train(X_train, Y_train, lr, epochs)
# Test the model
print(f"\nTesting at Iteration {i + 1} of {K}")
acc = test(x_valid, y_valid, w, b)
print(f"Accuracy at Iteration {i + 1} of {K}: {acc}")
accuracies.append(acc)
return np.mean(accuracies), Costs, w, b
# # Running Code
# ## At Learning Rate 0.1
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.1----------")
average_accuracy, c, _, _ = k_fold_cross_validation(xTrain, yTrain, k, 0.1, iterations)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
# ## At Learning Rate 0.01
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.01----------")
average_accuracy, c, _, _ = k_fold_cross_validation(xTrain, yTrain, k, 0.01, iterations)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
# ## At Learning Rate 0.001
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.001----------")
average_accuracy, c, _, _ = k_fold_cross_validation(
xTrain, yTrain, k, 0.001, iterations
)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
# ## At Learning Rate 0.0001
print(f"----------K-Fold Cross Validation with {k} Folds and eta 0.0001----------")
average_accuracy, c, _, _ = k_fold_cross_validation(
xTrain, yTrain, k, 0.0001, iterations
)
print("\nAverage Accuracy: ", average_accuracy)
plt(c)
| false | 0 | 2,660 | 1 | 2,660 | 2,660 |
||
129440389
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
features = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/features.csv.zip"
)
sample = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/sampleSubmission.csv.zip"
)
stores = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/stores.csv"
)
train = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/train.csv.zip"
)
test = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/test.csv.zip"
)
features.head()
stores.head()
train.head()
feature_store = features.merge(stores, how="inner", on="Store").copy()
feature_store.head()
train_df = (
train.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
test_df = (
test.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
train_df.head()
train_df.describe()
train_df.dtypes
feature_store["Date"] = pd.to_datetime(feature_store["Date"])
train["Date"] = pd.to_datetime(train["Date"])
test["Date"] = pd.to_datetime(test["Date"])
train.head()
feature_store.head()
feature_store["Day"] = feature_store["Date"].dt.isocalendar().day
feature_store["Week"] = feature_store["Date"].dt.isocalendar().week
feature_store["Month"] = feature_store["Date"].dt.month
feature_store["Year"] = feature_store["Date"].dt.isocalendar().year
feature_store.head()
train_df = (
train.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
test_df = (
test.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
df_weeks = train_df.groupby("Week").sum(numeric_only=True)
df_weeks.head()
px.line(
data_frame=df_weeks,
x=df_weeks.index,
y="Weekly_Sales",
title="Weekly Sales vs. Weeks",
)
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown1"], name="MarkDown1", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown2"], name="MarkDown2", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown3"], name="MarkDown3", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown4"], name="MarkDown4", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown5"], name="MarkDown5", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["Weekly_Sales"], name="Weekly_Sales", mode="lines"
)
)
fig.update_layout(title="Sales vs. Markdowns", xaxis_title="Weeks")
# mean sales across years
weekly_sales = (
train_df.groupby(by=["Year", "Week"], as_index=False)
.agg({"Weekly_Sales": ["mean", "median"]})
.copy()
)
weekly_sales_2010 = weekly_sales.loc[weekly_sales["Year"] == 2010].copy()
weekly_sales_2011 = weekly_sales.loc[weekly_sales["Year"] == 2011].copy()
weekly_sales_2012 = weekly_sales.loc[weekly_sales["Year"] == 2012].copy()
weekly_sales_2010 = weekly_sales_2010.reset_index(drop=True)
weekly_sales_2011 = weekly_sales_2011.reset_index(drop=True)
weekly_sales_2012 = weekly_sales_2012.reset_index(drop=True)
weekly_sales_2011.head()
from plotly.graph_objs.scatter.marker import Line
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=weekly_sales_2010.index,
y=weekly_sales_2010["Weekly_Sales"]["mean"],
name="2010",
mode="lines",
)
)
fig.add_trace(
go.Scatter(
x=weekly_sales_2011.index,
y=weekly_sales_2011["Weekly_Sales"]["mean"],
name="2011",
mode="lines",
)
)
fig.add_trace(
go.Scatter(
x=weekly_sales_2012.index,
y=weekly_sales_2012["Weekly_Sales"]["mean"],
name="2012",
mode="lines",
)
)
# clearly there is an increase of sales at the end of the year
train_df.head()
corr = train_df.corr(numeric_only=True)
corr
sns.heatmap(corr)
test_df.isna().sum()
train_data = train_df.copy()
test_data = test_df.copy()
train_data.fillna(0, inplace=True)
test_data.isna().sum()
test_data["CPI"].fillna(test_data["CPI"].mean(), inplace=True)
test_data["Unemployment"].fillna(test_data["Unemployment"].mean(), inplace=True)
test_data.fillna(0, inplace=True)
test_data.isna().sum()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
train_data["IsHoliday"] = le.fit_transform(train_data["IsHoliday"])
test_data["IsHoliday"] = le.fit_transform(test_data["IsHoliday"])
train_data.head()
train_data["Type"] = le.fit_transform(train_data["Type"])
test_data["Type"] = le.fit_transform(test_data["Type"])
train_data.head()
train_data.corr(method="spearman", numeric_only=True)
features = ["Week", "CPI", "Unemployment", "Size", "Type", "Dept", "Store"]
train_data[features]
train_data["Week"] = train_data["Week"].astype(int)
from sklearn.model_selection import train_test_split
X = train_data[features].copy()
y = train_data["Weekly_Sales"].copy()
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, random_state=42, test_size=0.2
)
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
import catboost as cb
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
models = {
"lr": linear_model.LinearRegression(),
"xgb": xgb.XGBRegressor(random_state=1, objective="reg:squarederror"),
"cb": cb.CatBoostRegressor(random_state=1, verbose=False),
"lgb": lgb.LGBMRegressor(random_state=1),
"rfr": RandomForestRegressor(random_state=1),
}
def valid_model(name, model, X_train, y_train, X_valid, y_valid):
model.fit(X_train, y_train)
preds = model.predict(X_valid)
rmse = mean_squared_error(y_valid, preds, squared=False)
return rmse
for name, model in models.items():
rmse = valid_model(name, model, X_train, y_train, X_valid, y_valid)
print(f"{name} : {rmse}")
RF = RandomForestRegressor(random_state=1)
RF.fit(X, y)
test = test_data[features].copy()
sub_preds = RF.predict(test)
sample["Weekly_Sales"] = sub_preds
sample.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/440/129440389.ipynb
| null | null |
[{"Id": 129440389, "ScriptId": 38348983, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12364689, "CreationDate": "05/13/2023 20:41:12", "VersionNumber": 1.0, "Title": "Walmart Forecasting", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 225.0, "LinesInsertedFromPrevious": 225.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
features = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/features.csv.zip"
)
sample = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/sampleSubmission.csv.zip"
)
stores = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/stores.csv"
)
train = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/train.csv.zip"
)
test = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/test.csv.zip"
)
features.head()
stores.head()
train.head()
feature_store = features.merge(stores, how="inner", on="Store").copy()
feature_store.head()
train_df = (
train.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
test_df = (
test.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
train_df.head()
train_df.describe()
train_df.dtypes
feature_store["Date"] = pd.to_datetime(feature_store["Date"])
train["Date"] = pd.to_datetime(train["Date"])
test["Date"] = pd.to_datetime(test["Date"])
train.head()
feature_store.head()
feature_store["Day"] = feature_store["Date"].dt.isocalendar().day
feature_store["Week"] = feature_store["Date"].dt.isocalendar().week
feature_store["Month"] = feature_store["Date"].dt.month
feature_store["Year"] = feature_store["Date"].dt.isocalendar().year
feature_store.head()
train_df = (
train.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
test_df = (
test.merge(feature_store, how="inner", on=["Store", "Date", "IsHoliday"])
.sort_values(by=["Store", "Dept", "Date"])
.reset_index(drop=True)
.copy()
)
df_weeks = train_df.groupby("Week").sum(numeric_only=True)
df_weeks.head()
px.line(
data_frame=df_weeks,
x=df_weeks.index,
y="Weekly_Sales",
title="Weekly Sales vs. Weeks",
)
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown1"], name="MarkDown1", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown2"], name="MarkDown2", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown3"], name="MarkDown3", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown4"], name="MarkDown4", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["MarkDown5"], name="MarkDown5", mode="lines"
)
)
fig.add_trace(
go.Scatter(
x=df_weeks.index, y=df_weeks["Weekly_Sales"], name="Weekly_Sales", mode="lines"
)
)
fig.update_layout(title="Sales vs. Markdowns", xaxis_title="Weeks")
# mean sales across years
weekly_sales = (
train_df.groupby(by=["Year", "Week"], as_index=False)
.agg({"Weekly_Sales": ["mean", "median"]})
.copy()
)
weekly_sales_2010 = weekly_sales.loc[weekly_sales["Year"] == 2010].copy()
weekly_sales_2011 = weekly_sales.loc[weekly_sales["Year"] == 2011].copy()
weekly_sales_2012 = weekly_sales.loc[weekly_sales["Year"] == 2012].copy()
weekly_sales_2010 = weekly_sales_2010.reset_index(drop=True)
weekly_sales_2011 = weekly_sales_2011.reset_index(drop=True)
weekly_sales_2012 = weekly_sales_2012.reset_index(drop=True)
weekly_sales_2011.head()
from plotly.graph_objs.scatter.marker import Line
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=weekly_sales_2010.index,
y=weekly_sales_2010["Weekly_Sales"]["mean"],
name="2010",
mode="lines",
)
)
fig.add_trace(
go.Scatter(
x=weekly_sales_2011.index,
y=weekly_sales_2011["Weekly_Sales"]["mean"],
name="2011",
mode="lines",
)
)
fig.add_trace(
go.Scatter(
x=weekly_sales_2012.index,
y=weekly_sales_2012["Weekly_Sales"]["mean"],
name="2012",
mode="lines",
)
)
# clearly there is an increase of sales at the end of the year
train_df.head()
corr = train_df.corr(numeric_only=True)
corr
sns.heatmap(corr)
test_df.isna().sum()
train_data = train_df.copy()
test_data = test_df.copy()
train_data.fillna(0, inplace=True)
test_data.isna().sum()
test_data["CPI"].fillna(test_data["CPI"].mean(), inplace=True)
test_data["Unemployment"].fillna(test_data["Unemployment"].mean(), inplace=True)
test_data.fillna(0, inplace=True)
test_data.isna().sum()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
train_data["IsHoliday"] = le.fit_transform(train_data["IsHoliday"])
test_data["IsHoliday"] = le.fit_transform(test_data["IsHoliday"])
train_data.head()
train_data["Type"] = le.fit_transform(train_data["Type"])
test_data["Type"] = le.fit_transform(test_data["Type"])
train_data.head()
train_data.corr(method="spearman", numeric_only=True)
features = ["Week", "CPI", "Unemployment", "Size", "Type", "Dept", "Store"]
train_data[features]
train_data["Week"] = train_data["Week"].astype(int)
from sklearn.model_selection import train_test_split
X = train_data[features].copy()
y = train_data["Weekly_Sales"].copy()
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, random_state=42, test_size=0.2
)
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
import catboost as cb
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
models = {
"lr": linear_model.LinearRegression(),
"xgb": xgb.XGBRegressor(random_state=1, objective="reg:squarederror"),
"cb": cb.CatBoostRegressor(random_state=1, verbose=False),
"lgb": lgb.LGBMRegressor(random_state=1),
"rfr": RandomForestRegressor(random_state=1),
}
def valid_model(name, model, X_train, y_train, X_valid, y_valid):
model.fit(X_train, y_train)
preds = model.predict(X_valid)
rmse = mean_squared_error(y_valid, preds, squared=False)
return rmse
for name, model in models.items():
rmse = valid_model(name, model, X_train, y_train, X_valid, y_valid)
print(f"{name} : {rmse}")
RF = RandomForestRegressor(random_state=1)
RF.fit(X, y)
test = test_data[features].copy()
sub_preds = RF.predict(test)
sample["Weekly_Sales"] = sub_preds
sample.to_csv("submission.csv", index=False)
| false | 0 | 2,508 | 1 | 2,508 | 2,508 |
||
129440843
|
<jupyter_start><jupyter_text>Fraud_detection
Kaggle dataset identifier: fraud-detection
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/fraud-detection/Fraud detection.csv")
df.head()
print(df.isnull().sum())
print(df["type"].value_counts())
type = df["type"].value_counts()
transactions = type.index
quantity = type.values
import plotly.express as px
figure = px.pie(
df,
values=quantity,
names=transactions,
hole=0.5,
title="Destribution of transaction pie",
)
figure.show()
correlation = df.corr(numeric_only=True)
print(correlation["isFraud"].sort_values(ascending=False))
df["type"] = df["type"].map(
{"CASH_OUT": 1, "PAYMENT": 2, "CASH_IN": 3, "TRANSFER": 4, "DEBIT": 5}
)
df["isFraud"] = df["isFraud"].map({0: "No Fraud", 1: "Fraud"})
print(df.head())
X = np.array(df[["type", "amount", "oldbalanceOrg", "newbalanceOrig"]])
y = np.array(df[["isFraud"]])
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=42)
model = DecisionTreeClassifier()
model.fit(xtrain, ytrain)
print(model.score(xtest, ytest))
# Trial
features = np.array([[4, 9000.60, 9000.60, 2]])
print(model.predict(features))
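# Illustrative follow-up (not in the original notebook): accuracy alone can be
# misleading on imbalanced fraud data, so a per-class view of the hold-out split
# can be added like this.
from sklearn.metrics import classification_report, confusion_matrix
ypred_test = model.predict(xtest)
print(confusion_matrix(ytest.ravel(), ypred_test))
print(classification_report(ytest.ravel(), ypred_test))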
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/440/129440843.ipynb
|
fraud-detection
|
marwanashraf22
|
[{"Id": 129440843, "ScriptId": 38479244, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11844665, "CreationDate": "05/13/2023 20:48:21", "VersionNumber": 1.0, "Title": "Online Payments Fraud Detection", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 61.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185492596, "KernelVersionId": 129440843, "SourceDatasetVersionId": 5678506}]
|
[{"Id": 5678506, "DatasetId": 3264481, "DatasourceVersionId": 5754059, "CreatorUserId": 11844665, "LicenseName": "Unknown", "CreationDate": "05/13/2023 19:59:38", "VersionNumber": 1.0, "Title": "Fraud_detection", "Slug": "fraud-detection", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3264481, "CreatorUserId": 11844665, "OwnerUserId": 11844665.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5678506.0, "CurrentDatasourceVersionId": 5754059.0, "ForumId": 3330084, "Type": 2, "CreationDate": "05/13/2023 19:59:38", "LastActivityDate": "05/13/2023", "TotalViews": 21, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 11844665, "UserName": "marwanashraf22", "DisplayName": "Marwan Ashraf", "RegisterDate": "10/06/2022", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/fraud-detection/Fraud detection.csv")
df.head()
print(df.isnull().sum())
print(df["type"].value_counts())
type = df["type"].value_counts()
transactions = type.index
quantity = type.values
import plotly.express as px
figure = px.pie(
df,
values=quantity,
names=transactions,
hole=0.5,
title="Destribution of transaction pie",
)
figure.show()
correlation = df.corr(numeric_only=True)
print(correlation["isFraud"].sort_values(ascending=False))
df["type"] = df["type"].map(
{"CASH_OUT": 1, "PAYMENT": 2, "CASH_IN": 3, "TRANSFER": 4, "DEBIT": 5}
)
df["isFraud"] = df["isFraud"].map({0: "No Fraud", 1: "Fraud"})
print(df.head())
X = np.array(df[["type", "amount", "oldbalanceOrg", "newbalanceOrig"]])
y = np.array(df[["isFraud"]])
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=42)
model = DecisionTreeClassifier()
model.fit(xtrain, ytrain)
print(model.score(xtest, ytest))
# Trial
features = np.array([[4, 9000.60, 9000.60, 2]])
print(model.predict(features))
| false | 1 | 595 | 0 | 617 | 595 |
||
129209996
|
<jupyter_start><jupyter_text>Data Science Salaries 2023 💸
Data Science Job Salaries Dataset contains 11 columns, each are:
1. work_year: The year the salary was paid.
2. experience_level: The experience level in the job during the year
3. employment_type: The type of employment for the role
4. job_title: The role worked in during the year.
5. salary: The total gross salary amount paid.
6. salary_currency: The currency of the salary paid as an ISO 4217 currency code.
7. salaryinusd: The salary in USD
8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code.
9. remote_ratio: The overall amount of work done remotely
10. company_location: The country of the employer's main office or contracting branch
11. company_size: The median number of people that worked for the company during the year
Kaggle dataset identifier: data-science-salaries-2023
<jupyter_script># IMPORT LIBRARIES:
# # Introduction
# Data Science
# is a fantastic career with a tonne of potential for future growth. Already, there is a lot of demand, competitive pay, and several benefits. Companies are actively looking for data scientists that can glean valuable information from massive amounts of data.
#
#
# Salary Prediction using machine learning involves training a model on historical salary data to predict future salaries for individuals. This can be done using various supervised learning algorithms such as linear regression, decision trees, or random forest.
#
# In this notebook, I will explore a dataset of salaries across different data science roles. The data consists of 3755 records with information on work year, experience level, employment type, job title, salary in USD, company location, and more. First, we will do EDA based on some questions. Then we will perform some feature engineering. Finally, we will build machine learning regression models to predict salary.
#
#
#
#
# Table Of Contents
#
#
#
# |No | Contents
# |:---| :---
# |1 | [ Importing Libraries](#1)
# |2 | [ Basic Exploration ](#2)
# |3 | [ Exploratory Data Analysis (EDA) ](#3)
# |4 | [ Feature Engineering](#4)
# |5 | [ Model Building ](#5)
# # Importing Libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import seaborn as sns
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
#
# # Basic Exploration
Existing_data = pd.read_csv("/kaggle/input/data-science-salaries-2023/ds_salaries.csv")
# Load the Dataset
Existing_data.head()
# **Taking a glance at Data Types**
Existing_data.info()
data = Existing_data.drop(
columns=["salary", "salary_currency", "employee_residence", "remote_ratio"]
)
data.isna().sum()
print(data.shape)
# Remove duplicate Features
data = data.loc[:, ~data.T.duplicated()]  # drop duplicate columns without disturbing the dtypes
# Remove Duplicate Rows
data.drop_duplicates(inplace=True)
print(data.shape)
data.describe(include=object).T
#
# # Exploratory Data Analysis (EDA)
# # How has salary been affected by work year?
data.groupby("work_year")["salary_in_usd"].median().plot.bar()
plt.show()
# ***This bar chart shows that median salaries are highest in 2023.***
# # Is experience level the main factor in salary?
data.groupby(["experience_level"])["salary_in_usd"].median().plot.bar()
# ***This bar chart shows that highest salaries are with Executive Level experience.***
# # How Employment Type affect Salaries?
data.groupby("employment_type")["salary_in_usd"].median().plot.bar()
plt.show()
# ***This bar chart shows that employees working Full Time are highly paid.***
# # What is the impact of job title on Salaries?
fig = plt.figure(figsize=(13, 7))
occupation = data.job_title.value_counts().reset_index().set_index("index").head(7)
ax = sns.barplot(y=occupation.index, x=occupation.job_title)
plt.title("Top job roles in data science")
plt.xlabel("count")
plt.ylabel("Job Title")
for p in ax.patches:
height = p.get_height()
width = p.get_width()
ax.text(
x=width + 3, y=p.get_y() + (height / 2), s="{:.0f}".format(width), va="center"
)
# ***From the above plot, Data Engineer and Data Scientist are among the most common job titles.***
# # How Company Location Affects the Salary?
fig = plt.figure(figsize=(15, 9))
top10 = data.groupby(["company_location"]).salary_in_usd.max()
top10 = top10.sort_values(ascending=False).head(10)
ax = sns.barplot(y=top10.index, x=top10.values)
plt.title("Top 10 countries where data scientists are paid the highest salaries")
plt.xlabel("Salary in USD")
plt.ylabel("Company Location")
for p in ax.patches:
height = p.get_height()
width = p.get_width()
ax.text(
x=width + 3, y=p.get_y() + (height / 2), s="{:.0f}".format(width), va="center"
)
# # What is the impact of company Size on Salaries
data.groupby("company_size")["salary_in_usd"].median().plot.bar()
plt.show()
# ***This shows that medium-sized companies offer the best median salaries.***
# # Feature Engineering
# convert categorical data into numeric
for column in data.columns:
print(f"{column}: Number of unique values {data[column].nunique()}")
print("==========================================================")
# Correlation strength ranges are as follows:
# **Strong correlation: X > 0.7**
# **Moderate correlation: 0.5 < X < 0.7**
# **Weak correlation: X < 0.5**
# Insight: From the correlation matrix, it is evident that no features are strongly correlated with one another.
# Conclusion:
# There are no columns that are highly correlated.
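# A minimal sketch of how the correlation matrix referenced above can be computed
# (using the raw Existing_data frame so the numeric dtypes are intact):
corr_matrix = Existing_data.corr(numeric_only=True)
print(corr_matrix["salary_in_usd"].sort_values(ascending=False))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.show()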
# SPLITTING DATA INTO TRAINING AND TEST SETS
X = data.drop(columns=["salary_in_usd"])  # all predictors except the target
y = data.salary_in_usd
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=0
)
print("X training size:", X_train.shape)
print("X test size:", X_test.shape)
print("Y training size:", y_train.shape)
print("Y test size:", y_test.shape)
# Encode the categorical features before fitting regression models
one_hot_encoded_training_predictors = pd.get_dummies(data)
salary = data.salary_in_usd
# this gives us a list of all column names who have object dtype
object_col = [col for col in data.columns if data[col].dtype == "object"]
# Some categorical levels may appear in the test set but not in the training set.
# So let's keep only the columns whose test-set categories are a subset of the training-set categories.
good_label = [col for col in object_col if set(X_test[col]).issubset(set(X_train[col]))]
bad_label = set(object_col) - set(good_label)
print("The columns we will use to ordinal encode: ", *good_label, sep=", ")
print("The columns we will drop: ", bad_label)
from sklearn.preprocessing import OrdinalEncoder
od = OrdinalEncoder()
X_train[object_col] = od.fit_transform(X_train[object_col])
X_test[object_col] = od.transform(X_test[object_col])
X_train.head()
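# A minimal model-building sketch on the encoded features above (illustrative;
# LinearRegression is imported here, RandomForestRegressor was imported at the top):
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, r2_score
lin_reg = LinearRegression().fit(X_train, y_train)
rf_reg = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_train, y_train)
for name, mdl in [("Linear Regression", lin_reg), ("Random Forest", rf_reg)]:
    preds = mdl.predict(X_test)
    print(name, "| MAE:", mean_absolute_error(y_test, preds), "| R2:", r2_score(y_test, preds))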
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/209/129209996.ipynb
|
data-science-salaries-2023
|
arnabchaki
|
[{"Id": 129209996, "ScriptId": 37746274, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14682576, "CreationDate": "05/11/2023 21:16:59", "VersionNumber": 7.0, "Title": "Minor Project 3", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 214.0, "LinesInsertedFromPrevious": 159.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 55.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185051236, "KernelVersionId": 129209996, "SourceDatasetVersionId": 5392837}]
|
[{"Id": 5392837, "DatasetId": 3125926, "DatasourceVersionId": 5466555, "CreatorUserId": 7428813, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "04/13/2023 09:55:16", "VersionNumber": 1.0, "Title": "Data Science Salaries 2023 \ud83d\udcb8", "Slug": "data-science-salaries-2023", "Subtitle": "Salaries of Different Data Science Fields in the Data Science Domain", "Description": "Data Science Job Salaries Dataset contains 11 columns, each are:\n\n1. work_year: The year the salary was paid.\n2. experience_level: The experience level in the job during the year\n3. employment_type: The type of employment for the role\n4. job_title: The role worked in during the year.\n5. salary: The total gross salary amount paid.\n6. salary_currency: The currency of the salary paid as an ISO 4217 currency code.\n7. salaryinusd: The salary in USD\n8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code.\n9. remote_ratio: The overall amount of work done remotely\n10. company_location: The country of the employer's main office or contracting branch\n11. company_size: The median number of people that worked for the company during the year", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3125926, "CreatorUserId": 7428813, "OwnerUserId": 7428813.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5392837.0, "CurrentDatasourceVersionId": 5466555.0, "ForumId": 3189506, "Type": 2, "CreationDate": "04/13/2023 09:55:16", "LastActivityDate": "04/13/2023", "TotalViews": 234449, "TotalDownloads": 44330, "TotalVotes": 1244, "TotalKernels": 184}]
|
[{"Id": 7428813, "UserName": "arnabchaki", "DisplayName": "randomarnab", "RegisterDate": "05/16/2021", "PerformanceTier": 2}]
|
# IMPORT LIBRARIES:
# # Introduction
# Data Science
# is a fantastic career with a tonne of potential for future growth. Already, there is a lot of demand, competitive pay, and several benefits. Companies are actively looking for data scientists that can glean valuable information from massive amounts of data.
#
#
# Salary Prediction using machine learning involves training a model on historical salary data to predict future salaries for individuals. This can be done using various supervised learning algorithms such as linear regression, decision trees, or random forest.
#
# In this notebook, I will explore a dataset of salaries across different data science roles. The data consists of 3755 records with information on work year, experience level, employment type, job title, salary in USD, company location, and more. First, we will do EDA based on some questions. Then we will perform some feature engineering. Finally, we will build machine learning regression models to predict salary.
#
#
#
#
# Table Of Contents
#
#
#
# |No | Contents
# |:---| :---
# |1 | [ Importing Libraries](#1)
# |2 | [ Basic Exploration ](#2)
# |3 | [ Exploratory Data Analysis (EDA) ](#3)
# |4 | [ Feature Engineering](#4)
# |5 | [ Model Building ](#5)
# # Importing Libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import seaborn as sns
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
#
# # Basic Exploration
Existing_data = pd.read_csv("/kaggle/input/data-science-salaries-2023/ds_salaries.csv")
# Load the Dataset
Existing_data.head()
# **Taking a glance at Data Types**
Existing_data.info()
data = Existing_data.drop(
columns=["salary", "salary_currency", "employee_residence", "remote_ratio"]
)
data.isna().sum()
print(data.shape)
# Remove duplicate Features
data = data.loc[:, ~data.T.duplicated()]  # drop duplicate columns without disturbing the dtypes
# Remove Duplicate Rows
data.drop_duplicates(inplace=True)
print(data.shape)
data.describe(include=object).T
#
# # Exploratory Data Analysis (EDA)
# # How has salary been affected by work year?
data.groupby("work_year")["salary_in_usd"].median().plot.bar()
plt.show()
# ***This bar chart shows that median salaries are highest in 2023.***
# # Is experience level the main factor in salary?
data.groupby(["experience_level"])["salary_in_usd"].median().plot.bar()
# ***This bar chart shows that highest salaries are with Executive Level experience.***
# # How Employment Type affect Salaries?
data.groupby("employment_type")["salary_in_usd"].median().plot.bar()
plt.show()
# ***This bar chart shows that employees working Full Time are highly paid.***
# # What is the impact of job title on Salaries?
fig = plt.figure(figsize=(13, 7))
occupation = data.job_title.value_counts().reset_index().set_index("index").head(7)
ax = sns.barplot(y=occupation.index, x=occupation.job_title)
plt.title("Top job roles in data science")
plt.xlabel("count")
plt.ylabel("Job Title")
for p in ax.patches:
height = p.get_height()
width = p.get_width()
ax.text(
x=width + 3, y=p.get_y() + (height / 2), s="{:.0f}".format(width), va="center"
)
# ***From the above plot, Data Engineer and Data Scientist are among the most common job titles.***
# # How Company Location Affects the Salary?
fig = plt.figure(figsize=(15, 9))
top10 = data.groupby(["company_location"]).salary_in_usd.max()
top10 = top10.sort_values(ascending=False).head(10)
ax = sns.barplot(y=top10.index, x=top10.values)
plt.title("Top 10 countries where data scientists are paid the highest salaries")
plt.xlabel("Salary in USD")
plt.ylabel("Company Location")
for p in ax.patches:
height = p.get_height()
width = p.get_width()
ax.text(
x=width + 3, y=p.get_y() + (height / 2), s="{:.0f}".format(width), va="center"
)
# # What is the impact of company Size on Salaries
data.groupby("company_size")["salary_in_usd"].median().plot.bar()
plt.show()
# ***This shows that medium-sized companies offer the best median salaries.***
# # Feature Engineering
# convert categorical data into numeric
for column in data.columns:
print(f"{column}: Number of unique values {data[column].nunique()}")
print("==========================================================")
# Correlation strength ranges are as follows:
# **Strong correlation: X > 0.7**
# **Moderate correlation: 0.5 < X < 0.7**
# **Weak correlation: X < 0.5**
# Insight: From the correlation matrix, it is evident that no features are strongly correlated with one another.
# Conclusion:
# There are no columns that are highly correlated.
# SPLITTING DATA INTO TRAINING AND TEST SETS
X = data.drop(columns=["salary_in_usd"])  # all predictors except the target
y = data.salary_in_usd
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=0
)
print("X training size:", X_train.shape)
print("X test size:", X_test.shape)
print("Y training size:", y_train.shape)
print("Y test size:", y_test.shape)
# Encode the categorical features before fitting regression models
one_hot_encoded_training_predictors = pd.get_dummies(data)
salary = data.salary_in_usd
# this gives us a list of all column names who have object dtype
object_col = [col for col in data.columns if data[col].dtype == "object"]
# Some categorical levels may appear in the test set but not in the training set.
# So let's keep only the columns whose test-set categories are a subset of the training-set categories.
good_label = [col for col in object_col if set(X_test[col]).issubset(set(X_train[col]))]
bad_label = set(object_col) - set(good_label)
print("The columns we will use to ordinal encode: ", *good_label, sep=", ")
print("The columns we will drop: ", bad_label)
from sklearn.preprocessing import OrdinalEncoder
od = OrdinalEncoder()
X_train[object_col] = od.fit_transform(X_train[object_col])
X_test[object_col] = od.transform(X_test[object_col])
X_train.head()
| false | 1 | 1,874 | 0 | 2,122 | 1,874 |
||
129209428
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
meta = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
sub_samp = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
NUM_FEATURES = list(train.drop(["Id", "EJ", "Class"], axis=1).columns)
TARGET = "Class"
CAT_FEATURES = "EJ"
# # Train data
train.info()
train.head()
# # Meta data
# Supplemental metadata, only available for the training set.
#
meta.info()
meta.head()
# # Train Features
# Only categorical feature apart from the target is EJ
train.EJ.unique()
sns.countplot(data=train, x="EJ")
# # Duplicates
train[train.duplicated(subset=train.drop(["Id", "Class"], axis=1).columns)]
# # Missing values
train.isna().sum().sort_values(ascending=False).head(10)
# # Descriptive stats
train[NUM_FEATURES].agg(["mean", "std", "max", "min"]).T
# # Data visualization
n_cols = 5
n_rows = len(NUM_FEATURES) // n_cols
fig, ax = plt.subplots(n_rows, n_cols, figsize=(15, n_rows * 2))
fig.tight_layout(h_pad=2)
ax = ax.flatten()
for i, col in enumerate(NUM_FEATURES):
sns.histplot(data=train[col], ax=ax[i])
ax[i].set_title(f"{col}")
ax[i].set_ylabel(None)
ax[i].set_xlabel(None)
# # Target feature
sns.countplot(data=train, x="Class")
print("Diagnosed patients: {ct}".format(ct=train.Class.value_counts()[0]))
print("Not-Diagnosed patients: {cf}".format(cf=train.Class.value_counts()[1]))
# # Features correlation
corr = train.corr(numeric_only=True)
corr["Class"].sort_values(ascending=False)
corr = train.corr(numeric_only=True)
sns.heatmap(corr, annot=False)
corr_feat = corr.index
n_cols = 5
n_rows = len(NUM_FEATURES) // n_cols
fig, ax = plt.subplots(n_rows, n_cols, figsize=(20, n_rows * 2))
fig.tight_layout(h_pad=2)
ax = ax.flatten()
for i, col in enumerate(NUM_FEATURES):
sns.regplot(
data=train,
x=col,
y="Class",
ax=ax[i],
)
ax[i].set_title(f"{col}")
ax[i].set_ylabel(None)
ax[i].set_xlabel(None)
# # Submission File
sub_samp.head()
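# Illustrative naive baseline (hypothetical, assuming the class_0/class_1
# probability columns of the sample file): fill the submission with the class
# prior observed in training.
p1 = train["Class"].mean()  # share of Class 1 rows in the training data
sub_samp["class_0"] = 1 - p1
sub_samp["class_1"] = p1
sub_samp.head()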
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/209/129209428.ipynb
| null | null |
[{"Id": 129209428, "ScriptId": 38413586, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9660701, "CreationDate": "05/11/2023 21:08:03", "VersionNumber": 2.0, "Title": "ICR - Identifying Age-Related Conditions EDA", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 94.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 94.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
| null | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
meta = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
sub_samp = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
NUM_FEATURES = list(train.drop(["Id", "EJ", "Class"], axis=1).columns)
TARGET = "Class"
CAT_FEATURES = "EJ"
# # Train data
train.info()
train.head()
# # Meta data
# Supplemental metadata, only available for the training set.
#
meta.info()
meta.head()
# # Train Features
# Only categorical feature apart from the target is EJ
train.EJ.unique()
sns.countplot(data=train, x="EJ")
# # Duplicates
train[train.duplicated(subset=train.drop(["Id", "Class"], axis=1).columns)]
# # Missing values
train.isna().sum().sort_values(ascending=False).head(10)
# # Descriptive stats
train[NUM_FEATURES].agg(["mean", "std", "max", "min"]).T
# # Data visualization
n_cols = 5
n_rows = len(NUM_FEATURES) // n_cols
fig, ax = plt.subplots(n_rows, n_cols, figsize=(15, n_rows * 2))
fig.tight_layout(h_pad=2)
ax = ax.flatten()
for i, col in enumerate(NUM_FEATURES):
sns.histplot(data=train[col], ax=ax[i])
ax[i].set_title(f"{col}")
ax[i].set_ylabel(None)
ax[i].set_xlabel(None)
# # Target feature
sns.countplot(data=train, x="Class")
print("Diagnosed patients: {ct}".format(ct=train.Class.value_counts()[0]))
print("Not-Diagnosed patients: {cf}".format(cf=train.Class.value_counts()[1]))
# # Features correlation
corr = train.corr(numeric_only=True)
corr["Class"].sort_values(ascending=False)
corr = train.corr(numeric_only=True)
sns.heatmap(corr, annot=False)
corr_feat = corr.index
n_cols = 5
n_rows = len(NUM_FEATURES) // n_cols
fig, ax = plt.subplots(n_rows, n_cols, figsize=(20, n_rows * 2))
fig.tight_layout(h_pad=2)
ax = ax.flatten()
for i, col in enumerate(NUM_FEATURES):
sns.regplot(
data=train,
x=col,
y="Class",
ax=ax[i],
)
ax[i].set_title(f"{col}")
ax[i].set_ylabel(None)
ax[i].set_xlabel(None)
# # Submission File
sub_samp.head()
| false | 0 | 774 | 4 | 774 | 774 |
||
129209316
|
import pandas as pd
import numpy as np
def smape_plus(y_true, y_pred):
"""
Calculate symmetric mean absolute percentage error from given ground-truth and predictions
Parameters
----------
y_true: array-like of shape (n_samples)
Array of ground-truth values
y_pred: array-like of shape (n_samples)
Array of prediction values
Returns
-------
smape: float
Symmetric mean absolute percentage error
"""
y_true = y_true + 1
y_pred = y_pred + 1
smape = (
100
/ len(y_true)
* np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred)))
)
return smape
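# Quick illustrative check of the metric on hand-made values: identical inputs give 0,
# and a small disagreement gives a small positive score.
print(smape_plus(np.array([0.0, 10.0, 20.0]), np.array([0.0, 12.0, 18.0])))  # roughly 8.9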
train_clinical_data = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
supplemental_clinical_data = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv"
)
train_clinical_data["source"] = "standard"
train_clinical_data["upd23b_clinical_state_on_medication"].fillna("NA", inplace=True)
supplemental_clinical_data["source"] = "supplemental"
supplemental_clinical_data["upd23b_clinical_state_on_medication"].fillna(
"NA", inplace=True
)
df_clinical = pd.concat([train_clinical_data, supplemental_clinical_data])
# Flag users with all NA values
train_plus_unique_med = df_clinical.groupby("patient_id")[
"upd23b_clinical_state_on_medication"
].nunique()
train_plus_first_med = df_clinical.groupby("patient_id")[
"upd23b_clinical_state_on_medication"
].first()
NA_patient_id_list = train_plus_unique_med[
(train_plus_unique_med == 1) & (train_plus_first_med == "NA")
].index.to_list()
df_clinical["patient_medication_is_all_NA"] = (
df_clinical["patient_id"].isin(NA_patient_id_list).astype(int)
)
targets = ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]
pred_months = [
0,
6,
12,
18,
24,
30,
36,
42,
48,
54,
60,
66,
72,
78,
84,
90,
96,
102,
108,
]
# ## Original
# Only Trends
# https://www.kaggle.com/code/vitalykudelya/only-trends
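# The "only trends" baseline below fits, for each UPDRS target (and, here, for each
# medication group), a straight line  prediction = trend[0] + trend[1] * visit_month,
# with the two coefficients chosen by scipy's Powell optimiser to minimise SMAPE+1.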
from scipy.optimize import minimize
def smape_plus_1(y_true, y_pred):
y_true_plus_1 = y_true + 1
y_pred_plus_1 = y_pred + 1
metric = np.zeros(len(y_true_plus_1))
numerator = np.abs(y_true_plus_1 - y_pred_plus_1)
denominator = (np.abs(y_true_plus_1) + np.abs(y_pred_plus_1)) / 2
mask_not_zeros = (y_true_plus_1 != 0) | (y_pred_plus_1 != 0)
metric[mask_not_zeros] = numerator[mask_not_zeros] / denominator[mask_not_zeros]
return 100 * np.nanmean(metric)
def calculate_predicitons(pred_month, trend):
if target == "updrs_4":
pred_month = pred_month.clip(54, None)
return np.round(trend[0] + pred_month * trend[1])
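# Note on the clip above: for updrs_4, visits earlier than month 54 are evaluated as if
# they were at month 54, presumably because updrs_4 is sparsely observed at early visits.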
def function_to_minimize(x):
metric = smape_plus_1(
y_true=y_true_array,
y_pred=calculate_predicitons(pred_month=pred_month_array, trend=x),
)
return metric
target_to_trend = {}
for j in [0, 1]:
train_clinical_all = df_clinical[
df_clinical["patient_medication_is_all_NA"] == j
].copy()
train_clinical_all["pred_month"] = train_clinical_all["visit_month"]
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[
[
"patient_id",
"visit_month",
"pred_month",
"updrs_1",
"updrs_2",
"updrs_3",
"updrs_4",
]
].copy()
train_shift["visit_month"] -= plus_month
train_shift.rename(
columns={f"updrs_{i}": f"updrs_{i}_plus_{plus_month}" for i in range(1, 5)},
inplace=True,
)
train_shift.rename(
columns={"pred_month": f"pred_month_plus_{plus_month}"}, inplace=True
)
train_clinical_all = train_clinical_all.merge(
train_shift, how="left", on=["patient_id", "visit_month"]
)
train_clinical_all.rename(
columns={f"updrs_{i}": f"updrs_{i}_plus_0" for i in range(1, 5)}, inplace=True
)
train_clinical_all.rename(
columns={"pred_month": f"pred_month_plus_0"}, inplace=True
)
for i in range(1, 5):
target = f"updrs_{i}"
columns_with_target = [
f"{target}_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
columns_with_pred_month = [
f"pred_month_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
y_true_array = train_clinical_all[columns_with_target].values.ravel()
pred_month_array = train_clinical_all[columns_with_pred_month].values.ravel()
trend = list(
minimize(fun=function_to_minimize, x0=[0, 0.0048], method="Powell").x
)
target_to_trend[(j, target)] = trend
dfs = []
for i, med in enumerate(["not_all_NA", "NA"]):
df_trend = pd.DataFrame(columns=targets)
for visit_month in pred_months:
for target in targets:
trend = target_to_trend[(i, target)]
if target == "updrs_4":
if visit_month < 54:
pred_month = 54
else:
pred_month = visit_month
else:
pred_month = visit_month
result = trend[0] + pred_month * trend[1]
df_trend.loc[visit_month, target] = np.round(result) if result >= 0 else 0.0
df_trend.columns = ["trend_" + c for c in df_trend.columns]
df_trend = df_trend.astype(float).reset_index()
df_trend = df_trend.rename(columns={"index": "visit_month"})
df_trend["patient_medication_is_all_NA"] = i
dfs.append(df_trend)
df_group_trend = pd.concat(dfs)
# calculate smape score
df_eval = pd.merge(
df_clinical,
df_group_trend,
how="left",
on=["visit_month", "patient_medication_is_all_NA"],
)
y_preds = []
y_trues = []
for target in targets:
df_calc = df_eval.loc[
(~df_eval[target].isnull()) & (~df_eval[f"trend_{target}"].isnull())
]
y_preds.append(df_calc[f"trend_{target}"].values)
y_trues.append(df_calc[target].values)
smape = smape_plus(np.concatenate(y_trues), np.concatenate(y_preds))
print(smape)
# ## Find Best Group
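# The cells below run a simple greedy local search over the patient grouping:
# for each patient, flip the group flag (0 <-> 1), refit the per-group linear trends,
# and keep the flip only if the overall SMAPE improves; the pass over all patients
# is repeated up to 10 times.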
from tqdm.auto import tqdm
best_smape = smape
patient_list = df_clinical["patient_id"].unique()
df = df_clinical.copy()
for loop in range(10):
print("*" * 20)
print(f"loop {loop} start")
print("*" * 20)
for patient in tqdm(patient_list):
df_before = df.copy()
    # invert this patient's group label (0 <-> 1)
df.loc[(df["patient_id"] == patient), "patient_medication_is_all_NA"] = df.loc[
(df["patient_id"] == patient), "patient_medication_is_all_NA"
].apply(lambda x: 1 if x == 0 else 0)
target_to_trend = {}
for j in [0, 1]:
train_clinical_all = df[df["patient_medication_is_all_NA"] == j].copy()
train_clinical_all["pred_month"] = train_clinical_all["visit_month"]
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[
[
"patient_id",
"visit_month",
"pred_month",
"updrs_1",
"updrs_2",
"updrs_3",
"updrs_4",
]
].copy()
train_shift["visit_month"] -= plus_month
train_shift.rename(
columns={
f"updrs_{i}": f"updrs_{i}_plus_{plus_month}"
for i in range(1, 5)
},
inplace=True,
)
train_shift.rename(
columns={"pred_month": f"pred_month_plus_{plus_month}"},
inplace=True,
)
train_clinical_all = train_clinical_all.merge(
train_shift, how="left", on=["patient_id", "visit_month"]
)
train_clinical_all.rename(
columns={f"updrs_{i}": f"updrs_{i}_plus_0" for i in range(1, 5)},
inplace=True,
)
train_clinical_all.rename(
columns={"pred_month": f"pred_month_plus_0"}, inplace=True
)
for i in range(1, 5):
target = f"updrs_{i}"
columns_with_target = [
f"{target}_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
columns_with_pred_month = [
f"pred_month_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
y_true_array = train_clinical_all[columns_with_target].values.ravel()
pred_month_array = train_clinical_all[
columns_with_pred_month
].values.ravel()
trend = list(
minimize(
fun=function_to_minimize, x0=[0, 0.0048], method="Powell"
).x
)
target_to_trend[(j, target)] = trend
dfs = []
for i, med in enumerate(["not_all_NA", "NA"]):
df_trend = pd.DataFrame(columns=targets)
for visit_month in pred_months:
for target in targets:
trend = target_to_trend[(i, target)]
if target == "updrs_4":
if visit_month < 54:
pred_month = 54
else:
pred_month = visit_month
else:
pred_month = visit_month
result = trend[0] + pred_month * trend[1]
df_trend.loc[visit_month, target] = (
np.round(result) if result >= 0 else 0.0
)
df_trend.columns = ["trend_" + c for c in df_trend.columns]
df_trend = df_trend.astype(float).reset_index()
df_trend = df_trend.rename(columns={"index": "visit_month"})
df_trend["patient_medication_is_all_NA"] = i
dfs.append(df_trend)
df_group_trend = pd.concat(dfs)
# calculate smape score
df_eval = pd.merge(
df,
df_group_trend,
how="left",
on=["visit_month", "patient_medication_is_all_NA"],
)
y_preds = []
y_trues = []
for target in targets:
df_calc = df_eval.loc[
(~df_eval[target].isnull()) & (~df_eval[f"trend_{target}"].isnull())
]
y_preds.append(df_calc[f"trend_{target}"].values)
y_trues.append(df_calc[target].values)
smape = smape_plus(np.concatenate(y_trues), np.concatenate(y_preds))
if smape < best_smape:
print(f"patient {patient} is inversed")
print(smape)
best_smape = smape
else:
df = df_before.copy()
df_save = df.loc[:, ["patient_id", "patient_medication_is_all_NA"]].drop_duplicates()
df_save = df_save.rename(columns={"patient_medication_is_all_NA": "group"})
df_save.to_csv("patient_group.csv", index=False)
df_load = pd.read_csv("patient_group.csv")
df_load
df_load["group"].value_counts()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/209/129209316.ipynb
| null | null |
[{"Id": 129209316, "ScriptId": 38412255, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2848721, "CreationDate": "05/11/2023 21:06:42", "VersionNumber": 1.0, "Title": "MedModel Find Best Group", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 235.0, "LinesInsertedFromPrevious": 235.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import numpy as np
def smape_plus(y_true, y_pred):
"""
Calculate symmetric mean absolute percentage error from given ground-truth and predictions
Parameters
----------
y_true: array-like of shape (n_samples)
Array of ground-truth values
y_pred: array-like of shape (n_samples)
Array of prediction values
Returns
-------
smape: float
Symmetric mean absolute percentage error
"""
y_true = y_true + 1
y_pred = y_pred + 1
smape = (
100
/ len(y_true)
* np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred)))
)
return smape
train_clinical_data = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
supplemental_clinical_data = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv"
)
train_clinical_data["source"] = "standard"
train_clinical_data["upd23b_clinical_state_on_medication"].fillna("NA", inplace=True)
supplemental_clinical_data["source"] = "supplemental"
supplemental_clinical_data["upd23b_clinical_state_on_medication"].fillna(
"NA", inplace=True
)
df_clinical = pd.concat([train_clinical_data, supplemental_clinical_data])
# Flag users with all NA values
train_plus_unique_med = df_clinical.groupby("patient_id")[
"upd23b_clinical_state_on_medication"
].nunique()
train_plus_first_med = df_clinical.groupby("patient_id")[
"upd23b_clinical_state_on_medication"
].first()
NA_patient_id_list = train_plus_unique_med[
(train_plus_unique_med == 1) & (train_plus_first_med == "NA")
].index.to_list()
df_clinical["patient_medication_is_all_NA"] = (
df_clinical["patient_id"].isin(NA_patient_id_list).astype(int)
)
targets = ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]
pred_months = [
0,
6,
12,
18,
24,
30,
36,
42,
48,
54,
60,
66,
72,
78,
84,
90,
96,
102,
108,
]
# ## Original
# Only Trends
# https://www.kaggle.com/code/vitalykudelya/only-trends
from scipy.optimize import minimize
def smape_plus_1(y_true, y_pred):
y_true_plus_1 = y_true + 1
y_pred_plus_1 = y_pred + 1
metric = np.zeros(len(y_true_plus_1))
numerator = np.abs(y_true_plus_1 - y_pred_plus_1)
denominator = (np.abs(y_true_plus_1) + np.abs(y_pred_plus_1)) / 2
mask_not_zeros = (y_true_plus_1 != 0) | (y_pred_plus_1 != 0)
metric[mask_not_zeros] = numerator[mask_not_zeros] / denominator[mask_not_zeros]
return 100 * np.nanmean(metric)
def calculate_predicitons(pred_month, trend):
if target == "updrs_4":
pred_month = pred_month.clip(54, None)
return np.round(trend[0] + pred_month * trend[1])
def function_to_minimize(x):
metric = smape_plus_1(
y_true=y_true_array,
y_pred=calculate_predicitons(pred_month=pred_month_array, trend=x),
)
return metric
target_to_trend = {}
for j in [0, 1]:
train_clinical_all = df_clinical[
df_clinical["patient_medication_is_all_NA"] == j
].copy()
train_clinical_all["pred_month"] = train_clinical_all["visit_month"]
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[
[
"patient_id",
"visit_month",
"pred_month",
"updrs_1",
"updrs_2",
"updrs_3",
"updrs_4",
]
].copy()
train_shift["visit_month"] -= plus_month
train_shift.rename(
columns={f"updrs_{i}": f"updrs_{i}_plus_{plus_month}" for i in range(1, 5)},
inplace=True,
)
train_shift.rename(
columns={"pred_month": f"pred_month_plus_{plus_month}"}, inplace=True
)
train_clinical_all = train_clinical_all.merge(
train_shift, how="left", on=["patient_id", "visit_month"]
)
train_clinical_all.rename(
columns={f"updrs_{i}": f"updrs_{i}_plus_0" for i in range(1, 5)}, inplace=True
)
train_clinical_all.rename(
columns={"pred_month": f"pred_month_plus_0"}, inplace=True
)
for i in range(1, 5):
target = f"updrs_{i}"
columns_with_target = [
f"{target}_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
columns_with_pred_month = [
f"pred_month_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
y_true_array = train_clinical_all[columns_with_target].values.ravel()
pred_month_array = train_clinical_all[columns_with_pred_month].values.ravel()
trend = list(
minimize(fun=function_to_minimize, x0=[0, 0.0048], method="Powell").x
)
target_to_trend[(j, target)] = trend
dfs = []
for i, med in enumerate(["not_all_NA", "NA"]):
df_trend = pd.DataFrame(columns=targets)
for visit_month in pred_months:
for target in targets:
trend = target_to_trend[(i, target)]
if target == "updrs_4":
if visit_month < 54:
pred_month = 54
else:
pred_month = visit_month
else:
pred_month = visit_month
result = trend[0] + pred_month * trend[1]
df_trend.loc[visit_month, target] = np.round(result) if result >= 0 else 0.0
df_trend.columns = ["trend_" + c for c in df_trend.columns]
df_trend = df_trend.astype(float).reset_index()
df_trend = df_trend.rename(columns={"index": "visit_month"})
df_trend["patient_medication_is_all_NA"] = i
dfs.append(df_trend)
df_group_trend = pd.concat(dfs)
# calculate smape score
df_eval = pd.merge(
df_clinical,
df_group_trend,
how="left",
on=["visit_month", "patient_medication_is_all_NA"],
)
y_preds = []
y_trues = []
for target in targets:
df_calc = df_eval.loc[
(~df_eval[target].isnull()) & (~df_eval[f"trend_{target}"].isnull())
]
y_preds.append(df_calc[f"trend_{target}"].values)
y_trues.append(df_calc[target].values)
smape = smape_plus(np.concatenate(y_trues), np.concatenate(y_preds))
print(smape)
# ## Find Best Group
from tqdm.auto import tqdm
best_smape = smape
patient_list = df_clinical["patient_id"].unique()
df = df_clinical.copy()
for loop in range(10):
print("*" * 20)
print(f"loop {loop} start")
print("*" * 20)
for patient in tqdm(patient_list):
df_before = df.copy()
    # invert this patient's group label (0 <-> 1)
df.loc[(df["patient_id"] == patient), "patient_medication_is_all_NA"] = df.loc[
(df["patient_id"] == patient), "patient_medication_is_all_NA"
].apply(lambda x: 1 if x == 0 else 0)
target_to_trend = {}
for j in [0, 1]:
train_clinical_all = df[df["patient_medication_is_all_NA"] == j].copy()
train_clinical_all["pred_month"] = train_clinical_all["visit_month"]
for plus_month in [6, 12, 24]:
train_shift = train_clinical_all[
[
"patient_id",
"visit_month",
"pred_month",
"updrs_1",
"updrs_2",
"updrs_3",
"updrs_4",
]
].copy()
train_shift["visit_month"] -= plus_month
train_shift.rename(
columns={
f"updrs_{i}": f"updrs_{i}_plus_{plus_month}"
for i in range(1, 5)
},
inplace=True,
)
train_shift.rename(
columns={"pred_month": f"pred_month_plus_{plus_month}"},
inplace=True,
)
train_clinical_all = train_clinical_all.merge(
train_shift, how="left", on=["patient_id", "visit_month"]
)
train_clinical_all.rename(
columns={f"updrs_{i}": f"updrs_{i}_plus_0" for i in range(1, 5)},
inplace=True,
)
train_clinical_all.rename(
columns={"pred_month": f"pred_month_plus_0"}, inplace=True
)
for i in range(1, 5):
target = f"updrs_{i}"
columns_with_target = [
f"{target}_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
columns_with_pred_month = [
f"pred_month_plus_{plus_month}" for plus_month in [0, 6, 12, 24]
]
y_true_array = train_clinical_all[columns_with_target].values.ravel()
pred_month_array = train_clinical_all[
columns_with_pred_month
].values.ravel()
trend = list(
minimize(
fun=function_to_minimize, x0=[0, 0.0048], method="Powell"
).x
)
target_to_trend[(j, target)] = trend
dfs = []
for i, med in enumerate(["not_all_NA", "NA"]):
df_trend = pd.DataFrame(columns=targets)
for visit_month in pred_months:
for target in targets:
trend = target_to_trend[(i, target)]
if target == "updrs_4":
if visit_month < 54:
pred_month = 54
else:
pred_month = visit_month
else:
pred_month = visit_month
result = trend[0] + pred_month * trend[1]
df_trend.loc[visit_month, target] = (
np.round(result) if result >= 0 else 0.0
)
df_trend.columns = ["trend_" + c for c in df_trend.columns]
df_trend = df_trend.astype(float).reset_index()
df_trend = df_trend.rename(columns={"index": "visit_month"})
df_trend["patient_medication_is_all_NA"] = i
dfs.append(df_trend)
df_group_trend = pd.concat(dfs)
# calculate smape score
df_eval = pd.merge(
df,
df_group_trend,
how="left",
on=["visit_month", "patient_medication_is_all_NA"],
)
y_preds = []
y_trues = []
for target in targets:
df_calc = df_eval.loc[
(~df_eval[target].isnull()) & (~df_eval[f"trend_{target}"].isnull())
]
y_preds.append(df_calc[f"trend_{target}"].values)
y_trues.append(df_calc[target].values)
smape = smape_plus(np.concatenate(y_trues), np.concatenate(y_preds))
if smape < best_smape:
print(f"patient {patient} is inversed")
print(smape)
best_smape = smape
else:
df = df_before.copy()
df_save = df.loc[:, ["patient_id", "patient_medication_is_all_NA"]].drop_duplicates()
df_save = df_save.rename(columns={"patient_medication_is_all_NA": "group"})
df_save.to_csv("patient_group.csv", index=False)
df_load = pd.read_csv("patient_group.csv")
df_load
df_load["group"].value_counts()
| false | 0 | 3,486 | 0 | 3,486 | 3,486 |
||
129209701
|
<jupyter_start><jupyter_text>Point Cloud Segmentation
### Context
A labeled point-cloud dataset taken from the Semantic3D project (http://semantic3d.net/view_dbase.php?chl=1). The dataset has billions of XYZ-RGB points and labels them into 7 classes.
### Content
The data are raw ASCII files containing 7 columns (X, Y, Z, Intensity, R, G, B) and the labels are
`{1: man-made terrain, 2: natural terrain, 3: high vegetation, 4: low vegetation, 5: buildings, 6: hard scape, 7: scanning artefacts, 8: cars}` including an 8th class of unlabeled.
Kaggle dataset identifier: point-cloud-segmentation
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
all_paths = [
os.path.join(path, file)
for path, _, files in os.walk(top=os.path.join("..", "input"))
for file in files
if (".labels" in file) or (".txt" in file)
]
label_names = {
0: "unlabeled",
1: "man-made terrain",
2: "natural terrain",
3: "high vegetation",
4: "low vegetation",
5: "buildings",
6: "hard scape",
7: "scanning artefacts",
8: "cars",
}
# ### Read data
all_files_df = pd.DataFrame({"path": all_paths})
all_files_df["basename"] = all_files_df["path"].map(os.path.basename)
all_files_df["id"] = all_files_df["basename"].map(lambda x: os.path.splitext(x)[0])
all_files_df["ext"] = all_files_df["basename"].map(lambda x: os.path.splitext(x)[1][1:])
all_files_df.sample(5)
# #### bildstein_station1 and domfountain_station1 have a txt file and labels
#
all_training_pairs = all_files_df.pivot_table(
values="path", columns="ext", index=["id"], aggfunc="first"
).reset_index()
all_training_pairs
# # Reading Functions data domfountain_station1
# #### using the dropna() method to remove any rows with missing values
# #### tail(1) selects the last (txt, labels) pair, i.e. domfountain_station1
# #### tail(2) with next() picks the earlier pair, i.e. bildstein_station1
_, test_row = next(all_training_pairs.dropna().tail(1).iterrows())
print("this is a test_row", test_row)
read_label_data = lambda path, rows: pd.read_table(
path, sep=" ", nrows=rows, names=["class"], index_col=False
)
read_xyz_data = lambda path, rows: pd.read_table(
path,
sep=" ",
nrows=rows,
names=["x", "y", "z", "intensity", "r", "g", "b"],
header=None,
) # x, y, z, intensity, r, g, b
read_joint_data = lambda c_row, rows: pd.concat(
[read_xyz_data(c_row["txt"], rows), read_label_data(c_row["labels"], rows)], axis=1
)
read_joint_data(test_row, 10)
full_df = read_joint_data(test_row, None)
full_df.describe()
nombre_occurrences = full_df["class"].value_counts().get(0, 0)
nombre_occurrences
# ### Create a new DataFrame containing only the rows with a specific value in the "class" column
number_of_label = 6
# Keep only the rows whose "class" equals the selected label
new_df = full_df.loc[full_df["class"] == number_of_label]
new_df.shape
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
ax.scatter(
new_df["x"], new_df["y"], new_df["z"], c=new_df[["r", "g", "b"]].values / 255, s=3
)
ax.view_init(15, 165)
# # Reading Functions data bildstein_station1
_, test_row = next(all_training_pairs.dropna().tail(2).iterrows())
print("this is a test_row", test_row)
read_label_data = lambda path, rows: pd.read_table(
path, sep=" ", nrows=rows, names=["class"], index_col=False
)
read_xyz_data = lambda path, rows: pd.read_table(
path,
sep=" ",
nrows=rows,
names=["x", "y", "z", "intensity", "r", "g", "b"],
header=None,
) # x, y, z, intensity, r, g, b
read_joint_data = lambda c_row, rows: pd.concat(
[read_xyz_data(c_row["txt"], rows), read_label_data(c_row["labels"], rows)], axis=1
)
read_joint_data(test_row, 10)
full_df2 = read_joint_data(test_row, None)
nombre_occurrences = full_df2["class"].value_counts().get(0, 0)
nombre_occurrences
number_of_label = 3
# Keep only the rows whose "class" equals the selected label
new_df2 = full_df2.loc[full_df2["class"] == number_of_label]
new_df2.shape
# show 3D
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
ax.scatter(
new_df2["x"],
new_df2["y"],
new_df2["z"],
c=new_df2[["r", "g", "b"]].values / 255,
s=3,
)
ax.view_init(15, 165)
# # clustering for data domfountain_station1
#
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
for c_key, c_value in label_names.items():
c_df = full_df[full_df["class"] == c_key]
ax.plot(c_df["x"], c_df["y"], c_df["z"], ".", label=c_value, alpha=0.5)
ax.legend()
ax.view_init(15, 165)
fig.savefig("3d_labels.png", dpi=300)
df_without0 = full_df.drop(full_df[full_df["class"] == 0].index)
data_xyz = df_without0[["x", "y", "z"]]
from sklearn.cluster import KMeans
# Find optimal number of clusters using elbow method
wcss = []
for i in range(4, 10):
print(i)
kmeans = KMeans(
n_clusters=i, init="k-means++", max_iter=300, n_init=10, random_state=0
)
print(i)
kmeans.fit(data_xyz)
wcss.append(kmeans.inertia_)
plt.plot(range(4, 10), wcss)
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.title("Elbow Method")
plt.show()
data_rgb = df_without0[["r", "g", "b"]]
from sklearn.cluster import KMeans
# Find optimal number of clusters using elbow method
wcss1 = []
for i in range(4, 10):
print(i)
kmeans1 = KMeans(
n_clusters=i, init="k-means++", max_iter=300, n_init=10, random_state=0
)
print(i)
kmeans1.fit(data_rgb)
wcss1.append(kmeans1.inertia_)
plt.plot(range(4, 10), wcss1)
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.title("Elbow Method")
plt.show()
# # KMeans for XYZ data
from sklearn.cluster import KMeans
# training the K-means model on a dataset XYZ
kmeans = KMeans(n_clusters=7, init="k-means++", max_iter=300, n_init=10, random_state=0)
y_predict = kmeans.fit_predict(data_xyz)
wcss_xyz = kmeans.inertia_
wcss_xyz
wcss_xyz
# # kmeans for RGB DATA
# training the K-means model on a dataset RGB
kmeans1 = KMeans(
n_clusters=6, init="k-means++", max_iter=300, n_init=10, random_state=0
)
y_predict1 = kmeans1.fit_predict(data_rgb)
# #### WCSS (Within-Cluster Sum of Squares)
wcss_rgb = kmeans1.inertia_
print(wcss_rgb)
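# Illustrative check (a sketch, not part of the original analysis): KMeans' inertia_
# is the sum of squared distances from each point to its assigned cluster centre,
# so recomputing it by hand should give (almost) the same number as wcss_rgb.
_assigned_centers = kmeans1.cluster_centers_[y_predict1]
_manual_wcss = np.sum((data_rgb.values - _assigned_centers) ** 2)
print(_manual_wcss)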
df_without0["predict_xyz"] = y_predict
df_without0["predict_rgb"] = y_predict1
df_without0
df_without0.to_csv("domfountain_predict.csv", index=False)
df_without0
# ### score xyz
from sklearn.metrics import (
adjusted_rand_score,
normalized_mutual_info_score,
fowlkes_mallows_score,
)
# assuming your true labels are stored in a variable called `y_true`
# calculate the ARI
ari_xyz = adjusted_rand_score(df_without0["class"], df_without0["predict_xyz"])
print(f"ARI_xyz: {ari_xyz:.3f}")
# calculate the NMI
nmi_xyz = normalized_mutual_info_score(df_without0["class"], df_without0["predict_xyz"])
print(f"NMI_xyz: {nmi_xyz:.3f}")
# calculate the FMI
fmi_xyz = fowlkes_mallows_score(df_without0["class"], df_without0["predict_xyz"])
print(f"FMI_xyz: {fmi_xyz:.3f}")
from sklearn.metrics import (
adjusted_rand_score,
normalized_mutual_info_score,
fowlkes_mallows_score,
)
# assuming your true labels are stored in a variable called `y_true`
# calculate the ARI
ari_xyz = adjusted_rand_score(df_without0["class"], df_without0["predict_rgb"])
print(f"ARI_rgb: {ari_xyz:.3f}")
# calculate the NMI
nmi_xyz = normalized_mutual_info_score(df_without0["class"], df_without0["predict_rgb"])
print(f"NMI_rgb: {nmi_xyz:.3f}")
# calculate the FMI
fmi_xyz = fowlkes_mallows_score(df_without0["class"], df_without0["predict_rgb"])
print(f"FMI_rgb: {fmi_xyz:.3f}")
import matplotlib.pyplot as plt
number_of_label = 5
new_df2 = pd.DataFrame()
# Keep only the rows assigned to the selected XYZ cluster
new_df2 = df_without0.loc[df_without0["predict_xyz"] == number_of_label]
new_df2.shape
# show 3D
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
ax.scatter(
new_df2["x"],
new_df2["y"],
new_df2["z"],
c=new_df2[["r", "g", "b"]].values / 255,
s=3,
)
ax.view_init(15, 165)
df_5_predict = df_without0.loc[df_without0["predict_xyz"] == 3]
df_5_predict
nombre_occurrences = df_5_predict["class"].value_counts().get(8)
nombre_occurrences
occurrences = np.bincount(df_5_predict["class"])
for valeur, nb_occurrences in enumerate(occurrences):
print(f"Valeur {valeur}: {nb_occurrences} occurrences")
occurrences
# ## XYZ confusion matrix
#
from sklearn.metrics import confusion_matrix
# Step: build the confusion matrix
# Use scikit-learn's confusion_matrix function to compare the true classes with the XYZ cluster assignments
confusion_mat_xyz = confusion_matrix(
df_without0[["class"]], df_without0[["predict_xyz"]]
)
confusion_mat_xyz = np.delete(confusion_mat_xyz, 0, axis=0)
confusion_mat_xyz = np.delete(confusion_mat_xyz, [7, 8], axis=1)
print(confusion_mat_xyz)
# ## RGB confusion matrix
# Use scikit-learn's confusion_matrix function to compare the true classes with the RGB cluster assignments
confusion_mat_rgb = confusion_matrix(
df_without0[["class"]], df_without0[["predict_rgb"]]
)
confusion_mat_rgb = np.delete(confusion_mat_rgb, 0, axis=0)
confusion_mat_rgb = np.delete(confusion_mat_rgb, [6, 7, 8], axis=1)
print(confusion_mat_rgb)
# ## Evaluation XYZ
# Count the "true positives" (points falling in the dominant cluster of their true class)
# and the "false positives" (all remaining points)
vrais_positifs = np.sum(np.max(confusion_mat_xyz, axis=1))
faux_positifs = np.sum(confusion_mat_xyz) - vrais_positifs
# Compute the precision (in practice, the fraction of points landing in their class's dominant cluster)
precision_xyz = vrais_positifs / (vrais_positifs + faux_positifs)
print("Precision_xyz:", precision_xyz)
# ## Evaluation RGB
# Count the "true positives" (points falling in the dominant cluster of their true class)
# and the "false positives" (all remaining points)
vrais_positifs = np.sum(np.max(confusion_mat_rgb, axis=1))
faux_positifs = np.sum(confusion_mat_rgb) - vrais_positifs
# Compute the precision (in practice, the fraction of points landing in their class's dominant cluster)
precision_rgb = vrais_positifs / (vrais_positifs + faux_positifs)
print("Precision_rgb:", precision_rgb)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/209/129209701.ipynb
|
point-cloud-segmentation
|
kmader
|
[{"Id": 129209701, "ScriptId": 38367059, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9029195, "CreationDate": "05/11/2023 21:12:09", "VersionNumber": 3.0, "Title": "P2M-3D-point-cloud", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 297.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 223.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185050746, "KernelVersionId": 129209701, "SourceDatasetVersionId": 41920}]
|
[{"Id": 41920, "DatasetId": 32283, "DatasourceVersionId": 44103, "CreatorUserId": 67483, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "06/19/2018 11:16:26", "VersionNumber": 2.0, "Title": "Point Cloud Segmentation", "Slug": "point-cloud-segmentation", "Subtitle": "Segment buildings, trees, and cars in point cloud datasets", "Description": "### Context\n\nA labeled point-cloud dataset taken from the Semantic3D project (http://semantic3d.net/view_dbase.php?chl=1). The dataset has billions of XYZ-RGB points and labels them into 7 classes.\n\n### Content\n\nThe data are raw ASCII files containing 7 columns (X, Y, Z, Intensity, R, G, B) and the labels are \n\n`{1: man-made terrain, 2: natural terrain, 3: high vegetation, 4: low vegetation, 5: buildings, 6: hard scape, 7: scanning artefacts, 8: cars}` including an 8th class of unlabeled.\n\n### Acknowledgements\n\nThe data are taken directly from the Semantic3D competition and users must check and cite the rules and regulations posted on the original site: http://semantic3d.net/view_dbase.php?chl=1\n\n\n### Inspiration\n\n 1. What sort of models can classify point clouds well? \n 2. What\n transformations make classification easier?\n 3. Are there certain\n classes which require more data in order to classify well?", "VersionNotes": "adding dom dataset", "TotalCompressedBytes": 709854048.0, "TotalUncompressedBytes": 709854048.0}]
|
[{"Id": 32283, "CreatorUserId": 67483, "OwnerUserId": 67483.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 41920.0, "CurrentDatasourceVersionId": 44103.0, "ForumId": 40616, "Type": 2, "CreationDate": "06/19/2018 09:18:25", "LastActivityDate": "06/19/2018", "TotalViews": 126225, "TotalDownloads": 816, "TotalVotes": 39, "TotalKernels": 5}]
|
[{"Id": 67483, "UserName": "kmader", "DisplayName": "K Scott Mader", "RegisterDate": "11/04/2012", "PerformanceTier": 4}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
all_paths = [
os.path.join(path, file)
for path, _, files in os.walk(top=os.path.join("..", "input"))
for file in files
if (".labels" in file) or (".txt" in file)
]
label_names = {
0: "unlabeled",
1: "man-made terrain",
2: "natural terrain",
3: "high vegetation",
4: "low vegetation",
5: "buildings",
6: "hard scape",
7: "scanning artefacts",
8: "cars",
}
# ### Read data
all_files_df = pd.DataFrame({"path": all_paths})
all_files_df["basename"] = all_files_df["path"].map(os.path.basename)
all_files_df["id"] = all_files_df["basename"].map(lambda x: os.path.splitext(x)[0])
all_files_df["ext"] = all_files_df["basename"].map(lambda x: os.path.splitext(x)[1][1:])
all_files_df.sample(5)
# #### bildstein_station1 and domfountain_station1 have a txt file and labels
#
all_training_pairs = all_files_df.pivot_table(
values="path", columns="ext", index=["id"], aggfunc="first"
).reset_index()
all_training_pairs
# # Reading Functions data domfountain_station1
# #### using the dropna() method to remove any rows with missing values
# #### tail(1) selects the last (txt, labels) pair, i.e. domfountain_station1
# #### tail(2) with next() picks the earlier pair, i.e. bildstein_station1
_, test_row = next(all_training_pairs.dropna().tail(1).iterrows())
print("this is a test_row", test_row)
read_label_data = lambda path, rows: pd.read_table(
path, sep=" ", nrows=rows, names=["class"], index_col=False
)
read_xyz_data = lambda path, rows: pd.read_table(
path,
sep=" ",
nrows=rows,
names=["x", "y", "z", "intensity", "r", "g", "b"],
header=None,
) # x, y, z, intensity, r, g, b
read_joint_data = lambda c_row, rows: pd.concat(
[read_xyz_data(c_row["txt"], rows), read_label_data(c_row["labels"], rows)], axis=1
)
read_joint_data(test_row, 10)
full_df = read_joint_data(test_row, None)
full_df.describe()
nombre_occurrences = full_df["class"].value_counts().get(0, 0)
nombre_occurrences
# ### Create a new DataFrame containing only the rows with a specific value in the "class" column
number_of_label = 6
# Keep only the rows whose "class" equals the selected label
new_df = full_df.loc[full_df["class"] == number_of_label]
new_df.shape
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
ax.scatter(
new_df["x"], new_df["y"], new_df["z"], c=new_df[["r", "g", "b"]].values / 255, s=3
)
ax.view_init(15, 165)
# # Reading Functions data bildstein_station1
_, test_row = next(all_training_pairs.dropna().tail(2).iterrows())
print("this is a test_row", test_row)
read_label_data = lambda path, rows: pd.read_table(
path, sep=" ", nrows=rows, names=["class"], index_col=False
)
read_xyz_data = lambda path, rows: pd.read_table(
path,
sep=" ",
nrows=rows,
names=["x", "y", "z", "intensity", "r", "g", "b"],
header=None,
) # x, y, z, intensity, r, g, b
read_joint_data = lambda c_row, rows: pd.concat(
[read_xyz_data(c_row["txt"], rows), read_label_data(c_row["labels"], rows)], axis=1
)
read_joint_data(test_row, 10)
full_df2 = read_joint_data(test_row, None)
nombre_occurrences = full_df2["class"].value_counts().get(0, 0)
nombre_occurrences
number_of_label = 3
# Keep only the rows whose "class" equals the selected label
new_df2 = full_df2.loc[full_df2["class"] == number_of_label]
new_df2.shape
# show 3D
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
ax.scatter(
new_df2["x"],
new_df2["y"],
new_df2["z"],
c=new_df2[["r", "g", "b"]].values / 255,
s=3,
)
ax.view_init(15, 165)
# # clustering for data domfountain_station1
#
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
for c_key, c_value in label_names.items():
c_df = full_df[full_df["class"] == c_key]
ax.plot(c_df["x"], c_df["y"], c_df["z"], ".", label=c_value, alpha=0.5)
ax.legend()
ax.view_init(15, 165)
fig.savefig("3d_labels.png", dpi=300)
df_without0 = full_df.drop(full_df[full_df["class"] == 0].index)
data_xyz = df_without0[["x", "y", "z"]]
from sklearn.cluster import KMeans
# Find optimal number of clusters using elbow method
wcss = []
for i in range(4, 10):
print(i)
kmeans = KMeans(
n_clusters=i, init="k-means++", max_iter=300, n_init=10, random_state=0
)
print(i)
kmeans.fit(data_xyz)
wcss.append(kmeans.inertia_)
plt.plot(range(4, 10), wcss)
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.title("Elbow Method")
plt.show()
data_rgb = df_without0[["r", "g", "b"]]
from sklearn.cluster import KMeans
# Find optimal number of clusters using elbow method
wcss1 = []
for i in range(4, 10):
print(i)
kmeans1 = KMeans(
n_clusters=i, init="k-means++", max_iter=300, n_init=10, random_state=0
)
print(i)
kmeans1.fit(data_rgb)
wcss1.append(kmeans1.inertia_)
plt.plot(range(4, 10), wcss1)
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.title("Elbow Method")
plt.show()
# # KMeans for XYZ data
from sklearn.cluster import KMeans
# training the K-means model on a dataset XYZ
kmeans = KMeans(n_clusters=7, init="k-means++", max_iter=300, n_init=10, random_state=0)
y_predict = kmeans.fit_predict(data_xyz)
wcss_xyz = kmeans.inertia_
wcss_xyz
wcss_xyz
# # kmeans for RGB DATA
# training the K-means model on a dataset RGB
kmeans1 = KMeans(
n_clusters=6, init="k-means++", max_iter=300, n_init=10, random_state=0
)
y_predict1 = kmeans1.fit_predict(data_rgb)
# #### WCSS (Within-Cluster Sum of Squares)
wcss_rgb = kmeans1.inertia_
print(wcss_rgb)
df_without0["predict_xyz"] = y_predict
df_without0["predict_rgb"] = y_predict1
df_without0
df_without0.to_csv("domfountain_predict.csv", index=False)
df_without0
# ### score xyz
from sklearn.metrics import (
adjusted_rand_score,
normalized_mutual_info_score,
fowlkes_mallows_score,
)
# assuming your true labels are stored in a variable called `y_true`
# calculate the ARI
ari_xyz = adjusted_rand_score(df_without0["class"], df_without0["predict_xyz"])
print(f"ARI_xyz: {ari_xyz:.3f}")
# calculate the NMI
nmi_xyz = normalized_mutual_info_score(df_without0["class"], df_without0["predict_xyz"])
print(f"NMI_xyz: {nmi_xyz:.3f}")
# calculate the FMI
fmi_xyz = fowlkes_mallows_score(df_without0["class"], df_without0["predict_xyz"])
print(f"FMI_xyz: {fmi_xyz:.3f}")
from sklearn.metrics import (
adjusted_rand_score,
normalized_mutual_info_score,
fowlkes_mallows_score,
)
# assuming your true labels are stored in a variable called `y_true`
# calculate the ARI
ari_xyz = adjusted_rand_score(df_without0["class"], df_without0["predict_rgb"])
print(f"ARI_rgb: {ari_xyz:.3f}")
# calculate the NMI
nmi_xyz = normalized_mutual_info_score(df_without0["class"], df_without0["predict_rgb"])
print(f"NMI_rgb: {nmi_xyz:.3f}")
# calculate the FMI
fmi_xyz = fowlkes_mallows_score(df_without0["class"], df_without0["predict_rgb"])
print(f"FMI_rgb: {fmi_xyz:.3f}")
import matplotlib.pyplot as plt
number_of_label = 5
new_df2 = pd.DataFrame()
# Keep only the rows assigned to the selected XYZ cluster
new_df2 = df_without0.loc[df_without0["predict_xyz"] == number_of_label]
new_df2.shape
# show 3D
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
ax.scatter(
new_df2["x"],
new_df2["y"],
new_df2["z"],
c=new_df2[["r", "g", "b"]].values / 255,
s=3,
)
ax.view_init(15, 165)
df_5_predict = df_without0.loc[df_without0["predict_xyz"] == 3]
df_5_predict
nombre_occurrences = df_5_predict["class"].value_counts().get(8)
nombre_occurrences
occurrences = np.bincount(df_5_predict["class"])
for valeur, nb_occurrences in enumerate(occurrences):
print(f"Valeur {valeur}: {nb_occurrences} occurrences")
occurrences
# ## XYZ confusion matrix
#
from sklearn.metrics import confusion_matrix
# Step: build the confusion matrix
# Use scikit-learn's confusion_matrix function to compare the true classes with the XYZ cluster assignments
confusion_mat_xyz = confusion_matrix(
df_without0[["class"]], df_without0[["predict_xyz"]]
)
confusion_mat_xyz = np.delete(confusion_mat_xyz, 0, axis=0)
confusion_mat_xyz = np.delete(confusion_mat_xyz, [7, 8], axis=1)
print(confusion_mat_xyz)
# ## RGB confusion matrix
# Use scikit-learn's confusion_matrix function to compare the true classes with the RGB cluster assignments
confusion_mat_rgb = confusion_matrix(
df_without0[["class"]], df_without0[["predict_rgb"]]
)
confusion_mat_rgb = np.delete(confusion_mat_rgb, 0, axis=0)
confusion_mat_rgb = np.delete(confusion_mat_rgb, [6, 7, 8], axis=1)
print(confusion_mat_rgb)
# ## Evaluation XYZ
# Count the "true positives" (points falling in the dominant cluster of their true class)
# and the "false positives" (all remaining points)
vrais_positifs = np.sum(np.max(confusion_mat_xyz, axis=1))
faux_positifs = np.sum(confusion_mat_xyz) - vrais_positifs
# Compute the precision (in practice, the fraction of points landing in their class's dominant cluster)
precision_xyz = vrais_positifs / (vrais_positifs + faux_positifs)
print("Precision_xyz:", precision_xyz)
# ## Evaluation RGB
# Count the "true positives" (points falling in the dominant cluster of their true class)
# and the "false positives" (all remaining points)
vrais_positifs = np.sum(np.max(confusion_mat_rgb, axis=1))
faux_positifs = np.sum(confusion_mat_rgb) - vrais_positifs
# Compute the precision (in practice, the fraction of points landing in their class's dominant cluster)
precision_rgb = vrais_positifs / (vrais_positifs + faux_positifs)
print("Precision_rgb:", precision_rgb)
| false | 0 | 3,657 | 0 | 3,843 | 3,657 |
||
129373947
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # The dataset contains data about employees working in an organization and their levels of burnout during the pandemic period, when work from home was the norm.
# ### Employee ID: The unique ID allocated for each employee
# ### Date of Joining: The date-time when the employee has joined the organization (example: 2008-12-30)
# ### Gender: The gender of the employee (Male/Female)
# ### Company Type: The type of company where the employee is working (Service/Product)
# ### WFH Setup Available: Is the work from home facility available for the employee (Yes/No)
# ### Designation: The designation of the employee of work in the organization. In the range of [0.0, 5.0] bigger is higher designation.
# ### Resource Allocation: The amount of resource allocated to the employee to work, ie. number of working hours. In the range of [1.0, 10.0] (higher means more resource)
# ### Mental Fatigue Score: The level of fatigue mentally the employee is facing. In the range of [0.0, 10.0] where 0.0 means no fatigue and 10.0 means completely fatigue.
# ### Burn Rate: The value we need to predict for each employee, telling the rate of burn out while working. In the range of [0.0, 1.0], where a higher value means greater burn out.
burnout = pd.read_csv("/kaggle/input/employee-mentalhealth/Employee_mental-health.csv")
burnout
burnout.info()
burnout.describe()
burnout["Employee ID"].nunique()
# #### Here we can see that there are 22750 unique values in the Employee ID column, which means there are no duplicate IDs. Also, in terms of analysis, the Employee ID doesn't help generate any insights, hence we drop the column using the 'drop' function
burnout.drop("Employee ID", axis=1, inplace=True)
burnout
# #### The 'Date of Joining' column is in object datatype while it should be in datetime format
burnout["Date of Joining"] = pd.to_datetime(burnout["Date of Joining"])
burnout.info()
burnout
burnout["Gender"].unique()
burnout["Company Type"].unique()
burnout["WFH Setup Available"].unique()
# #### Filling up the missing values for the last 3 columns with the mean values of respective columns
burnout["Resource Allocation"].unique()
mean_resource_allocation = burnout["Resource Allocation"].mean()
burnout["Resource Allocation"] = (
burnout["Resource Allocation"].fillna(mean_resource_allocation).round(0)
)
burnout["Resource Allocation"].unique()
burnout["Mental Fatigue Score"].unique()
mean_mental_fatigue = burnout["Mental Fatigue Score"].mean()
burnout["Mental Fatigue Score"] = (
burnout["Mental Fatigue Score"].fillna(mean_mental_fatigue).round(1)
)
burnout["Mental Fatigue Score"].unique()
burnout["Burn Rate"].unique()
mean_burn_rate = burnout["Burn Rate"].mean()
burnout["Burn Rate"] = burnout["Burn Rate"].fillna(mean_burn_rate).round(2)
burnout["Burn Rate"].unique()
# #### The data seems to be cleaned and ready for analysis
# ## 1. Does the gender of employees impact their burnout rate?
import matplotlib.pyplot as plt
average_burnout_by_gender = burnout.groupby("Gender")["Burn Rate"].mean()
colors = ["#FF6F61", "#6B5B95"]
plt.bar(average_burnout_by_gender.index, average_burnout_by_gender, color=colors)
plt.xlabel("Gender")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by Gender")
plt.show()
# #### The burnout rate is higher in males when compared to that of females
# ## 2. Is there a relationship between resource allocation and burnout rate?
correlation = burnout["Resource Allocation"].corr(burnout["Burn Rate"])
# Create a scatter plot
plt.scatter(
burnout["Resource Allocation"], burnout["Burn Rate"], color="orange", alpha=0.6
)
# Set labels and title
plt.xlabel("Resource Allocation")
plt.ylabel("Burnout Rate")
plt.title(
"Relationship between Resource Allocation and Burnout Rate (Correlation: {:.2f})".format(
correlation
)
)
# Display the plot
plt.show()
# #### There is a direct relationship between the resources allocated to an employee and the burnout rate: the higher the resource allocation, the higher the burnout rate, and vice versa
# ## 3. How does the availability of work-from-home (WFH) setup affect burnout rate?
average_burnout_by_wfh = burnout.groupby("WFH Setup Available")["Burn Rate"].mean()
colors = ["#4C72B0", "#55A868"]
plt.bar(average_burnout_by_wfh.index, average_burnout_by_wfh, color=colors)
plt.xlabel("WFH Setup Available")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by WFH Setup Availability")
plt.show()
# #### Employees seem to be less burned out when a work-from-home setup is available than when it is not
# ## 4. Are there any differences in burnout rate between service and product-based companies?
average_burnout_by_company_type = burnout.groupby("Company Type")["Burn Rate"].mean()
colors = ["#FF9F40", "#FFCD56"]
plt.bar(
average_burnout_by_company_type.index, average_burnout_by_company_type, color=colors
)
plt.xlabel("Company Type")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by Company Type")
plt.show()
# #### The company type doesn't seem to have a noticeable impact on the burnout rates of working employees
# ## 5. Does the length of employment impact the burnout rate?
# #### Assuming the employees are still with the organization, we calculate how many years each has worked there
burnout["Date of Joining"] = pd.to_datetime(burnout["Date of Joining"])
current_date = pd.Timestamp.now()  # reference date for computing tenure
burnout["Employment Duration"] = (
    current_date - burnout["Date of Joining"]
).dt.days / 365
average_burnout_by_duration = burnout.groupby("Employment Duration")["Burn Rate"].mean()
plt.plot(
average_burnout_by_duration.index,
average_burnout_by_duration,
marker="o",
color="#FF5E5B",
)
plt.xlabel("Employment Duration (years)")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by Employment Duration")
plt.show()
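# Note (aside): grouping by the exact fractional duration yields one point per unique
# joining date, which makes this line quite noisy; a rounded view such as
#   burnout.groupby(burnout["Employment Duration"].round())["Burn Rate"].mean()
# (a sketch, not part of the original analysis) would be easier to read.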
# #### The employment duration doesn't seem to have a direct impact on the burnout rates
# ## 6. Is there any relationship between the mental fatigue score and burnout rate?
sorted_data = burnout.sort_values("Mental Fatigue Score")
# Create an area chart
plt.fill_between(
sorted_data["Mental Fatigue Score"],
sorted_data["Burn Rate"],
color="#FF5E5B",
alpha=0.7,
)
# Set labels and title
plt.xlabel("Mental Fatigue Score")
plt.ylabel("Burnout Rate")
plt.title("Relationship between Mental Fatigue Score and Burnout Rate")
# Display the plot
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/373/129373947.ipynb
| null | null |
[{"Id": 129373947, "ScriptId": 38465257, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8605824, "CreationDate": "05/13/2023 08:39:12", "VersionNumber": 1.0, "Title": "Employers-Burnout_Analysis", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 182.0, "LinesInsertedFromPrevious": 182.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # The dataset contains data about employees working in an organization and their levels of burnout during the pandemic period, when work from home was the norm.
# ### Employee ID: The unique ID allocated for each employee
# ### Date of Joining: The date-time when the employee has joined the organization (example: 2008-12-30)
# ### Gender: The gender of the employee (Male/Female)
# ### Company Type: The type of company where the employee is working (Service/Product)
# ### WFH Setup Available: Is the work from home facility available for the employee (Yes/No)
# ### Designation: The designation of the employee of work in the organization. In the range of [0.0, 5.0] bigger is higher designation.
# ### Resource Allocation: The amount of resource allocated to the employee to work, ie. number of working hours. In the range of [1.0, 10.0] (higher means more resource)
# ### Mental Fatigue Score: The level of fatigue mentally the employee is facing. In the range of [0.0, 10.0] where 0.0 means no fatigue and 10.0 means completely fatigue.
# ### Burn Rate: The value we need to predict for each employee, telling the rate of burn out while working. In the range of [0.0, 1.0], where a higher value means greater burn out.
burnout = pd.read_csv("/kaggle/input/employee-mentalhealth/Employee_mental-health.csv")
burnout
burnout.info()
burnout.describe()
burnout["Employee ID"].nunique()
# #### Here we can see that there are 22750 unique values in the Employee ID column, which means there are no duplicate IDs. Also, in terms of analysis, the Employee ID doesn't help generate any insights, hence we drop the column using the 'drop' function
burnout.drop("Employee ID", axis=1, inplace=True)
burnout
# #### The 'Date of Joining' column is in object datatype while it should be in datetime format
burnout["Date of Joining"] = pd.to_datetime(burnout["Date of Joining"])
burnout.info()
burnout
burnout["Gender"].unique()
burnout["Company Type"].unique()
burnout["WFH Setup Available"].unique()
# #### Filling up the missing values for the last 3 columns with the mean values of respective columns
burnout["Resource Allocation"].unique()
mean_resource_allocation = burnout["Resource Allocation"].mean()
burnout["Resource Allocation"] = (
burnout["Resource Allocation"].fillna(mean_resource_allocation).round(0)
)
burnout["Resource Allocation"].unique()
burnout["Mental Fatigue Score"].unique()
mean_mental_fatigue = burnout["Mental Fatigue Score"].mean()
burnout["Mental Fatigue Score"] = (
burnout["Mental Fatigue Score"].fillna(mean_mental_fatigue).round(1)
)
burnout["Mental Fatigue Score"].unique()
burnout["Burn Rate"].unique()
mean_burn_rate = burnout["Burn Rate"].mean()
burnout["Burn Rate"] = burnout["Burn Rate"].fillna(mean_burn_rate).round(2)
burnout["Burn Rate"].unique()
# #### The data seems to be cleaned and ready for analysis
# ## 1. Does the gender of employees impact their burnout rate?
import matplotlib.pyplot as plt
average_burnout_by_gender = burnout.groupby("Gender")["Burn Rate"].mean()
colors = ["#FF6F61", "#6B5B95"]
plt.bar(average_burnout_by_gender.index, average_burnout_by_gender, color=colors)
plt.xlabel("Gender")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by Gender")
plt.show()
# #### The burnout rate is higher in males when compared to that of females
# ## 2. Is there a relationship between resource allocation and burnout rate?
correlation = burnout["Resource Allocation"].corr(burnout["Burn Rate"])
# Create a scatter plot
plt.scatter(
burnout["Resource Allocation"], burnout["Burn Rate"], color="orange", alpha=0.6
)
# Set labels and title
plt.xlabel("Resource Allocation")
plt.ylabel("Burnout Rate")
plt.title(
"Relationship between Resource Allocation and Burnout Rate (Correlation: {:.2f})".format(
correlation
)
)
# Display the plot
plt.show()
# #### There is a direct relationship between the resources allocated to an employee and the burnout rate: the higher the resource allocation, the higher the burnout rate, and vice versa
# ## 3. How does the availability of work-from-home (WFH) setup affect burnout rate?
average_burnout_by_wfh = burnout.groupby("WFH Setup Available")["Burn Rate"].mean()
colors = ["#4C72B0", "#55A868"]
plt.bar(average_burnout_by_wfh.index, average_burnout_by_wfh, color=colors)
plt.xlabel("WFH Setup Available")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by WFH Setup Availability")
plt.show()
# #### Employees seem to be less burned out when a work-from-home setup is available than when it is not
# ## 4. Are there any differences in burnout rate between service and product-based companies?
average_burnout_by_company_type = burnout.groupby("Company Type")["Burn Rate"].mean()
colors = ["#FF9F40", "#FFCD56"]
plt.bar(
average_burnout_by_company_type.index, average_burnout_by_company_type, color=colors
)
plt.xlabel("Company Type")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by Company Type")
plt.show()
# #### The company type doesn't seem to have a noticeable impact on the burnout rates of working employees
# ## 5. Does the length of employment impact the burnout rate?
# #### Assuming the employees are still with the organization, we calculate how many years each has worked there
burnout["Date of Joining"] = pd.to_datetime(burnout["Date of Joining"])
current_date = pd.Timestamp.now()  # reference date for computing tenure
burnout["Employment Duration"] = (
    current_date - burnout["Date of Joining"]
).dt.days / 365
average_burnout_by_duration = burnout.groupby("Employment Duration")["Burn Rate"].mean()
plt.plot(
average_burnout_by_duration.index,
average_burnout_by_duration,
marker="o",
color="#FF5E5B",
)
plt.xlabel("Employment Duration (years)")
plt.ylabel("Average Burnout Rate")
plt.title("Average Burnout Rate by Employment Duration")
plt.show()
# #### The employment duration doesn't seem to have a direct impact on the burnout rates
# ## 6. Is there any relationship between the mental fatigue score and burnout rate?
sorted_data = burnout.sort_values("Mental Fatigue Score")
# Create an area chart
plt.fill_between(
sorted_data["Mental Fatigue Score"],
sorted_data["Burn Rate"],
color="#FF5E5B",
alpha=0.7,
)
# Set labels and title
plt.xlabel("Mental Fatigue Score")
plt.ylabel("Burnout Rate")
plt.title("Relationship between Mental Fatigue Score and Burnout Rate")
# Display the plot
plt.show()
| false | 0 | 2,049 | 0 | 2,049 | 2,049 |
||
129373165
|
<jupyter_start><jupyter_text>Amazon Fine Food Reviews
## Context
This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.
## Contents
- Reviews.csv: Pulled from the corresponding SQLite table named Reviews in database.sqlite<br>
- database.sqlite: Contains the table 'Reviews'<br><br>
Data includes:<br>
- Reviews from Oct 1999 - Oct 2012<br>
- 568,454 reviews<br>
- 256,059 users<br>
- 74,258 products<br>
- 260 users with > 50 reviews<br>
[](https://www.kaggle.com/benhamner/d/snap/amazon-fine-food-reviews/reviews-wordcloud)
## Acknowledgements
See [this SQLite query](https://www.kaggle.com/benhamner/d/snap/amazon-fine-food-reviews/data-sample) for a quick sample of the dataset.
If you publish articles based on this dataset, please cite the following paper:
- J. McAuley and J. Leskovec. [From amateurs to connoisseurs: modeling the evolution of user expertise through online reviews](http://i.stanford.edu/~julian/pdfs/www13.pdf). WWW, 2013.
Kaggle dataset identifier: amazon-fine-food-reviews
<jupyter_script># # **Read in Data**
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
import nltk
df = pd.read_csv("../input/amazon-fine-food-reviews/Reviews.csv")
print(df.shape)
df = df.head(1000)
print(df.shape)
# # **EDA**
df.head()
ax = (
df["Score"]
.value_counts()
.sort_index()
.plot(kind="bar", title="Reviews Count by Stars", figsize=(12, 5))
)
ax.set_xlabel("Amazon review score")
plt.show()
# # Some basic NLTK
#
##Taking the 100th value in the dataset as an example
example = df["Text"][100]
print(example)
tokens = nltk.word_tokenize(example)
tokens[:15]
# # Part of speech tagging
pos_tag = nltk.pos_tag(tokens)
pos_tag[:10]
chunked_entities = nltk.chunk.ne_chunk(pos_tag)
chunked_entities.pprint()
# # Vader scoring
# * Stop words are removed
# * Every word is scored, and the individual word scores are combined into a total (compound) score
from nltk.sentiment import SentimentIntensityAnalyzer
from tqdm.notebook import tqdm
sia = SentimentIntensityAnalyzer()
sia.polarity_scores(example)
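# For intuition, two made-up sentences (not from the dataset); the compound score
# ranges from -1 (most negative) to +1 (most positive).
print(sia.polarity_scores("This product is absolutely wonderful!"))
print(sia.polarity_scores("This product is terrible and a waste of money."))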
# Run polarity scores for the entire dataset
from tqdm import tqdm
res = {}
for i, row in tqdm(df.iterrows(), total=len(df)):
text = row["Text"]
myid = row["Id"]
res[myid] = sia.polarity_scores(text)
vaders = pd.DataFrame(res).T
vaders = vaders.reset_index().rename(columns={"index": "Id"})
vaders = vaders.merge(df, how="left")
vaders.head()
# ## Plotting Vader Results
# * Plotting it in the form of a grid with positive, neutral and negative sentiments side by side
# First, plotting the star rating vs. the compound sentiment polarity score
ax = sns.barplot(data=vaders, x="Score", y="compound")
ax.set_title("Compund Score by Amazon Star Review")
plt.show()
fig, axs = plt.subplots(1, 3, figsize=(11, 4))
sns.barplot(data=vaders, x="Score", y="pos", ax=axs[0])
sns.barplot(data=vaders, x="Score", y="neu", ax=axs[1])
sns.barplot(data=vaders, x="Score", y="neg", ax=axs[2])
axs[0].set_title("Positive")
axs[1].set_title("Neutral")
axs[2].set_title("Negative")
plt.tight_layout()
plt.show()
# ## Roberta Pretrained Model
# * Use a model pretrained on a large corpus of data.
# * The transformer model accounts not only for the individual words but also for the context around them (a small VADER-vs-RoBERTa comparison follows the scoring helper below).
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
"roberta_neg": scores[0],
"roberta_neu": scores[1],
"roberta_pos": scores[2],
}
print(scores_dict)
# VADER results on example
print(example)
sia.polarity_scores(example)
# Store it in a dictionary
def polarity_scores_roberta(example):
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
"roberta_neg": scores[0],
"roberta_neu": scores[1],
"roberta_pos": scores[2],
}
return scores_dict
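# A small illustration of the context point from the RoBERTa bullets above: a negated
# sentence (hypothetical example text) where a lexicon-based scorer and the transformer
# can disagree. This is only a sketch; it relies on the tokenizer, model, and sia objects
# created in the cells above.
tricky_example = "This product is not good at all."
print("VADER  :", sia.polarity_scores(tricky_example))
print("RoBERTa:", polarity_scores_roberta(tricky_example))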
# Store both sentiment scores in another dictionary
res = {}
for i, row in tqdm(df.iterrows(), total=len(df)):
try:
text = row["Text"]
myid = row["Id"]
vader_result = sia.polarity_scores(text)
vader_result_rename = {}
for key, value in vader_result.items():
vader_result_rename[f"vader_{key}"] = value
roberta_result = polarity_scores_roberta(text)
both = {**vader_result_rename, **roberta_result}
res[myid] = both
except RuntimeError:
print(f"Broke for id {myid}")
# ## Merged Results
#
results_df = pd.DataFrame(res).T
results_df = results_df.reset_index().rename(columns={"index": "Id"})
results_df = results_df.merge(df, how="left")
## Compare model results
results_df.columns
# Plot the comparison
sns.pairplot(
data=results_df,
vars=[
"vader_neg",
"vader_neu",
"vader_pos",
"roberta_neg",
"roberta_neu",
"roberta_pos",
],
hue="Score",
palette="tab10",
)
plt.show()
results_df
# ## Review scores
# Positive sentiment 1 star (roberta)
results_df.query("Score == 1").sort_values("roberta_pos", ascending=False)[
"Text"
].values[0]
# Negative sentiment 5 star (roberta)
results_df.query("Score == 5").sort_values("roberta_neg", ascending=False)[
"Text"
].values[0]
# Positive sentiment 1 star (vader)
results_df.query("Score == 1").sort_values("vader_pos", ascending=False)["Text"].values[
0
]
# Negative sentiment 5 star (vader)
results_df.query("Score == 5").sort_values("vader_neg", ascending=False)["Text"].values[
0
]
# ## Simple Pipeline
from transformers import pipeline
sent_pipeline = pipeline("sentiment-analysis")
# sentiment_scores = df['Text'].apply(lambda text: sent_pipeline(text)) #Showing an error
def preprocess_text(text):
# Truncate or preprocess the text to fit within the model's maximum sequence length
    truncated_text = text[:512]  # Truncate to the first 512 characters as a rough proxy for the 512-token limit
return truncated_text
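# A token-level alternative (sketch): reuse the pipeline's tokenizer defined above so the
# cut-off is applied in tokens rather than characters; max_tokens=512 is an assumption
# matching the model's usual sequence limit.
def truncate_to_max_tokens(text, max_tokens=512):
    tok = sent_pipeline.tokenizer
    ids = tok.encode(text, truncation=True, max_length=max_tokens)
    return tok.decode(ids, skip_special_tokens=True)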
# Apply the preprocessing function to the DataFrame column
df["Processed_Text"] = df["Text"].apply(preprocess_text)
# Apply the sentiment analysis pipeline to the preprocessed text
sentiment_scores = df["Processed_Text"].apply(lambda text: sent_pipeline(text))
# Create empty columns in the DataFrame to store the sentiment labels and scores
df["Sentiment Label"] = ""
df["Sentiment Score"] = ""
# Iterate over the DataFrame rows and apply the sentiment analysis pipeline
for index, row in df.iterrows():
text = row["Processed_Text"]
sentiment_results = sent_pipeline(text)
# Extract sentiment label and score from the results
sentiment_label = sentiment_results[0]["label"]
sentiment_score = sentiment_results[0]["score"]
# Assign sentiment label and score to the corresponding row in the DataFrame
df.at[index, "Sentiment Label"] = sentiment_label
df.at[index, "Sentiment Score"] = sentiment_score
df
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/373/129373165.ipynb
|
amazon-fine-food-reviews
| null |
[{"Id": 129373165, "ScriptId": 38430075, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9208755, "CreationDate": "05/13/2023 08:30:36", "VersionNumber": 4.0, "Title": "Amazon reviews Sentimental Analysis", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 224.0, "LinesInsertedFromPrevious": 171.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 53.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 185361034, "KernelVersionId": 129373165, "SourceDatasetVersionId": 2157}]
|
[{"Id": 2157, "DatasetId": 18, "DatasourceVersionId": 2157, "CreatorUserId": 500099, "LicenseName": "CC0: Public Domain", "CreationDate": "05/01/2017 18:51:31", "VersionNumber": 2.0, "Title": "Amazon Fine Food Reviews", "Slug": "amazon-fine-food-reviews", "Subtitle": "Analyze ~500,000 food reviews from Amazon", "Description": "## Context\n\nThis dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.\n\n\n## Contents\n\n- Reviews.csv: Pulled from the corresponding SQLite table named Reviews in database.sqlite<br>\n- database.sqlite: Contains the table 'Reviews'<br><br>\n\nData includes:<br>\n- Reviews from Oct 1999 - Oct 2012<br>\n- 568,454 reviews<br>\n- 256,059 users<br>\n- 74,258 products<br>\n- 260 users with > 50 reviews<br>\n\n\n[](https://www.kaggle.com/benhamner/d/snap/amazon-fine-food-reviews/reviews-wordcloud)\n\n\n## Acknowledgements\n\nSee [this SQLite query](https://www.kaggle.com/benhamner/d/snap/amazon-fine-food-reviews/data-sample) for a quick sample of the dataset.\n\nIf you publish articles based on this dataset, please cite the following paper:\n\n - J. McAuley and J. Leskovec. [From amateurs to connoisseurs: modeling the evolution of user expertise through online reviews](http://i.stanford.edu/~julian/pdfs/www13.pdf). WWW, 2013.", "VersionNotes": "Re-uploading the files so they appear in file previews.", "TotalCompressedBytes": 673703435.0, "TotalUncompressedBytes": 673703435.0}]
|
[{"Id": 18, "CreatorUserId": 500099, "OwnerUserId": NaN, "OwnerOrganizationId": 229.0, "CurrentDatasetVersionId": 2157.0, "CurrentDatasourceVersionId": 2157.0, "ForumId": 993, "Type": 2, "CreationDate": "01/08/2016 21:12:10", "LastActivityDate": "02/06/2018", "TotalViews": 856266, "TotalDownloads": 156282, "TotalVotes": 2086, "TotalKernels": 817}]
| null |
# # **Read in Data**
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
import nltk
df = pd.read_csv("../input/amazon-fine-food-reviews/Reviews.csv")
print(df.shape)
df = df.head(1000)
print(df.shape)
# # **EDA**
df.head()
ax = (
df["Score"]
.value_counts()
.sort_index()
.plot(kind="bar", title="Reviews Count by Stars", figsize=(12, 5))
)
ax.set_xlabel("Amazon review score")
plt.show()
# # Some basic NLTK
#
##Taking the review at index 100 in the dataset as an example
example = df["Text"][100]
print(example)
tokens = nltk.word_tokenize(example)
tokens[:15]
# # Part of speech tagging
pos_tag = nltk.pos_tag(tokens)
pos_tag[:10]
chunked_entities = nltk.chunk.ne_chunk(pos_tag)
chunked_entities.pprint()
# # Vader scoring
# * Stop words are removed
# * Every word is scored and combined to total score
from nltk.sentiment import SentimentIntensityAnalyzer
from tqdm.notebook import tqdm
sia = SentimentIntensityAnalyzer()
sia.polarity_scores(example)
# Run polarity scores for the entire dataset
from tqdm import tqdm
res = {}
for i, row in tqdm(df.iterrows(), total=len(df)):
text = row["Text"]
myid = row["Id"]
res[myid] = sia.polarity_scores(text)
vaders = pd.DataFrame(res).T
vaders = vaders.reset_index().rename(columns={"index": "Id"})
vaders = vaders.merge(df, how="left")
vaders.head()
# ## Plotting Vader Results
# * Plotting it in the form of a grid with positive, neutral and negative sentiments side by side
# First, plotting the star rating vs. the compound sentiment polarity score
ax = sns.barplot(data=vaders, x="Score", y="compound")
ax.set_title("Compund Score by Amazon Star Review")
plt.show()
fig, axs = plt.subplots(1, 3, figsize=(11, 4))
sns.barplot(data=vaders, x="Score", y="pos", ax=axs[0])
sns.barplot(data=vaders, x="Score", y="neu", ax=axs[1])
sns.barplot(data=vaders, x="Score", y="neg", ax=axs[2])
axs[0].set_title("Positive")
axs[1].set_title("Neutral")
axs[2].set_title("Negative")
plt.tight_layout()
plt.show()
# ## Roberta Pretrained Model
# * Use a model pretrained on a large corpus of data.
# * The transformer model accounts not only for the individual words but also for the context around them.
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
"roberta_neg": scores[0],
"roberta_neu": scores[1],
"roberta_pos": scores[2],
}
print(scores_dict)
# VADER results on example
print(example)
sia.polarity_scores(example)
# Store it in a dictionary
def polarity_scores_roberta(example):
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
"roberta_neg": scores[0],
"roberta_neu": scores[1],
"roberta_pos": scores[2],
}
return scores_dict
# Store both sentiment scores in another dictionary
res = {}
for i, row in tqdm(df.iterrows(), total=len(df)):
try:
text = row["Text"]
myid = row["Id"]
vader_result = sia.polarity_scores(text)
vader_result_rename = {}
for key, value in vader_result.items():
vader_result_rename[f"vader_{key}"] = value
roberta_result = polarity_scores_roberta(text)
both = {**vader_result_rename, **roberta_result}
res[myid] = both
except RuntimeError:
print(f"Broke for id {myid}")
# ## Merged Results
#
results_df = pd.DataFrame(res).T
results_df = results_df.reset_index().rename(columns={"index": "Id"})
results_df = results_df.merge(df, how="left")
## Compare model results
results_df.columns
# Plot the comparison
sns.pairplot(
data=results_df,
vars=[
"vader_neg",
"vader_neu",
"vader_pos",
"roberta_neg",
"roberta_neu",
"roberta_pos",
],
hue="Score",
palette="tab10",
)
plt.show()
results_df
# ## Review scores
# Positive sentiment 1 star (roberta)
results_df.query("Score == 1").sort_values("roberta_pos", ascending=False)[
"Text"
].values[0]
# Negative sentiment 5 star (roberta)
results_df.query("Score == 5").sort_values("roberta_neg", ascending=False)[
"Text"
].values[0]
# Positive sentiment 1 star (vader)
results_df.query("Score == 1").sort_values("vader_pos", ascending=False)["Text"].values[
0
]
# Negative sentiment 5 star (vader)
results_df.query("Score == 5").sort_values("vader_neg", ascending=False)["Text"].values[
0
]
# ## Simple Pipeline
from transformers import pipeline
sent_pipeline = pipeline("sentiment-analysis")
# sentiment_scores = df['Text'].apply(lambda text: sent_pipeline(text)) #Showing an error
def preprocess_text(text):
# Truncate or preprocess the text to fit within the model's maximum sequence length
    truncated_text = text[:512]  # Truncate to the first 512 characters as a rough proxy for the 512-token limit
return truncated_text
# Apply the preprocessing function to the DataFrame column
df["Processed_Text"] = df["Text"].apply(preprocess_text)
# Apply the sentiment analysis pipeline to the preprocessed text
sentiment_scores = df["Processed_Text"].apply(lambda text: sent_pipeline(text))
# Create empty columns in the DataFrame to store the sentiment labels and scores
df["Sentiment Label"] = ""
df["Sentiment Score"] = ""
# Iterate over the DataFrame rows and apply the sentiment analysis pipeline
for index, row in df.iterrows():
text = row["Processed_Text"]
sentiment_results = sent_pipeline(text)
# Extract sentiment label and score from the results
sentiment_label = sentiment_results[0]["label"]
sentiment_score = sentiment_results[0]["score"]
# Assign sentiment label and score to the corresponding row in the DataFrame
df.at[index, "Sentiment Label"] = sentiment_label
df.at[index, "Sentiment Score"] = sentiment_score
df
| false | 0 | 1,893 | 3 | 2,354 | 1,893 |
||
129290914
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Load train dataset and make a copy of it
df_train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
# Load test dataset
df_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
id = df_test["id"]
df_train.drop(["id"], axis=1, inplace=True)
df_test.drop(["id"], axis=1, inplace=True)
df_train.shape
df_test.shape
# calculate total number of null values in training data
null_train = df_train.isnull().sum().sum()
print(null_train)
# calculate total number of null values in test data
null_test = df_test.isnull().sum().sum()
print(null_test)
df_train.shape, df_test.shape
# save the 'yield' column as train_label
train_label = df_train["yield"].reset_index(drop=True)
df_train = df_train.drop(["yield"], axis=1)
variables = list(df_train.columns)
# Configure the subplot layout
rows = len(variables)
cols = 1
fig, axes = plt.subplots(rows, cols, figsize=(8, rows * 4))
# Generate a distribution plot for each variable
for i, variable in enumerate(variables):
    ax = axes[i] if rows > 1 else axes  # Handle the single-variable case
    ax.hist(
        df_train[variable], bins=30
    )  # The number of bins can be adjusted if needed
    ax.set_title(f"Distribution of {variable}")
    ax.set_xlabel(variable)
    ax.set_ylabel("Frequency")
# Show the plots
plt.tight_layout()
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(
df_train, train_label, train_size=0.7, shuffle=False
)
print(f"X_train", X_train.shape)
print(f"y_train", y_train.shape)
print(f"X_valid", X_valid.shape)
print(f"y_valid", y_valid.shape)
# prepare configuration for cross validation test harness
seed = 7
# prepare models
models = []
from sklearn.linear_model import LinearRegression
models.append(("LinearR", LinearRegression()))
from sklearn.linear_model import Ridge
models.append(("Ridge", Ridge(alpha=0.1)))
from sklearn.linear_model import Lasso
models.append(("Lasso", Lasso(alpha=0.1)))
from sklearn.tree import DecisionTreeRegressor
models.append(("DecisionTree", DecisionTreeRegressor()))
from sklearn.ensemble import RandomForestRegressor
models.append(("RandomForest", RandomForestRegressor()))
from sklearn.neighbors import KNeighborsRegressor
models.append(("KNN", KNeighborsRegressor()))
from sklearn.svm import SVR
models.append(("SVR", SVR()))
from catboost import CatBoostRegressor
models.append(("CatBoost", CatBoostRegressor(verbose=500)))
import xgboost
models.append(("xgboost", xgboost.XGBRegressor()))
from sklearn.ensemble import AdaBoostRegressor
models.append(("AdaBoost", AdaBoostRegressor()))
# evaluate each model in turn
from sklearn.metrics import mean_absolute_error
from sklearn import model_selection
results = []
names = []
scoring = "neg_mean_absolute_error"
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed, shuffle=True)
cv_results = model_selection.cross_val_score(
model, X_train, y_train, cv=kfold, scoring=scoring
)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
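# Note: "neg_mean_absolute_error" makes cross_val_score return negated errors (higher is
# better). A small sketch to report the same results as positive MAE values:
for model_name, cv_scores in zip(names, results):
    print(f"{model_name}: MAE = {-cv_scores.mean():.2f} (+/- {cv_scores.std():.2f})")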
model_svr = CatBoostRegressor(verbose=500)
model_svr.fit(X_train, y_train)
# get predictions
pred_opt = model_svr.predict(X_valid)
mae = mean_absolute_error(y_valid, pred_opt)
print("MAE : ", mae)
# from sklearn.feature_selection import RFECV
# rfecv = RFECV(estimator=model_svr, step=5, cv=kfold, scoring=scoring)
# rfecv.fit(X_train, y_train)
# Selected features
# print(str(X_train.columns[rfecv.get_support()]))
# print("Optimal number of features : %d" % rfecv.n_features_)
# df_test = df_test[X_train.columns[rfecv.get_support()]]
# df_train = df_train[X_train.columns[rfecv.get_support()]]
model_svr = CatBoostRegressor(verbose=500)
model_svr.fit(df_train, train_label)
prediction = model_svr.predict(df_test)
output = pd.DataFrame({"id": id, "yield": prediction})
output.to_csv("submission.csv", index=False)
output
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/290/129290914.ipynb
| null | null |
[{"Id": 129290914, "ScriptId": 38363909, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2582119, "CreationDate": "05/12/2023 13:44:58", "VersionNumber": 2.0, "Title": "Voting&hyper", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 156.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 155.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Load train dataset and make a copy of it
df_train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
# Load test dataset
df_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
id = df_test["id"]
df_train.drop(["id"], axis=1, inplace=True)
df_test.drop(["id"], axis=1, inplace=True)
df_train.shape
df_test.shape
# calculate total number of null values in training data
null_train = df_train.isnull().sum().sum()
print(null_train)
# calculate total number of null values in test data
null_test = df_test.isnull().sum().sum()
print(null_test)
df_train.shape, df_test.shape
# save the 'yield' column as train_label
train_label = df_train["yield"].reset_index(drop=True)
df_train = df_train.drop(["yield"], axis=1)
variables = list(df_train.columns)
# Configure the subplot layout
rows = len(variables)
cols = 1
fig, axes = plt.subplots(rows, cols, figsize=(8, rows * 4))
# Generate a distribution plot for each variable
for i, variable in enumerate(variables):
    ax = axes[i] if rows > 1 else axes  # Handle the single-variable case
    ax.hist(
        df_train[variable], bins=30
    )  # The number of bins can be adjusted if needed
    ax.set_title(f"Distribution of {variable}")
    ax.set_xlabel(variable)
    ax.set_ylabel("Frequency")
# Show the plots
plt.tight_layout()
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(
df_train, train_label, train_size=0.7, shuffle=False
)
print(f"X_train", X_train.shape)
print(f"y_train", y_train.shape)
print(f"X_valid", X_valid.shape)
print(f"y_valid", y_valid.shape)
# prepare configuration for cross validation test harness
seed = 7
# prepare models
models = []
from sklearn.linear_model import LinearRegression
models.append(("LinearR", LinearRegression()))
from sklearn.linear_model import Ridge
models.append(("Ridge", Ridge(alpha=0.1)))
from sklearn.linear_model import Lasso
models.append(("Lasso", Lasso(alpha=0.1)))
from sklearn.tree import DecisionTreeRegressor
models.append(("DecisionTree", DecisionTreeRegressor()))
from sklearn.ensemble import RandomForestRegressor
models.append(("RandomForest", RandomForestRegressor()))
from sklearn.neighbors import KNeighborsRegressor
models.append(("KNN", KNeighborsRegressor()))
from sklearn.svm import SVR
models.append(("SVR", SVR()))
from catboost import CatBoostRegressor
models.append(("CatBoost", CatBoostRegressor(verbose=500)))
import xgboost
models.append(("xgboost", xgboost.XGBRegressor()))
from sklearn.ensemble import AdaBoostRegressor
models.append(("AdaBoost", AdaBoostRegressor()))
# evaluate each model in turn
from sklearn.metrics import mean_absolute_error
from sklearn import model_selection
results = []
names = []
scoring = "neg_mean_absolute_error"
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed, shuffle=True)
cv_results = model_selection.cross_val_score(
model, X_train, y_train, cv=kfold, scoring=scoring
)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
model_svr = CatBoostRegressor(verbose=500)
model_svr.fit(X_train, y_train)
# get predictions
pred_opt = model_svr.predict(X_valid)
mae = mean_absolute_error(y_valid, pred_opt)
print("MAE : ", mae)
# from sklearn.feature_selection import RFECV
# rfecv = RFECV(estimator=model_svr, step=5, cv=kfold, scoring=scoring)
# rfecv.fit(X_train, y_train)
# Selected features
# print(str(X_train.columns[rfecv.get_support()]))
# print("Optimal number of features : %d" % rfecv.n_features_)
# df_test = df_test[X_train.columns[rfecv.get_support()]]
# df_train = df_train[X_train.columns[rfecv.get_support()]]
model_svr = CatBoostRegressor(verbose=500)
model_svr.fit(df_train, train_label)
prediction = model_svr.predict(df_test)
output = pd.DataFrame({"id": id, "yield": prediction})
output.to_csv("submission.csv", index=False)
output
| false | 0 | 1,512 | 0 | 1,512 | 1,512 |
||
129290567
|
<jupyter_start><jupyter_text>Heart Disease Dataset
### Description:
This database contains 76 attributes, but all published experiments refer to using a subset of 14 of them. In particular, the Cleveland database is the only one that has been used by ML researchers to
this date. The "goal" field refers to the presence of heart disease in the patient. It is integer-valued from 0 (no presence) to 4.
Kaggle dataset identifier: heart-disease-dataset
<jupyter_script># **Business Understanding**
# This notebook uses the "Heart Disease Dataset" from Kaggle. The data dates from 1988 and consists of four databases: Cleveland, Hungary, Switzerland, and Long Beach V. It contains 76 attributes, including the predicted attribute, but all published experiments refer to a subset of 14 of them. The "target" variable indicates the presence of heart disease in the patient and is integer-valued: 0 = no disease and 1 = disease. The goal of this analysis is to find out which attributes contribute to heart disease, so that patients can focus on avoiding the things that trigger those attributes and keep them under control with a clean and healthy lifestyle.
# **Data Understanding**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from pandas_profiling import ProfileReport
from plotly.offline import iplot
from sklearn.cluster import KMeans
data = pd.read_csv("/kaggle/input/heart-disease-dataset/heart.csv")
data.head()
data.describe()
data.isnull().sum()
# # There are no missing values, so no missing-data handling is needed
data.info()
# # From the info above, the dataset consists of 14 variables: one float variable, oldpeak (ST depression induced by exercise relative to rest), and 13 integer variables
# Identifying duplicate
duplicate = data.duplicated().sum()
if duplicate:
print("Duplicate rows are: {}".format(duplicate))
else:
print("There is no duplicate value")
# # Duplicates are present because the records are grouped into categorical values, so identical rows can legitimately occur
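# A quick sketch to inspect the duplicated records mentioned above (keep=False keeps
# every member of each duplicate group):
data[data.duplicated(keep=False)].head()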
# Visualize boxplot to check the outliers
data.plot(kind="box", subplots=True, figsize=(12, 4), color="blue")
# Create the correlation heatmap
plt.rcParams["figure.figsize"] = (10, 10)
sns.heatmap(data.corr().round(2), annot=True, linewidths=0.5)
plt.title("Correlation Between Features", fontsize=15)
# Set the plot size
plt.figure(figsize=(10, 5))
# visualize a count plot of chest pain type (cp) by target
ax = sns.countplot(x="cp", hue="target", data=data, palette="inferno_r")
ax.set_title("Chest Pain According to target")
# Add percentage label to each bar
total = len(data["cp"])
for p in ax.patches:
percentage = "{:.1f}%".format(100 * p.get_height() / total)
x = p.get_x() + p.get_width() / 2 - 0.05
y = p.get_y() + p.get_height()
ax.annotate(percentage, (x, y), size=9)
# Set the plot size
plt.figure(figsize=(10, 5))
# visualize a count plot of maximum heart rate (thalach) by target
ax = sns.countplot(x="thalach", hue="target", data=data, palette="inferno_r")
ax.set_title("Heart Rate According to target")
# Add percentage label to each bar
total = len(data["thalach"])
for p in ax.patches:
percentage = "{:.1f}%".format(100 * p.get_height() / total)
x = p.get_x() + p.get_width() / 2 - 0.05
y = p.get_y() + p.get_height()
ax.annotate(percentage, (x, y), size=9)
# Set the plot size
plt.figure(figsize=(10, 5))
# visualize a count plot of the ST segment slope by target
ax = sns.countplot(x="slope", hue="target", data=data, palette="inferno_r")
ax.set_title("Slope ST According to target")
# Add percentage label to each bar
total = len(data["slope"])
for p in ax.patches:
percentage = "{:.1f}%".format(100 * p.get_height() / total)
x = p.get_x() + p.get_width() / 2 - 0.05
y = p.get_y() + p.get_height()
ax.annotate(percentage, (x, y), size=9)
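# The percentage-annotation loop above is repeated for each count plot; a small helper
# (sketch) would remove that repetition for any further plots of the same kind.
def annotate_percentages(ax, total):
    # Write each bar's share of the total above the bar
    for p in ax.patches:
        percentage = "{:.1f}%".format(100 * p.get_height() / total)
        x = p.get_x() + p.get_width() / 2 - 0.05
        y = p.get_y() + p.get_height()
        ax.annotate(percentage, (x, y), size=9)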
# **EDA Automated**
# Install automated EDA pandas profiling -> ydata-profiling (new version)
# Import library
from ydata_profiling import ProfileReport
profile = ProfileReport(data, title="Heart Deseases") # Create profile report object
profile
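# Optionally export the automated report to HTML (sketch; the file name is an arbitrary choice)
profile.to_file("heart_disease_profile.html")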
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/290/129290567.ipynb
|
heart-disease-dataset
|
yasserh
|
[{"Id": 129290567, "ScriptId": 38438724, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14359983, "CreationDate": "05/12/2023 13:42:08", "VersionNumber": 1.0, "Title": "EDA on Health Case Study", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185199718, "KernelVersionId": 129290567, "SourceDatasetVersionId": 2983853}]
|
[{"Id": 2983853, "DatasetId": 1828813, "DatasourceVersionId": 3031585, "CreatorUserId": 8833583, "LicenseName": "CC0: Public Domain", "CreationDate": "12/29/2021 14:29:31", "VersionNumber": 1.0, "Title": "Heart Disease Dataset", "Slug": "heart-disease-dataset", "Subtitle": "A simple records of Heart Patients monitored - Binary Classification Problem.", "Description": "### Description:\n\nThis database contains 76 attributes, but all published experiments refer to using a subset of 14 of them. In particular, the Cleveland database is the only one that has been used by ML researchers to\nthis date. The \"goal\" field refers to the presence of heart disease in the patient. It is integer-valued from 0 (no presence) to 4.\n\n### Acknowledgements:\nThis dataset has been referred from Kaggle.\n\n### Objective:\n- Understand the Dataset & cleanup (if required).\n- Build classification models to predict whether or not the patients have Heart Disease.\n- Also fine-tune the hyperparameters & compare the evaluation metrics of various classification algorithms.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1828813, "CreatorUserId": 8833583, "OwnerUserId": 8833583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2983853.0, "CurrentDatasourceVersionId": 3031585.0, "ForumId": 1851571, "Type": 2, "CreationDate": "12/29/2021 14:29:31", "LastActivityDate": "12/29/2021", "TotalViews": 38490, "TotalDownloads": 5929, "TotalVotes": 62, "TotalKernels": 41}]
|
[{"Id": 8833583, "UserName": "yasserh", "DisplayName": "M Yasser H", "RegisterDate": "11/09/2021", "PerformanceTier": 3}]
|
# **Business Understanding**
# This notebook uses the "Heart Disease Dataset" from Kaggle. The data dates from 1988 and consists of four databases: Cleveland, Hungary, Switzerland, and Long Beach V. It contains 76 attributes, including the predicted attribute, but all published experiments refer to a subset of 14 of them. The "target" variable indicates the presence of heart disease in the patient and is integer-valued: 0 = no disease and 1 = disease. The goal of this analysis is to find out which attributes contribute to heart disease, so that patients can focus on avoiding the things that trigger those attributes and keep them under control with a clean and healthy lifestyle.
# **Data Understanding**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from pandas_profiling import ProfileReport
from plotly.offline import iplot
from sklearn.cluster import KMeans
data = pd.read_csv("/kaggle/input/heart-disease-dataset/heart.csv")
data.head()
data.describe()
data.isnull().sum()
# # There are no missing values, so no missing-data handling is needed
data.info()
# # From the info above, the dataset consists of 14 variables: one float variable, oldpeak (ST depression induced by exercise relative to rest), and 13 integer variables
# Identifying duplicate
duplicate = data.duplicated().sum()
if duplicate:
print("Duplicate rows are: {}".format(duplicate))
else:
print("There is no duplicate value")
# # Duplicates are present because the records are grouped into categorical values, so identical rows can legitimately occur
# Visualize boxplot to check the outliers
data.plot(kind="box", subplots=True, figsize=(12, 4), color="blue")
# Create the correlation heatmap
plt.rcParams["figure.figsize"] = (10, 10)
sns.heatmap(data.corr().round(2), annot=True, linewidths=0.5)
plt.title("Correlation Between Features", fontsize=15)
# Set the plot size
plt.figure(figsize=(10, 5))
# visualize a count plot of chest pain type (cp) by target
ax = sns.countplot(x="cp", hue="target", data=data, palette="inferno_r")
ax.set_title("Chest Pain According to target")
# Add percentage label to each bar
total = len(data["cp"])
for p in ax.patches:
percentage = "{:.1f}%".format(100 * p.get_height() / total)
x = p.get_x() + p.get_width() / 2 - 0.05
y = p.get_y() + p.get_height()
ax.annotate(percentage, (x, y), size=9)
# Set the plot size
plt.figure(figsize=(10, 5))
# visualize a count plot of maximum heart rate (thalach) by target
ax = sns.countplot(x="thalach", hue="target", data=data, palette="inferno_r")
ax.set_title("Heart Rate According to target")
# Add percentage label to each bar
total = len(data["thalach"])
for p in ax.patches:
percentage = "{:.1f}%".format(100 * p.get_height() / total)
x = p.get_x() + p.get_width() / 2 - 0.05
y = p.get_y() + p.get_height()
ax.annotate(percentage, (x, y), size=9)
# Set the plot size
plt.figure(figsize=(10, 5))
# visualize a count plot of the ST segment slope by target
ax = sns.countplot(x="slope", hue="target", data=data, palette="inferno_r")
ax.set_title("Slope ST According to target")
# Add percentage label to each bar
total = len(data["slope"])
for p in ax.patches:
percentage = "{:.1f}%".format(100 * p.get_height() / total)
x = p.get_x() + p.get_width() / 2 - 0.05
y = p.get_y() + p.get_height()
ax.annotate(percentage, (x, y), size=9)
# **EDA Automated**
# Install automated EDA pandas profiling -> ydata-profiling (new version)
# Import library
from ydata_profiling import ProfileReport
profile = ProfileReport(data, title="Heart Deseases") # Create profile report object
profile
| false | 1 | 1,266 | 0 | 1,377 | 1,266 |
||
129290756
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
condors = pd.read_csv("/kaggle/input/condor-acceleration-usgs/condor_data_all.csv")
condors.head()
condor_1 = condors.loc[condors["TagID"] == 1]
condor_1.head()
condor_1.info()
condor_1.info()
X = condor_1[:500].X
Y = condor_1[:500].Y
Z = condor_1[:500].Z
data = condor_1[:500]
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.scatter3D(xs=X, ys=Y, zs=Z, c="No", data=data)
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot3D(X, Y, Z, alpha=0.5)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/290/129290756.ipynb
| null | null |
[{"Id": 129290756, "ScriptId": 38437249, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10897806, "CreationDate": "05/12/2023 13:43:50", "VersionNumber": 3.0, "Title": "Condor Tri-Axial Data", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 29.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 25.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
condors = pd.read_csv("/kaggle/input/condor-acceleration-usgs/condor_data_all.csv")
condors.head()
condor_1 = condors.loc[condors["TagID"] == 1]
condor_1.head()
condor_1.info()
condor_1.info()
X = condor_1[:500].X
Y = condor_1[:500].Y
Z = condor_1[:500].Z
data = condor_1[:500]
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.scatter3D(xs=X, ys=Y, zs=Z, c="No", data=data)
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot3D(X, Y, Z, alpha=0.5)
| false | 0 | 271 | 0 | 271 | 271 |
||
129304461
|
<jupyter_start><jupyter_text>China Scholarship Data - May 2019
The data was collected through web scraping https://www.cucas.edu.cn/china_scholarships/
The code to the web scraping program and data cleaning program is stored in https://github.com/mcmuralishclint/CUCAS
The dataset contains information about the scholarship programs in China as of May 2019.
Kaggle dataset identifier: china-scholarship-data-may-2019
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Data Understanding
df = pd.read_csv("/kaggle/input/china-scholarship-data-may-2019/cleaned.csv")
df.head()
df.shape
df.info()
df.isna().sum()
df.describe().T.drop("count", axis=1).T
df.describe(include=object).T.drop("count", axis=1).T
# # Data Preprocessing
df.isna().sum()
df.fillna(0, inplace=True)
df.duplicated().sum()
column_num = [
"Tuition fees to pay",
"Original Tuition fee",
"Accomodation_To_Pay",
"Expense_To_Pay",
]
plt.boxplot(df[column_num])
plt.xticks([1, 2, 3, 4], column_num)
plt.title("Outlier Before Remove")
plt.show()
print(f"Total Row With Outlier: {df.shape[0]}")
Q1 = df[column_num].quantile(0.25)
Q3 = df[column_num].quantile(0.75)
IQR = Q3 - Q1
df = df[
~((df[column_num] < (Q1 - 1.5 * IQR)) | (df[column_num] > (Q3 + 1.5 * IQR))).any(
axis=1
)
]
plt.boxplot(df[column_num])
plt.xticks([1, 2, 3, 4], column_num)
plt.title("Outlier After Remove")
plt.show()
print(f"Total Row Without Outlier: {df.shape[0]}")
sns.clustermap(df.corr(), annot=True, cmap="mako", figsize=(8, 8))
plt.show()
# # Feature Engineering
# Create Column Category Tuition Covered?
df["Tuition Covered?"] = df["Tuition Covered"].apply(lambda x: 1 if x > 0 else 0)
df["Tuition Covered?"] = df["Tuition Covered?"].astype(int)
# Calculate Total Covered
total_covered = (
df["Tuition Covered?"] + df["Accomodation covered?"] + df["Living Expense Covered?"]
)
# Grouping to create the Scholarship Category column
df.loc[total_covered == 3, "Scholarship Category"] = "Fully Covered"
df.loc[
    (total_covered == 2)
    & (df["Accomodation covered?"] == 1)
    & (df["Living Expense Covered?"] == 0),
    "Scholarship Category",
] = "Tuition and Accomodation Covered"
df.loc[
    (total_covered == 2)
    & (df["Accomodation covered?"] == 0)
    & (df["Living Expense Covered?"] == 1),
    "Scholarship Category",
] = "Tuition and Expense Covered"
# Any remaining combination is labelled as not fully covered
df.loc[df["Scholarship Category"].isna(), "Scholarship Category"] = "Not Fully Covered"
# Create column for total accommodation cost per year
# Map each billing-duration unit to the number of billing periods in a year
convert_to_month = {"YEAR": 1, "SEMESTER": 2, "TERM": 3, "MONTH": 12, "DAY": 365}
df["Accomodation_duration"] = df["Accomodation_duration"].replace(convert_to_month)
df["Accomodation_duration"] = df["Accomodation_duration"].astype(int)
df["total_accomodation_year"] = df["Accomodation_duration"] * df["Accomodation_To_Pay"]
# Create column for total expense per year (reuses the periods-per-year mapping above)
df["Expense_duration"] = df["Expense_duration"].replace(convert_to_month)
df["Expense_duration"] = df["Expense_duration"].astype(int)
df["total_expense_year"] = df["Expense_duration"] * df["Expense_To_Pay"]
# Create Total Cost Year Without Coverage
df["total_cost_year"] = (
df["Tuition fees to pay"] + df["total_accomodation_year"] + df["total_expense_year"]
)
# Remove parenthesised text from the Major names
df["Major"] = df["Major"].str.replace(r"\(.*\)", "", regex=True).str.strip()
# # Conclusion Feature Engineering
# * The Scholarship Category column was created to make it easier to analyse the types of scholarships in the dataset and how much each type reduces the total cost a student has to bear (a quick illustration follows below).
# * By aggregating accommodation and living expenses to a yearly basis, we can easily compare the total annual cost students must bear and compare that cost across universities.
# * Text inside parentheses in the Major column was removed to simplify the analysis. Such text usually carries details that are not needed here, such as programme codes or specific education levels, and removing it makes the Major values easier to read and to group.
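# A quick illustration of the point above: how many programs fall into each scholarship
# category, and the average yearly cost still to pay in each (uses the engineered columns).
print(df["Scholarship Category"].value_counts())
print(df.groupby("Scholarship Category")["total_cost_year"].mean().round(0))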
# # Distribution University
uni = df["University"].value_counts().head(10)
sns.set_style("whitegrid")
ax = sns.barplot(x=uni.values, y=uni.index)
for i in ax.containers:
ax.bar_label(
i,
)
plt.xlabel("Number of Students")
plt.title("Top 10 Universities with Most International Students")
plt.show()
uni_major_count = (
df.groupby(["University", "Major"])
.size()
.reset_index(name="Count")
.sort_values(by="Count", ascending=False)
.head(10)
)
plt.figure(figsize=(10, 5))
ax = sns.barplot(x="Count", y="Major", hue="University", data=uni_major_count)
for i in ax.containers:
ax.bar_label(i, label_type="edge", padding=5, fontsize=10)
plt.title("Top 10 Popular Majors with Universities")
plt.xlabel("Count")
plt.ylabel("Major")
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
uni_level_count = (
df.groupby(["University", "Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 6))
ax = sns.barplot(
data=uni_level_count, y="University", x="Count", hue="Level", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(
i,
)
ax.legend(fontsize=8, loc="lower right")
plt.title("Top University with Populer Level", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group the data by University and calculate the max Tuition Covered
uni_tuition_max = (
df.groupby("University")["Tuition Covered"]
.max()
.reset_index(name="MaxTuitionFee")
.sort_values("MaxTuitionFee", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 6))
ax = sns.barplot(
data=uni_tuition_max, x="MaxTuitionFee", y="University", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Universities with Highest Tuition Cover", fontsize=10)
plt.xlabel("Max Tuition Fee (RMB)", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group by University and take the minimum value of the 'Tuition Covered' column
uni_tuition_covered = (
df.groupby("University")["Tuition Covered"]
.min()
.reset_index()
.sort_values(by="Tuition Covered")
.head(10)
)
ax = sns.barplot(x="Tuition Covered", y="University", data=uni_tuition_covered)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("10 Universities with the Lowest Tuition Covered (RMB)")
plt.xlabel("Min Tuition Covered (RMB)", fontsize=8)
plt.show()
# Groupby University Offering Scholarship Category Fully Covered
uni_scholarship_cover = (
df[df["Scholarship Category"] == "Fully Covered"]
.groupby(["University"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(10, 6))
ax = sns.barplot(
data=uni_scholarship_cover,
x="Count",
y="University",
dodge=False,
orient="horizontal",
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
plt.xlabel("Count", fontsize=12)
plt.ylabel("Number of Programs Offering Fully Cover", fontsize=12)
plt.title("Top 10 Majors Offering Scholarship Fully Covered", fontsize=14)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group the data by University and calculate the max TuitionFee
uni_tuition_max = (
df.groupby("University")["Tuition fees to pay"]
.max()
.reset_index(name="MaxTuitionFee")
.sort_values("MaxTuitionFee", ascending=False)
.head(10)
)
# Plot the data using a horizontal barplot
plt.figure(figsize=(10, 5))
ax = sns.barplot(
data=uni_tuition_max, x="MaxTuitionFee", y="University", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Universities with Highest Tuition Fee", fontsize=10)
plt.xlabel("Max Tuition Fee", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
uni_total_cost_year = (
df.groupby("University")["total_cost_year"]
.sum()
.reset_index(name="TotalCostYear")
.sort_values("TotalCostYear")
.head(10)
)
# Plot the data using a horizontal barplot
plt.figure(figsize=(8, 6))
ax = sns.barplot(
data=uni_total_cost_year, x="TotalCostYear", y="University", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Universities with Lowest Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year (RMB)", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group data by level and calculate the total of each cost category
cost_data = df.groupby("Level")[
["Tuition fees to pay", "Accomodation_To_Pay", "Expense_To_Pay"]
].sum()
# Create a line chart for each cost category
plt.plot(cost_data["Tuition fees to pay"], label="Tuition")
plt.plot(cost_data["Accomodation_To_Pay"], label="Accomodation")
plt.plot(cost_data["Expense_To_Pay"], label="Living Expenses")
# Set the title and axis labels
plt.title("Comparison of Tuition, Accomodation, and Living Expenses by University")
plt.xlabel("University")
plt.ylabel("Cost (RMB)")
# Add legend and show the plot
plt.legend()
plt.show()
# # Conclusion Distribution University
# * From the results of the data analysis that has been done, it can be concluded that Zhejiang Normal University is one of the best universities in China with some outstanding achievements. Zhejiang Normal University ranked first in the categories of number of international students, number of bachelor's and master's programs, number of most popular majors, and highest total tuition covered.
# * In addition, from the analysis of the scholarship categories, China University of Petroleum - Beijing stands out because it offers the most programs in the fully covered scholarship category. For those looking for a more affordable option, universities such as Nanchang University, Qingdao University, and Xuzhou Medical University can be considered.
# * Overall, the results of this analysis can be a guide for prospective international students who want to continue their studies in China to choose a university that suits their needs and budget.
# # Distribution Major
uni = df["Major"].value_counts()
top10_major = uni[:10]
sns.set_style("whitegrid")
ax = sns.barplot(x=top10_major.values, y=top10_major.index)
for i in ax.containers:
ax.bar_label(
i,
)
plt.xlabel("Number of Students")
plt.title("Top 10 Major with Most International Students")
plt.show()
maj_level_count = (
df.groupby(["Major", "Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 6))
ax = sns.barplot(
data=maj_level_count, y="Major", x="Count", hue="Level", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(
i,
)
plt.title("Top 10 Major Populer with Level Categories", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
df.loc[df["Level"] == "Phd", ["University", "Major", "Level"]].value_counts()
# Group by major and calculate the Max of tuition covered
maj_tuition_fee = (
df.groupby("Major")["Tuition Covered"]
.max()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 5))
ax = sns.barplot(
data=maj_tuition_fee, y="Major", x="Tuition Covered", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Highest Tuition Covered", fontsize=8)
plt.xlabel("Tuition Covered (RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group by major and calculate the sum of tuition covered
maj_tuition_fee = (
df.groupby("Major")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 5))
ax = sns.barplot(
data=maj_tuition_fee, y="Major", x="Tuition Covered", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Total Highest Tuition Covered", fontsize=8)
plt.xlabel("Tuition Covered (RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group the data by Major and calculate the mean tuition fee to pay
maj_tuition_mean = (
df.groupby("Major")["Tuition fees to pay"]
.mean()
.reset_index(name="MajTuitionFee")
.sort_values("MajTuitionFee", ascending=False)
.head(10)
)
# Plot the data using a horizontal barplot
plt.figure(figsize=(10, 5))
ax = sns.barplot(
data=maj_tuition_mean, x="MajTuitionFee", y="Major", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Average Tuition Fee to Pay", fontsize=10)
plt.xlabel("Avg. Tuition Fee(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Major Programs Offering accommodation cover
major_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby(["Major"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(10, 6))
ax = sns.barplot(
data=major_accommodation_cover,
x="Count",
y="Major",
dodge=False,
orient="horizontal",
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
plt.xlabel("Count", fontsize=12)
plt.ylabel("Number of Programs Offering Accommodation Cover", fontsize=12)
plt.title("Top 10 Majors Offering Accommodation Cover", fontsize=14)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by major and calculate the sum of accommodation covered
major_accomodation = (
df.groupby("Major")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
.head(10)
)
# Create a horizontal bar plot
plt.figure(figsize=(8, 5))
ax = sns.barplot(
data=major_accomodation, y="Major", x="total_accomodation_year", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Total Highest Accommodation To Pay", fontsize=8)
plt.xlabel("Accommodation To Pay(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Major Programs Offering Living Cover
major_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Major"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(10, 6))
ax = sns.barplot(
data=major_living_cover, x="Count", y="Major", dodge=False, orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
plt.xlabel("Count", fontsize=12)
plt.ylabel("Number of Programs Offering Living Expense Cover", fontsize=12)
plt.title("Top 10 Majors Offering Living Expense Cover", fontsize=14)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by major and calculate the sum of Total Expense Year
major_living = (
df.groupby("Major")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
.head(10)
)
# Create a horizontal bar plot
plt.figure(figsize=(8, 5))
ax = sns.barplot(
data=major_living, y="Major", x="total_expense_year", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Highest Total Expense to Pay Year", fontsize=8)
plt.xlabel("Total Expense Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group data by major and calculate the sum of Total Cost Year
major_living_cost = (
df.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=True)
.head(5)
)
# Create a horizontal bar plot
plt.figure(figsize=(8, 5))
ax = sns.barplot(
data=major_living_cost, y="Major", x="total_cost_year", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Highest Total Cost Year", fontsize=8)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# # Distribution Level
lev = (
df.groupby("Level")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev, x="counts", y="Level", orient="horizontal")
for i in ax.containers:
ax.bar_label(
i,
)
plt.title("Distribution of Level", fontsize=18)
plt.show()
print("Level With Non Degre", "\n")
df.loc[df["Level"] == "Non-Degree", ["University", "Major", "Level"]]
# Groupby Level dan Calculate Tuition Covered
level_tuition_covered = (
df.groupby("Level")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=level_tuition_covered, x="Tuition Covered", y="Level", orient="horizontal"
)
# Add labels to the bars
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
# Set the title and axis labels
plt.title("Sum of Tuition Covered by Level", fontsize=18)
plt.xlabel("Level", fontsize=14)
plt.ylabel("Tuition Covered(RMB)", fontsize=14)
plt.show()
# Group the data by Level and calculate the total tuition fees to pay
lev_tuition = (
df.groupby("Level")["Tuition fees to pay"]
.sum()
.reset_index(name="MajTuitionFee")
.sort_values("MajTuitionFee", ascending=False)
)
# Plot the data using a horizontal barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev_tuition, x="MajTuitionFee", y="Level", orient="horizontal")
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Top 10 Major with Average Tuition Fee to Pay", fontsize=10)
plt.xlabel("Avg. Tuition Fee(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Level Offering Accomodation cover
level_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby(["Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=level_accommodation_cover, x="Count", y="Level", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Level Offering Accommodation Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Level and calculate the sum of accommodation covered
level_accomodation = (
df.groupby("Level")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
)
# Create a horizontal bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=level_accomodation, y="Level", x="total_accomodation_year", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Level with Sum Total Accommodation Year", fontsize=8)
plt.xlabel("Total Accommodation Year", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# Group by Level for programs offering living expense cover
lev_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lev_living_cover, x="Count", y="Level", dodge=False, orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Offering Living Expense Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Level and calculate the sum of Total Expense Year
level_expense = (
df.groupby("Level")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
)
# Create a horizontal bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=level_expense, y="Level", x="total_expense_year", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Level with Total Expense Year", fontsize=10)
plt.xlabel("Expense Year", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# Group data by Level and calculate the sum of Total Cost Year
Level_cost = (
df.groupby("Level")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
)
# Create a horizontal bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=Level_cost, y="Level", x="total_cost_year", orient="horizontal")
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Level with Lower Total Cost Year", fontsize=8)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# # Distribution Language
lan = (
df.groupby("Language")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan, x="counts", y="Language", orient="horizontal")
for i in ax.containers:
ax.bar_label(
i,
)
plt.title("Distribution of Language", fontsize=10)
plt.show()
df.loc[df["Language"] == "Japanese", ["University", "Major", "Level", "Language"]]
df.loc[df["Language"] == "German", ["University", "Major", "Level", "Language"]]
# Groupby Language and Calculate sum of Tuition Covered
language_tuition_covered = (
df.groupby("Language")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=language_tuition_covered,
x="Tuition Covered",
y="Language",
orient="horizontal",
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
plt.title("Tuition Covered by Language", fontsize=10)
plt.xlabel("Tuition Covered(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Groupby Language Offering Accomodation Cover
Lan_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby("Language")
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a horizontal barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=Lan_accommodation_cover, y="Language", x="Count")
for i in ax.containers:
ax.bar_label(
i,
)
plt.title("Language Offering Accommodation Cover", fontsize=10)
plt.show()
# Group data by Language and calculate the sum of Total Accommodation Year
lan_accomodation = (
df.groupby("Language")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_accomodation,
y="Language",
x="total_accomodation_year",
orient="horizontal",
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Language with Total Accomodation Year", fontsize=10)
plt.xlabel("Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Group by Language for programs offering living expense cover
lan_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Language"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_living_cover, x="Count", y="Language", dodge=False, orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=10, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.title("Language Offering Living Expense Covered", fontsize=14)
plt.show()
# Group data by Language and calculate the sum of Total Expense Year
lan_living = (
df.groupby("Level")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_living, y="Level", x="total_expense_year", orient="horizontal"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Language with Total Expense Year", fontsize=10)
plt.xlabel("Total Expense Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Group data by Language and calculate the sum of Total Cost Year
lan_cost = (
df.groupby("Language")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan_cost, y="Language", x="total_cost_year", orient="horizontal")
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Language with Total Cost Year", fontsize=8)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Filter data by level
df_bachelor = df[df["Level"] == "Bachelor"]
df_master = df[df["Level"] == "Master"]
df_phd = df[df["Level"] == "Phd"]
df_nondegree = df[df["Level"] == "Non-Degree"]
# Get the highest tuition for each level
tuition_max = [
df_bachelor["Original Tuition fee"].max(),
df_master["Original Tuition fee"].max(),
df_phd["Original Tuition fee"].max(),
df_nondegree["Original Tuition fee"].max(),
]
# Create a barplot
plt.figure(figsize=(5, 5))
ax = sns.barplot(x=["Bachelor", "Master", "PhD", "Non-Degree"], y=tuition_max)
for i in ax.containers:
ax.bar_label(i, label_type="edge")
plt.title("Highest Tuition Covered by Level", fontsize=10)
plt.xlabel("Level", fontsize=8)
plt.ylabel("Tuition (RMB)", fontsize=8)
plt.show()
df["Scholarship Category"].value_counts()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/304/129304461.ipynb
|
china-scholarship-data-may-2019
|
mcmuralishclint96
|
[{"Id": 129304461, "ScriptId": 38357811, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6488245, "CreationDate": "05/12/2023 15:40:02", "VersionNumber": 3.0, "Title": "Scholarship EDA+Feature Enggineering", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 553.0, "LinesInsertedFromPrevious": 248.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 305.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185224909, "KernelVersionId": 129304461, "SourceDatasetVersionId": 436936}]
|
[{"Id": 436936, "DatasetId": 196715, "DatasourceVersionId": 452465, "CreatorUserId": 2824142, "LicenseName": "Other (specified in description)", "CreationDate": "05/19/2019 03:10:48", "VersionNumber": 3.0, "Title": "China Scholarship Data - May 2019", "Slug": "china-scholarship-data-may-2019", "Subtitle": NaN, "Description": "The data was collected through web scraping https://www.cucas.edu.cn/china_scholarships/\nThe code to the web scraping program and data cleaning program is stored in https://github.com/mcmuralishclint/CUCAS\nThe dataset contains information about the scholarship programs in China as of May 2019.", "VersionNotes": "Cleaned", "TotalCompressedBytes": 517175.0, "TotalUncompressedBytes": 517175.0}]
|
[{"Id": 196715, "CreatorUserId": 2824142, "OwnerUserId": 2824142.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 436936.0, "CurrentDatasourceVersionId": 452465.0, "ForumId": 207653, "Type": 2, "CreationDate": "05/18/2019 03:08:18", "LastActivityDate": "05/18/2019", "TotalViews": 9460, "TotalDownloads": 791, "TotalVotes": 26, "TotalKernels": 3}]
|
[{"Id": 2824142, "UserName": "mcmuralishclint96", "DisplayName": "Muralish Clint", "RegisterDate": "02/16/2019", "PerformanceTier": 0}]
|
| false | 1 | 8,740 | 0 | 8,861 | 8,740 |
||
129304364
|
<jupyter_start><jupyter_text>Understanding Career Aspirations of GenZ
The dataset revolves around the career aspirations of Gen-Z and has about 235 rows and 15 fields of information collected as a form of a Survey, primarily from India and few other countries as well.
Kaggle dataset identifier: understanding-career-aspirations-of-genz
<jupyter_script>import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv(
"/kaggle/input/understanding-career-aspirations-of-genz/Your Career Aspirations of GenZ.csv"
)
print(data.head())
print(data.columns)
country = data["Your Current Country."].value_counts()
label = country.index
counts = country.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Current Country")
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question1 = data[
"Which of the below factors influence the most about your career aspirations ?"
].value_counts()
label = question1.index
counts = question1.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Factors influencing career aspirations")
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question2 = "Would you definitely pursue a Higher Education / Post Graduation outside of India ? If only you have to self sponsor it."
question2 = data[question2].value_counts()
label = question2.index
counts = question2.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="Will you pursue a Higher Education outside India with your investment?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question3 = "How likely is that you will work for one employer for 3 years or more ?"
question3 = data[question3].value_counts()
label = question3.index
counts = question3.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="How likely is that you will work for one employer for 3 years or more?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question4 = "Would you work for a company whose mission is not clearly defined and publicly posted."
question4 = data[question4].value_counts()
label = question4.index
counts = question4.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="Would you work for a company whose mission is not clearly defined and publicly posted?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question5 = "How likely would you work for a company whose mission is misaligned with their public actions or even their product ?"
question5 = data[question5].value_counts()
label = question5.index
counts = question5.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="How likely would you work for a company whose mission is misaligned with their actions?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question6 = "How likely would you work for a company whose mission is not bringing social impact ?"
question6 = data[question6].value_counts()
label = question6.index
counts = question6.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="How likely would you work for a company whose mission is not bringing social impact?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question7 = "What is the most preferred working environment for you."
question7 = data[question7].value_counts()
label = question7.index
counts = question7.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="What is the most preferred working environment for you?")
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question8 = "Which of the below Employers would you work with."
question8 = data[question8].value_counts()
label = question8.index
counts = question8.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(title_text="Which of the below Employers would you work with?")
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question9 = "Which type of learning environment that you are most likely to work in ?"
question9 = data[question9].value_counts()
label = question9.index
counts = question9.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="Which type of learning environment that you are most likely to work in?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
question9 = "Which type of learning environment that you are most likely to work in ?"
question9 = data[question9].value_counts()
label = question9.index
counts = question9.values
colors = ["gold", "lightgreen"]
fig = go.Figure(data=[go.Pie(labels=label, values=counts)])
fig.update_layout(
title_text="Which type of learning environment that you are most likely to work in?"
)
fig.update_traces(
hoverinfo="label+value",
textinfo="percent",
textfont_size=30,
marker=dict(colors=colors, line=dict(color="black", width=3)),
)
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/304/129304364.ipynb
|
understanding-career-aspirations-of-genz
|
kulturehire
|
[{"Id": 129304364, "ScriptId": 38444038, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6012166, "CreationDate": "05/12/2023 15:39:10", "VersionNumber": 1.0, "Title": "Career Aspirations Survey Analysis", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 132.0, "LinesInsertedFromPrevious": 132.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185224718, "KernelVersionId": 129304364, "SourceDatasetVersionId": 4753732}]
|
[{"Id": 4753732, "DatasetId": 2751150, "DatasourceVersionId": 4816888, "CreatorUserId": 11947127, "LicenseName": "Database: Open Database, Contents: \u00a9 Original Authors", "CreationDate": "12/21/2022 13:44:32", "VersionNumber": 1.0, "Title": "Understanding Career Aspirations of GenZ", "Slug": "understanding-career-aspirations-of-genz", "Subtitle": "Understand the career aspirations of Generation Z folks of India", "Description": "The dataset revolves around the career aspirations of Gen-Z and has about 235 rows and 15 fields of information collected as a form of a Survey, primarily from India and few other countries as well.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2751150, "CreatorUserId": 11947127, "OwnerUserId": 11947127.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4753732.0, "CurrentDatasourceVersionId": 4816888.0, "ForumId": 2784822, "Type": 2, "CreationDate": "12/21/2022 13:44:32", "LastActivityDate": "12/21/2022", "TotalViews": 6881, "TotalDownloads": 1078, "TotalVotes": 33, "TotalKernels": 2}]
|
[{"Id": 11947127, "UserName": "kulturehire", "DisplayName": "KultureHire", "RegisterDate": "10/14/2022", "PerformanceTier": 0}]
|
| false | 1 | 1,902 | 2 | 1,989 | 1,902 |
||
129304270
|
<jupyter_start><jupyter_text>IMDB data from 2006 to 2016
Here's a data set of 1,000 most popular movies on IMDB in the last 10 years. The data points included are:
Title, Genre, Description, Director, Actors, Year, Runtime, Rating, Votes, Revenue, Metascrore
Feel free to tinker with it and derive interesting insights.
Kaggle dataset identifier: imdb-data
<jupyter_script># # Python Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Read Data
df = pd.read_csv("/kaggle/input/imdb-data/IMDB-Movie-Data.csv")
df.head()
df.set_index("Title", inplace=True)
df.head(3)
# number of rows and columns
print("Number of rows: ", df.shape[0])
print("Number of columns:", df.shape[1])
# information about data
df.info()
# describe numerical basic statistics of data
df.describe().T
df.columns
# Let's remove the spaces in the column names
df.rename(
columns={
"Runtime (Minutes)": "Runtime_Minutes",
"Revenue (Millions)": "Revenue_Millions",
},
inplace=True,
)
# # Missing Value Analysis
# Missing data
df.isnull().sum()
# missing value with percentage
for i in df.columns:
null_rate = df[i].isnull().sum() / len(df) * 100
if null_rate > 0:
print("{} null rate: {}%".format(i, round(null_rate, 2)))
# Missing value graph with seaborn
import seaborn as sns
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap="magma")
# Column with high missing value is Revenue_Millions
df["Revenue_Millions"] = df["Revenue_Millions"].fillna(df["Revenue_Millions"].mean())
# Metascore is the only remaining column with missing values; drop the rows that still contain them
df.dropna(inplace=True)
# Control missing values
df.isnull().sum()
# # Correlation of Numerical Variables
sns.heatmap(df.corr(), annot=True, cmap="YlGnBu")
# # Number of Movies by Year
df.Year.value_counts().sort_index()
years = df["Year"].value_counts().sort_index()
plt.figure(facecolor="orange", figsize=(8, 5))
plt.plot(
years.index,
years.values,
marker="o",
color="red",
linestyle="--",
markerfacecolor="yellow",
)
plt.title("Number of Movies by Year", fontsize=16)
plt.show()
# # Top 10 Most Filmed Genres
df.Genre.value_counts()
# separates movie genres by comma
genres = df["Genre"].str.split(",")
# explode method puts each type on a separate line
genres = genres.explode()
# top 10 most recurring movie genres
genres.value_counts().head(10)
plt.figure(facecolor="orange", figsize=(12, 6))
ax = sns.barplot(
y=genres.value_counts().head(10).index, x=genres.value_counts().head(10).values
)
ax.set(xlabel="Count", ylabel="Genre", title="Top 10 Most Movies Genres")
ax.set_facecolor("xkcd:mint green")
ax.bar_label(ax.containers[0])
plt.show()
# pie chart
plt.figure(figsize=(10, 10))
sns.set_style("whitegrid")
sns.color_palette("pastel")
plt.pie(
x=genres.value_counts().head(5).values,
labels=genres.value_counts().head(5).index,
autopct="%1.1f%%",
)
plt.title("Top 5 Movies Genre")
plt.show()
# # Top 20 Actors by Number of Movies
# split the Actors column on commas
actors = df["Actors"].str.split(",")
# we need to remove spaces before or after the name
actors = actors.apply(lambda x: [i.strip() for i in x])
# explode method puts each type on a separate line
actors = actors.explode()
# the top 20 actors according to the number of movies they starred in
actors.value_counts().head(20)
plt.figure(facecolor="orange", figsize=(12, 6))
ax = sns.barplot(
y=actors.value_counts().head(20).index, x=actors.value_counts().head(20).values
)
ax.set(xlabel="Count", ylabel="Actors", title="Top 20 Actors by Number of Movies")
ax.set_facecolor("xkcd:mint green")
ax.bar_label(ax.containers[0])
plt.show()
# average rating over all movies
df["Rating"].mean()
# average rating scores of the top 20 actors with the most films
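# Note: str.contains() treats each name as a pattern and matches it anywhere inside the
# comma-separated Actors string, so a name contained within another actor's name could be
# over-counted; for the names in this top-20 list that is unlikely to matter.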
rating = []
for i in actors.value_counts().head(20).index:
x = df[df.Actors.str.contains(i)]["Rating"].mean()
rating.append(x)
actors_20 = pd.DataFrame({"rating": rating}, index=actors.value_counts().head(20).index)
actors_20
# * Most of the actors with the most movies have average ratings for the films they starred in that are above the overall rating average (a quick numeric check is sketched below).
# * The purpose of this analysis was to question why these actors appear in so many films. Given that their average ratings are not low, it can be said that these are actors who contribute to ratings and are well received by audiences.
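# A minimal sketch of that check (assuming the `df` and `actors_20` frames built in the cells above are still in memory): compare each actor's average rating with the overall mean.
overall_mean = df["Rating"].mean()
actors_20_check = actors_20.assign(above_overall=actors_20["rating"] > overall_mean)
print(f"Overall mean rating: {overall_mean:.2f}")
print(actors_20_check.sort_values("rating", ascending=False))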
plt.figure(facecolor="orange", figsize=(12, 6))
ax = sns.barplot(y=actors_20.index, x=actors_20.rating)
ax.set(xlabel="Count", ylabel="Actors", title="Top 20 Actors Rating Averages")
ax.set_facecolor("xkcd:mint green")
ax.bar_label(ax.containers[0])
plt.show()
df.Director.value_counts()
# # Revenue of Movies by Rating Scores
# we get the title column with reset_index to see the movie names in the bubble chart
df.reset_index(inplace=True)
# Let's select the required columns
bubble_data = df[["Rating", "Revenue_Millions", "Title"]]
fig = px.scatter(
bubble_data,
x="Rating",
y="Revenue_Millions",
size="Revenue_Millions",
hover_name="Title",
title="Revenue of 250 Popular Movies by Rating Scores ",
)
fig.update_layout(width=1100, height=600)
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/304/129304270.ipynb
|
imdb-data
| null |
[{"Id": 129304270, "ScriptId": 38440264, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12449603, "CreationDate": "05/12/2023 15:38:13", "VersionNumber": 1.0, "Title": "IMDB Movies Data( EDA )", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 191.0, "LinesInsertedFromPrevious": 191.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
|
[{"Id": 185224564, "KernelVersionId": 129304270, "SourceDatasetVersionId": 2639}]
|
[{"Id": 2639, "DatasetId": 1474, "DatasourceVersionId": 2639, "CreatorUserId": 863388, "LicenseName": "Other (specified in description)", "CreationDate": "06/26/2017 07:32:04", "VersionNumber": 1.0, "Title": "IMDB data from 2006 to 2016", "Slug": "imdb-data", "Subtitle": "A data set of 1,000 popular movies on IMDB in the last 10 years", "Description": "Here's a data set of 1,000 most popular movies on IMDB in the last 10 years. The data points included are:\n\nTitle, Genre, Description, Director, Actors, Year, Runtime, Rating, Votes, Revenue, Metascrore\n\nFeel free to tinker with it and derive interesting insights.", "VersionNotes": "Initial release", "TotalCompressedBytes": 309767.0, "TotalUncompressedBytes": 309767.0}]
|
[{"Id": 1474, "CreatorUserId": 863388, "OwnerUserId": NaN, "OwnerOrganizationId": 362.0, "CurrentDatasetVersionId": 2639.0, "CurrentDatasourceVersionId": 2639.0, "ForumId": 4364, "Type": 2, "CreationDate": "06/26/2017 07:32:04", "LastActivityDate": "02/06/2018", "TotalViews": 211642, "TotalDownloads": 39544, "TotalVotes": 394, "TotalKernels": 111}]
| null |
| false | 0 | 1,607 | 4 | 1,718 | 1,607 |
||
129304219
|
<jupyter_start><jupyter_text>News dataset from Lenta.Ru
## Корпус новостей с Lenta.Ru
* Размер: 2 Гб
* Количество новостей: 800K+
* Период: Сентябрь 1999 - декабрь 2019
+ [Скрипт](https://github.com/yutkin/Lenta.Ru-News-Dataset/blob/master/download_lenta.py) для скачивания новостей.
## (Eng) Corpus of news articles from Lenta.Ru
* Size: 2 Gb
* News articles: 800K+
* Dates: Sept. 1999 - Dec 2019
+ [Script](https://github.com/yutkin/Lenta.Ru-News-Dataset/blob/master/download_lenta.py) for news downloading.
## Скачать / Download
* [GitHub](https://github.com/yutkin/Lenta.Ru-News-Dataset/releases)
Kaggle dataset identifier: corpus-of-russian-news-articles-from-lenta
<jupyter_script>import numpy as np
import pandas as pd
# # Chapter 1. Data collection.
# In this section we unpack our dataset and look at the existing topics.
# Path to our CSV table of news
data_path = "/kaggle/input/corpus-of-russian-news-articles-from-lenta/lenta-ru-news.csv"
# Load it into a DataFrame, taking only the "text" and "topic" columns, since we will not need the rest. Dtype is needed to avoid conflicts (some field values get inferred as types other than str)
df_news = pd.read_csv(
    data_path, usecols=["text", "topic"], dtype={"text": "str", "topic": "str"}
)
print("Records in the table:", df_news.shape[0])
df_news.head()
pd.DataFrame(df_news["topic"].value_counts())
# # Chapter 2. Data preprocessing.
# The second step of our work is data preprocessing. We will strip the text of special characters, digits, single characters and the extra spaces left after those removals. We will also lemmatize the text (reduce words to their normal form) and keep only selected topics for training.
# re - regular expressions in Python. Needed for cleaning the text
import re
# nltk - a library used for natural language processing
import nltk
# Drop NaN rows
df_news.dropna(inplace=True)
# To balance the dataset we take the 8 most frequent topics.
# We could have taken 10, but the topics "Из жизни" and "Дом" are rather abstract, and on average they also lag behind the last, 8th topic "Интернет и СМИ" by a factor of 2.
df_news_needed = df_news.loc[
df_news["topic"].isin(
[
"Россия",
"Мир",
"Экономика",
"Спорт",
"Культура",
"Бывший СССР",
"Наука и техника",
"Интернет и СМИ",
]
)
]
# Download Yandex Mystem
# The Yandex Mystem morphological analyzer, used to lemmatize Russian words
from pymystem3 import Mystem
# Punctuation marks
from string import punctuation
# Russian stop words
from nltk.corpus import stopwords
russian_stopwords = stopwords.words("russian")
# Create a lemmatizer instance
lemmatizer = Mystem()
# Function that prepares a block of text
def preprocess_text(text):
    # Convert to lower case
    text = text.lower()
    # Tokenize and lemmatize the text
    # Lemmatization happens here
    tokens = lemmatizer.lemmatize(text)
    # Keep only tokens that are not stop words, not whitespace and not punctuation
    tokens = [
        token
        for token in tokens
        if token not in russian_stopwords
        and token != " "
        and token.strip() not in punctuation
    ]
    text = " ".join(tokens)
    return text
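# The chapter above also mentions stripping special characters and digits with regular expressions,
# while preprocess_text relies only on lemmatization and stop-word/punctuation filtering.
# A minimal optional sketch of such a cleaning step (hypothetical helper, not part of the original pipeline):
def clean_text(text):
    # keep only Cyrillic/Latin letters and spaces, then collapse repeated whitespace
    text = re.sub(r"[^а-яёa-z\s]", " ", text.lower())
    return re.sub(r"\s+", " ", text).strip()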
# Apply preprocessing to the 'text' column of the DataFrame
df_news_needed.loc[:, "text"] = df_news_needed["text"].apply(preprocess_text)
# Just in case, drop empty rows once more
df_news_needed.dropna(inplace=True)
print("Final number of records in the table:", df_news_needed.shape[0])
df_news_needed.head()
# CountVectorizer converts the text into a matrix (the so-called bag of words)
from sklearn.feature_extraction.text import CountVectorizer
# Create a CountVectorizer instance (stop words were already removed during preprocessing)
vectorizer = CountVectorizer()
# Convert the text into a bag of words
bag_of_words = vectorizer.fit_transform(df_news_needed["text"])
print(bag_of_words.toarray())
# A handy function for splitting the dataset into the required subsets
from sklearn.model_selection import train_test_split
# Split the data into training, test and validation sets
X = bag_of_words  # Features (bag of words)
y = df_news_needed["topic"]  # Topics
# First split off a test set, then carve a validation set out of the remaining training data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.2, random_state=42
)
# # Chapter 3. Building and tuning the model. Testing the model and analyzing the results.
# In this chapter we look at several classification algorithms available in sklearn.
# Metrics for testing
from sklearn.metrics import classification_report, confusion_matrix
# ### Chapter 3. Section 1. Naive Bayes classifier.
# First we take the multinomial naive Bayes classifier, since it is the best fit for text data (for multinomially distributed data)
# Import the multinomial naive Bayes model
from sklearn.naive_bayes import MultinomialNB
# Create a model instance
multinominial = MultinomialNB()
# Train the model
multinominial.fit(X_train, y_train)
# Predict on the test set
y_pred_bayes_test = multinominial.predict(X_test)
# Classification report
classification_report(y_test, y_pred_bayes_test)
# Confusion matrix
confusion_matrix(y_test, y_pred_bayes_test)
# Predict on the validation set
y_pred_bayes_val = multinominial.predict(X_val)
# Classification report
classification_report(y_val, y_pred_bayes_val)
# Confusion matrix
confusion_matrix(y_val, y_pred_bayes_val)
# ### Chapter 3. Section 2. Support vector machine.
# Let us look at the support vector machine. It is more effective than the naive Bayes classifier, but slower.
# Import the support vector machine model
from sklearn.svm import SVC
# Create a model instance
classifier = SVC()
# Train the model
classifier.fit(X_train, y_train)
# Predict on the test set
y_pred_svc_test = classifier.predict(X_test)
# Classification report
print(classification_report(y_test, y_pred_svc_test))
# Confusion matrix
print(confusion_matrix(y_test, y_pred_svc_test))
# Predict on the validation set
y_pred_svc_val = classifier.predict(X_val)
# Classification report
print(classification_report(y_val, y_pred_svc_val))
# Confusion matrix
print(confusion_matrix(y_val, y_pred_svc_val))
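# The note above about SVC being slower matters at this scale (hundreds of thousands of documents):
# a linear SVM is the usual faster alternative for sparse bag-of-words features.
# A minimal optional sketch with LinearSVC, reusing the same splits (not part of the original notebook):
from sklearn.svm import LinearSVC

linear_clf = LinearSVC()
linear_clf.fit(X_train, y_train)
print(classification_report(y_val, linear_clf.predict(X_val)))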
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/304/129304219.ipynb
|
corpus-of-russian-news-articles-from-lenta
|
yutkin
|
[{"Id": 129304219, "ScriptId": 38427050, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15068518, "CreationDate": "05/12/2023 15:37:42", "VersionNumber": 2.0, "Title": "kursach", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 3.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 149.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185224496, "KernelVersionId": 129304219, "SourceDatasetVersionId": 843399}]
|
[{"Id": 843399, "DatasetId": 37495, "DatasourceVersionId": 866335, "CreatorUserId": 434357, "LicenseName": "Unknown", "CreationDate": "12/14/2019 14:34:44", "VersionNumber": 2.0, "Title": "News dataset from Lenta.Ru", "Slug": "corpus-of-russian-news-articles-from-lenta", "Subtitle": "Corpus of Russian news articles collected from Lenta.Ru", "Description": "## \u041a\u043e\u0440\u043f\u0443\u0441 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439 \u0441 Lenta.Ru \n\n* \u0420\u0430\u0437\u043c\u0435\u0440: 2 \u0413\u0431\n* \u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439: 800K+\n* \u041f\u0435\u0440\u0438\u043e\u0434: \u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c 1999 - \u0434\u0435\u043a\u0430\u0431\u0440\u044c 2019\n\n+ [\u0421\u043a\u0440\u0438\u043f\u0442](https://github.com/yutkin/Lenta.Ru-News-Dataset/blob/master/download_lenta.py) \u0434\u043b\u044f \u0441\u043a\u0430\u0447\u0438\u0432\u0430\u043d\u0438\u044f \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439.\n\n## (Eng) Corpus of news articles from Lenta.Ru\n* Size: 2 Gb\n* News articles: 800K+\n* Dates: Sept. 1999 - Dec 2019\n\n+ [Script](https://github.com/yutkin/Lenta.Ru-News-Dataset/blob/master/download_lenta.py) for news downloading.\n\n\n## \u0421\u043a\u0430\u0447\u0430\u0442\u044c / Download\n* [GitHub](https://github.com/yutkin/Lenta.Ru-News-Dataset/releases)", "VersionNotes": "Add news articles up to 14/12/2019", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 37495, "CreatorUserId": 434357, "OwnerUserId": 434357.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 843399.0, "CurrentDatasourceVersionId": 866335.0, "ForumId": 45927, "Type": 2, "CreationDate": "07/18/2018 08:11:37", "LastActivityDate": "07/18/2018", "TotalViews": 28475, "TotalDownloads": 2648, "TotalVotes": 75, "TotalKernels": 16}]
|
[{"Id": 434357, "UserName": "yutkin", "DisplayName": "DmitryYutkin", "RegisterDate": "10/03/2015", "PerformanceTier": 0}]
|
import numpy as np
import pandas as pd
# # Chapter 1. Data collection.
# In this section we unpack our dataset and look at the existing topics.
# Path to our CSV table of news
data_path = "/kaggle/input/corpus-of-russian-news-articles-from-lenta/lenta-ru-news.csv"
# Load it into a DataFrame, taking only the "text" and "topic" columns, since we will not need the rest. Dtype is needed to avoid conflicts (some field values get inferred as types other than str)
df_news = pd.read_csv(
    data_path, usecols=["text", "topic"], dtype={"text": "str", "topic": "str"}
)
print("Records in the table:", df_news.shape[0])
df_news.head()
pd.DataFrame(df_news["topic"].value_counts())
# # Chapter 2. Data preprocessing.
# The second step of our work is data preprocessing. We will strip the text of special characters, digits, single characters and the extra spaces left after those removals. We will also lemmatize the text (reduce words to their normal form) and keep only selected topics for training.
# re - regular expressions in Python. Needed for cleaning the text
import re
# nltk - a library used for natural language processing
import nltk
# Drop NaN rows
df_news.dropna(inplace=True)
# To balance the dataset we take the 8 most frequent topics.
# We could have taken 10, but the topics "Из жизни" and "Дом" are rather abstract, and on average they also lag behind the last, 8th topic "Интернет и СМИ" by a factor of 2.
df_news_needed = df_news.loc[
df_news["topic"].isin(
[
"Россия",
"Мир",
"Экономика",
"Спорт",
"Культура",
"Бывший СССР",
"Наука и техника",
"Интернет и СМИ",
]
)
]
# Download Yandex Mystem
# The Yandex Mystem morphological analyzer, used to lemmatize Russian words
from pymystem3 import Mystem
# Punctuation marks
from string import punctuation
# Russian stop words
from nltk.corpus import stopwords
russian_stopwords = stopwords.words("russian")
# Create a lemmatizer instance
lemmatizer = Mystem()
# Function that prepares a block of text
def preprocess_text(text):
    # Convert to lower case
    text = text.lower()
    # Tokenize and lemmatize the text
    # Lemmatization happens here
    tokens = lemmatizer.lemmatize(text)
    # Keep only tokens that are not stop words, not whitespace and not punctuation
    tokens = [
        token
        for token in tokens
        if token not in russian_stopwords
        and token != " "
        and token.strip() not in punctuation
    ]
    text = " ".join(tokens)
    return text
# Apply preprocessing to the 'text' column of the DataFrame
df_news_needed.loc[:, "text"] = df_news_needed["text"].apply(preprocess_text)
# Just in case, drop empty rows once more
df_news_needed.dropna(inplace=True)
print("Final number of records in the table:", df_news_needed.shape[0])
df_news_needed.head()
# CountVectorizer converts the text into a matrix (the so-called bag of words)
from sklearn.feature_extraction.text import CountVectorizer
# Create a CountVectorizer instance (stop words were already removed during preprocessing)
vectorizer = CountVectorizer()
# Convert the text into a bag of words
bag_of_words = vectorizer.fit_transform(df_news_needed["text"])
print(bag_of_words.toarray())
# A handy function for splitting the dataset into the required subsets
from sklearn.model_selection import train_test_split
# Split the data into training, test and validation sets
X = bag_of_words  # Features (bag of words)
y = df_news_needed["topic"]  # Topics
# First split off a test set, then carve a validation set out of the remaining training data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.2, random_state=42
)
# # Chapter 3. Building and tuning the model. Testing the model and analyzing the results.
# In this chapter we look at several classification algorithms available in sklearn.
# Metrics for testing
from sklearn.metrics import classification_report, confusion_matrix
# ### Chapter 3. Section 1. Naive Bayes classifier.
# First we take the multinomial naive Bayes classifier, since it is the best fit for text data (for multinomially distributed data)
# Import the multinomial naive Bayes model
from sklearn.naive_bayes import MultinomialNB
# Create a model instance
multinominial = MultinomialNB()
# Train the model
multinominial.fit(X_train, y_train)
# Predict on the test set
y_pred_bayes_test = multinominial.predict(X_test)
# Classification report
classification_report(y_test, y_pred_bayes_test)
# Confusion matrix
confusion_matrix(y_test, y_pred_bayes_test)
# Predict on the validation set
y_pred_bayes_val = multinominial.predict(X_val)
# Classification report
classification_report(y_val, y_pred_bayes_val)
# Confusion matrix
confusion_matrix(y_val, y_pred_bayes_val)
# ### Chapter 3. Section 2. Support vector machine.
# Let us look at the support vector machine. It is more effective than the naive Bayes classifier, but slower.
# Import the support vector machine model
from sklearn.svm import SVC
# Create a model instance
classifier = SVC()
# Train the model
classifier.fit(X_train, y_train)
# Predict on the test set
y_pred_svc_test = classifier.predict(X_test)
# Classification report
print(classification_report(y_test, y_pred_svc_test))
# Confusion matrix
print(confusion_matrix(y_test, y_pred_svc_test))
# Predict on the validation set
y_pred_svc_val = classifier.predict(X_val)
# Classification report
print(classification_report(y_val, y_pred_svc_val))
# Confusion matrix
print(confusion_matrix(y_val, y_pred_svc_val))
| false | 0 | 2,355 | 0 | 2,643 | 2,355 |
||
129639443
|
# # **How to store many DataFrames in the same Excel workbook**
# 
# ### Saving pandas dataframes as Excel files is often useful, but when we have a lot of dataframes, writing each one to a separate Excel file becomes cumbersome and takes more storage space.
# ### To avoid this problem, this notebook shows how to put each dataframe in its own Excel sheet, named after the dataframe itself.
# # 1. Imports
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # 2. Getting the dataframe's name as string
# #### The following function allows us to get the name of a given dataframe as string
def _get_df_name(df):
"""returns the df's name as a string"""
name = [x for x in globals() if globals()[x] is df][0]
return name
# # 3. Getting list of names from a list of dataframes
# #### This one collects the names of the dataframes in a list and returns them as a list of strings
def get_dfs_names_as_string(list_dfs):
"""Gathers dfs names as strings in a list"""
names_dfs = []
for df in list_dfs:
names_dfs.append(_get_df_name(df))
return names_dfs
# # 4. Saving a list of dataframes as an excel workbook
# #### The following function saves a list of dataframes inside the same Excel file, in multiple sheets named after the corresponding dataframes
def _save_xls(list_dfs, xls_path):
"""Save a list of dfs in an excel file"""
names_of_dfs = get_dfs_names_as_string(list_dfs)
with pd.ExcelWriter(xls_path) as writer:
for n, df in enumerate(list_dfs):
df.to_excel(writer, names_of_dfs[n])
# # 5. Putting all together
# #### This function aggregates the previous ones
def get_all_dataframes_in_excel(list_dfs, excel_file_name="excel_object"):
    """This function converts a list of dataframes to Excel; each dataframe will be
    on its own sheet, named after the dataframe
    inputs
    ------
    list_dfs : List of pandas dataframes, ex. [df1, df2, df3]
    excel_file_name : The name you want to give the Excel file
    outputs
    -------
    An Excel file in the working directory with sheets like df1, df2, df3 according to the dfs' names
    """
exls_df = pd.DataFrame()
exls_df.to_excel(excel_file_name + ".xlsx")
xls_path = "/kaggle/working/" + excel_file_name + ".xlsx"
_save_xls(list_dfs, xls_path)
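# # 6. Usage example
# #### A minimal usage sketch with two hypothetical dataframes (df1, df2), just for illustration;
# #### note that _get_df_name relies on globals(), so the dataframes must be defined at notebook top level
df1 = pd.DataFrame({"a": [1, 2]})
df2 = pd.DataFrame({"b": [3, 4]})
# Both frames end up in "my_frames.xlsx", on sheets named "df1" and "df2"
get_all_dataframes_in_excel([df1, df2], excel_file_name="my_frames")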
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/639/129639443.ipynb
| null | null |
[{"Id": 129639443, "ScriptId": 38547818, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2306412, "CreationDate": "05/15/2023 12:14:39", "VersionNumber": 3.0, "Title": "Storing many DataFrames in the same EXCEL", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 73.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 73.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 8}]
| null | null | null | null |
# # **How to store many DataFrames in the same Excel workbook**
# 
# ### Saving pandas dataframes as Excel files is often useful, but when we have a lot of dataframes, writing each one to a separate Excel file becomes cumbersome and takes more storage space.
# ### To avoid this problem, this notebook shows how to put each dataframe in its own Excel sheet, named after the dataframe itself.
# # 1. Imports
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # 2. Getting the dataframe's name as string
# #### The following function allows us to get the name of a given dataframe as string
def _get_df_name(df):
"""returns the df's name as a string"""
name = [x for x in globals() if globals()[x] is df][0]
return name
# # 3. Getting list of names from a list of dataframes
# #### This one collects the names of the dataframes in a list and returns them as a list of strings
def get_dfs_names_as_string(list_dfs):
"""Gathers dfs names as strings in a list"""
names_dfs = []
for df in list_dfs:
names_dfs.append(_get_df_name(df))
return names_dfs
# # 4. Saving a list of dataframes as an excel workbook
# #### The following function saves a list of dataframes inside the same Excel file, in multiple sheets named after the corresponding dataframes
def _save_xls(list_dfs, xls_path):
"""Save a list of dfs in an excel file"""
names_of_dfs = get_dfs_names_as_string(list_dfs)
with pd.ExcelWriter(xls_path) as writer:
for n, df in enumerate(list_dfs):
df.to_excel(writer, names_of_dfs[n])
# # 5. Putting all together
# #### This function aggregates the previous ones
def get_all_dataframes_in_excel(list_dfs, excel_file_name="excel_object"):
    """This function converts a list of dataframes to Excel; each dataframe will be
    on its own sheet, named after the dataframe
    inputs
    ------
    list_dfs : List of pandas dataframes, ex. [df1, df2, df3]
    excel_file_name : The name you want to give the Excel file
    outputs
    -------
    An Excel file in the working directory with sheets like df1, df2, df3 according to the dfs' names
    """
exls_df = pd.DataFrame()
exls_df.to_excel(excel_file_name + ".xlsx")
xls_path = "/kaggle/working/" + excel_file_name + ".xlsx"
_save_xls(list_dfs, xls_path)
| false | 0 | 730 | 8 | 730 | 730 |
||
129639726
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import ast
df = pd.read_csv(
"/kaggle/input/twitter-chat-gpt-with-embeddings/df_with_embeddings.csv"
)
print(len(df))
def to_np(x: str):
ex = ast.literal_eval(x)
num = np.array(ex)
return num
ex = df["embeddings"][0]
to_np(ex)
import numpy as np
def convert_to_BLOB(embedding):
out = np.array(
embedding
) # np array to bytes for blob data in sqlite, float 64 is the default
return out.tobytes()
df.columns
emb = convert_to_BLOB(df["embeddings"][0])
df["content_embedding"] = df["embeddings"].apply(lambda x: convert_to_BLOB((to_np(x))))
# drop the original string-encoded column now that the BLOB column exists
df = df.drop(columns=["embeddings"])
df.head()
# test
import numpy as np
def convert_bytes_to_nparray(embedding_bytes: bytes):
"""Converts a byte stream to a numpy array"""
embedding_np = np.frombuffer(embedding_bytes, dtype=np.float64)
return embedding_np
for i in range(4):
blob = df["content_embedding"][i]
print(type(blob))
arr = convert_bytes_to_nparray(blob)
print(type(arr))
df.to_csv("/kaggle/working/50k_embedded_dataset")
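# A minimal sketch of how these BLOBs could round-trip through SQLite
# (the in-memory database and the table name "tweets" are hypothetical, not part of the original pipeline):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tweets (id INTEGER PRIMARY KEY, content_embedding BLOB)")
conn.execute(
    "INSERT INTO tweets (content_embedding) VALUES (?)",
    (df["content_embedding"][0],),
)
stored_blob = conn.execute("SELECT content_embedding FROM tweets").fetchone()[0]
print(convert_bytes_to_nparray(stored_blob)[:5])
conn.close()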
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/639/129639726.ipynb
| null | null |
[{"Id": 129639726, "ScriptId": 38549593, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12091370, "CreationDate": "05/15/2023 12:17:09", "VersionNumber": 1.0, "Title": "converting nparr to bytes", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 63.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import ast
df = pd.read_csv(
"/kaggle/input/twitter-chat-gpt-with-embeddings/df_with_embeddings.csv"
)
print(len(df))
def to_np(x: str):
ex = ast.literal_eval(x)
num = np.array(ex)
return num
ex = df["embeddings"][0]
to_np(ex)
import numpy as np
def convert_to_BLOB(embedding):
out = np.array(
embedding
) # np array to bytes for blob data in sqlite, float 64 is the default
return out.tobytes()
df.columns
emb = convert_to_BLOB(df["embeddings"][0])
df["content_embedding"] = df["embeddings"].apply(lambda x: convert_to_BLOB((to_np(x))))
# drop the original string-encoded column now that the BLOB column exists
df = df.drop(columns=["embeddings"])
df.head()
# test
import numpy as np
def convert_bytes_to_nparray(embedding_bytes: bytes):
"""Converts a byte stream to a numpy array"""
embedding_np = np.frombuffer(embedding_bytes, dtype=np.float64)
return embedding_np
for i in range(4):
blob = df["content_embedding"][i]
print(type(blob))
arr = convert_bytes_to_nparray(blob)
print(type(arr))
df.to_csv("/kaggle/working/50k_embedded_dataset")
| false | 0 | 530 | 0 | 530 | 530 |
||
129743931
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv(r"/kaggle/input/playground-series-s3e14/train.csv")
test = pd.read_csv(r"/kaggle/input/playground-series-s3e14/test.csv")
testids = test["id"]
train.head()
sns.pairplot(train.sample(n=1000), hue="yield")
test.isna().sum()
train.isna().sum()
from sklearn.neighbors import KNeighborsClassifier
def clean(df):
df.drop(["id"], axis=1, inplace=True)
return df
train = clean(train)
test = clean(test)
train.head()
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
X = train.drop(["yield"], axis=1)
y = train["yield"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = LinearRegression()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
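# clf.score reports R^2; mean absolute error is another regression metric worth checking
# on the hold-out split (a small optional sketch, not part of the original submission flow)
from sklearn.metrics import mean_absolute_error

holdout_preds = clf.predict(X_test)
print("MAE:", mean_absolute_error(y_test, holdout_preds))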
preds = clf.predict(test)
plt.scatter(testids, preds)
preds
sub = pd.DataFrame({"id": testids, "yield": preds})
sub.plot(kind="scatter", x="id", y="yield")
sub.to_csv("sub_blueberry.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/743/129743931.ipynb
| null | null |
[{"Id": 129743931, "ScriptId": 38584955, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8982069, "CreationDate": "05/16/2023 07:08:26", "VersionNumber": 2.0, "Title": "[S3E14]BlueBerryYield", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 47.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 45.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
| null | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv(r"/kaggle/input/playground-series-s3e14/train.csv")
test = pd.read_csv(r"/kaggle/input/playground-series-s3e14/test.csv")
testids = test["id"]
train.head()
sns.pairplot(train.sample(n=1000), hue="yield")
test.isna().sum()
train.isna().sum()
from sklearn.neighbors import KNeighborsClassifier
def clean(df):
df.drop(["id"], axis=1, inplace=True)
return df
train = clean(train)
test = clean(test)
train.head()
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
X = train.drop(["yield"], axis=1)
y = train["yield"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = LinearRegression()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
preds = clf.predict(test)
plt.scatter(testids, preds)
preds
sub = pd.DataFrame({"id": testids, "yield": preds})
sub.plot(kind="scatter", x="id", y="yield")
sub.to_csv("sub_blueberry.csv", index=False)
| false | 0 | 368 | 7 | 368 | 368 |
||
129743143
|
<jupyter_start><jupyter_text>Breast Cancer Dataset
### Description:
Breast cancer is the most common cancer amongst women in the world. It accounts for 25% of all cancer cases, and affected over 2.1 Million people in 2015 alone. It starts when cells in the breast begin to grow out of control. These cells usually form tumors that can be seen via X-ray or felt as lumps in the breast area.
The key challenges against it’s detection is how to classify tumors into malignant (cancerous) or benign(non cancerous). We ask you to complete the analysis of classifying these tumors using machine learning (with SVMs) and the Breast Cancer Wisconsin (Diagnostic) Dataset.
Kaggle dataset identifier: breast-cancer-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('breast-cancer-dataset/breast-cancer.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 569 entries, 0 to 568
Data columns (total 32 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
1 diagnosis 569 non-null object
2 radius_mean 569 non-null float64
3 texture_mean 569 non-null float64
4 perimeter_mean 569 non-null float64
5 area_mean 569 non-null float64
6 smoothness_mean 569 non-null float64
7 compactness_mean 569 non-null float64
8 concavity_mean 569 non-null float64
9 concave points_mean 569 non-null float64
10 symmetry_mean 569 non-null float64
11 fractal_dimension_mean 569 non-null float64
12 radius_se 569 non-null float64
13 texture_se 569 non-null float64
14 perimeter_se 569 non-null float64
15 area_se 569 non-null float64
16 smoothness_se 569 non-null float64
17 compactness_se 569 non-null float64
18 concavity_se 569 non-null float64
19 concave points_se 569 non-null float64
20 symmetry_se 569 non-null float64
21 fractal_dimension_se 569 non-null float64
22 radius_worst 569 non-null float64
23 texture_worst 569 non-null float64
24 perimeter_worst 569 non-null float64
25 area_worst 569 non-null float64
26 smoothness_worst 569 non-null float64
27 compactness_worst 569 non-null float64
28 concavity_worst 569 non-null float64
29 concave points_worst 569 non-null float64
30 symmetry_worst 569 non-null float64
31 fractal_dimension_worst 569 non-null float64
dtypes: float64(30), int64(1), object(1)
memory usage: 142.4+ KB
<jupyter_text>Examples:
{
"id": 842302,
"diagnosis": "M",
"radius_mean": 17.99,
"texture_mean": 10.38,
"perimeter_mean": 122.8,
"area_mean": 1001.0,
"smoothness_mean": 0.1184,
"compactness_mean": 0.2776,
"concavity_mean": 0.30010000000000003,
"concave points_mean": 0.1471,
"symmetry_mean": 0.2419,
"fractal_dimension_mean": 0.07871,
"radius_se": 1.095,
"texture_se": 0.9053,
"perimeter_se": 8.589,
"area_se": 153.4,
"smoothness_se": 0.006398999999999999,
"compactness_se": 0.04904000000000001,
"concavity_se": 0.05373000000000001,
"concave points_se": 0.015870000000000002,
"...": "and 12 more columns"
}
{
"id": 842517,
"diagnosis": "M",
"radius_mean": 20.57,
"texture_mean": 17.77,
"perimeter_mean": 132.9,
"area_mean": 1326.0,
"smoothness_mean": 0.08474000000000001,
"compactness_mean": 0.07864,
"concavity_mean": 0.0869,
"concave points_mean": 0.07017000000000001,
"symmetry_mean": 0.1812,
"fractal_dimension_mean": 0.056670000000000005,
"radius_se": 0.5435,
"texture_se": 0.7339,
"perimeter_se": 3.398,
"area_se": 74.08,
"smoothness_se": 0.005225,
"compactness_se": 0.013080000000000001,
"concavity_se": 0.018600000000000002,
"concave points_se": 0.0134,
"...": "and 12 more columns"
}
{
"id": 84300903,
"diagnosis": "M",
"radius_mean": 19.69,
"texture_mean": 21.25,
"perimeter_mean": 130.0,
"area_mean": 1203.0,
"smoothness_mean": 0.1096,
"compactness_mean": 0.15990000000000001,
"concavity_mean": 0.19740000000000002,
"concave points_mean": 0.1279,
"symmetry_mean": 0.2069,
"fractal_dimension_mean": 0.05999,
"radius_se": 0.7456,
"texture_se": 0.7869,
"perimeter_se": 4.585,
"area_se": 94.03,
"smoothness_se": 0.00615,
"compactness_se": 0.040060000000000005,
"concavity_se": 0.03832,
"concave points_se": 0.02058,
"...": "and 12 more columns"
}
{
"id": 84348301,
"diagnosis": "M",
"radius_mean": 11.42,
"texture_mean": 20.38,
"perimeter_mean": 77.58,
"area_mean": 386.1,
"smoothness_mean": 0.14250000000000002,
"compactness_mean": 0.28390000000000004,
"concavity_mean": 0.2414,
"concave points_mean": 0.1052,
"symmetry_mean": 0.2597,
"fractal_dimension_mean": 0.09744000000000001,
"radius_se": 0.49560000000000004,
"texture_se": 1.156,
"perimeter_se": 3.445,
"area_se": 27.23,
"smoothness_se": 0.00911,
"compactness_se": 0.07458000000000001,
"concavity_se": 0.05661000000000001,
"concave points_se": 0.018670000000000003,
"...": "and 12 more columns"
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import seaborn as sns
data = pd.read_csv("/kaggle/input/breast-cancer-dataset/breast-cancer.csv")
data.head()
data.isna().sum()
data["diagnosis"].unique()
# M- malignant (Dangerous) (1)
# B- benign (Not Dangerous) (0)
data.describe()
data.head() # m = 1 and b = 0
data["diagnosis"] = pd.Categorical(data.diagnosis).codes
data["diagnosis"].unique()
data.head()
# feature and target variable separation
X = data.drop(columns=["diagnosis"])
y = data["diagnosis"]
print("X shape", X.shape)
print("y shape", y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
print("X_train shape:", X_train.shape, " X_test shape:", X_test.shape)
print("y_train shape:", y_train.shape, " y_test shape:", y_test.shape)
# standard Scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
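# Note: X still includes the "id" column, so there are 31 input features
# (32 csv columns minus "diagnosis"), which is why input_shape=(31,) is used below.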
model = keras.Sequential(
[
keras.layers.Dense(30, input_shape=(31,), activation="relu"),
keras.layers.Dense(10, activation="relu"),
keras.layers.Dense(1, activation="sigmoid"),
]
)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=20)
model.evaluate(X_test, y_test)
y_pred = pd.Series(model.predict(X_test).flatten())
y_pred = (y_pred > 0.5).astype(int)
y_pred[:5]
cm = tf.math.confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
accuracy_score(y_test, y_pred)
clf = classification_report(y_test, y_pred, output_dict=True)
sns.heatmap(pd.DataFrame(clf), annot=True)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/743/129743143.ipynb
|
breast-cancer-dataset
|
yasserh
|
[{"Id": 129743143, "ScriptId": 38584754, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7916356, "CreationDate": "05/16/2023 07:01:37", "VersionNumber": 1.0, "Title": "Breast Cancer Using ANN", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 89.0, "LinesInsertedFromPrevious": 89.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 8}]
|
[{"Id": 186093269, "KernelVersionId": 129743143, "SourceDatasetVersionId": 2984728}]
|
[{"Id": 2984728, "DatasetId": 1829286, "DatasourceVersionId": 3032465, "CreatorUserId": 8833583, "LicenseName": "CC0: Public Domain", "CreationDate": "12/29/2021 19:07:20", "VersionNumber": 1.0, "Title": "Breast Cancer Dataset", "Slug": "breast-cancer-dataset", "Subtitle": "Binary Classification Prediction for type of Breast Cancer", "Description": "### Description:\n\nBreast cancer is the most common cancer amongst women in the world. It accounts for 25% of all cancer cases, and affected over 2.1 Million people in 2015 alone. It starts when cells in the breast begin to grow out of control. These cells usually form tumors that can be seen via X-ray or felt as lumps in the breast area.\n\nThe key challenges against it\u2019s detection is how to classify tumors into malignant (cancerous) or benign(non cancerous). We ask you to complete the analysis of classifying these tumors using machine learning (with SVMs) and the Breast Cancer Wisconsin (Diagnostic) Dataset.\n\n### Acknowledgements:\nThis dataset has been referred from Kaggle.\n\n### Objective:\n- Understand the Dataset & cleanup (if required).\n- Build classification models to predict whether the cancer type is Malignant or Benign.\n- Also fine-tune the hyperparameters & compare the evaluation metrics of various classification algorithms.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1829286, "CreatorUserId": 8833583, "OwnerUserId": 8833583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2984728.0, "CurrentDatasourceVersionId": 3032465.0, "ForumId": 1852047, "Type": 2, "CreationDate": "12/29/2021 19:07:20", "LastActivityDate": "12/29/2021", "TotalViews": 170368, "TotalDownloads": 29194, "TotalVotes": 276, "TotalKernels": 138}]
|
[{"Id": 8833583, "UserName": "yasserh", "DisplayName": "M Yasser H", "RegisterDate": "11/09/2021", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import seaborn as sns
data = pd.read_csv("/kaggle/input/breast-cancer-dataset/breast-cancer.csv")
data.head()
data.isna().sum()
data["diagnosis"].unique()
# M- malignant (Dangerous) (1)
# B- benign (Not Dangerous) (0)
data.describe()
data.head() # m = 1 and b = 0
data["diagnosis"] = pd.Categorical(data.diagnosis).codes
data["diagnosis"].unique()
data.head()
# feature and target variable separation
X = data.drop(columns=["diagnosis"])
y = data["diagnosis"]
print("X shape", X.shape)
print("y shape", y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
print("X_train shape:", X_train.shape, " X_test shape:", X_test.shape)
print("y_train shape:", y_train.shape, " y_test shape:", y_test.shape)
# standard Scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = keras.Sequential(
[
keras.layers.Dense(30, input_shape=(31,), activation="relu"),
keras.layers.Dense(10, activation="relu"),
keras.layers.Dense(1, activation="sigmoid"),
]
)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=20)
model.evaluate(X_test, y_test)
y_pred = pd.Series(model.predict(X_test).flatten())
y_pred = (y_pred > 0.5).astype(int)
y_pred[:5]
cm = tf.math.confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
accuracy_score(y_test, y_pred)
clf = classification_report(y_test, y_pred, output_dict=True)
sns.heatmap(pd.DataFrame(clf), annot=True)
|
[{"breast-cancer-dataset/breast-cancer.csv": {"column_names": "[\"id\", \"diagnosis\", \"radius_mean\", \"texture_mean\", \"perimeter_mean\", \"area_mean\", \"smoothness_mean\", \"compactness_mean\", \"concavity_mean\", \"concave points_mean\", \"symmetry_mean\", \"fractal_dimension_mean\", \"radius_se\", \"texture_se\", \"perimeter_se\", \"area_se\", \"smoothness_se\", \"compactness_se\", \"concavity_se\", \"concave points_se\", \"symmetry_se\", \"fractal_dimension_se\", \"radius_worst\", \"texture_worst\", \"perimeter_worst\", \"area_worst\", \"smoothness_worst\", \"compactness_worst\", \"concavity_worst\", \"concave points_worst\", \"symmetry_worst\", \"fractal_dimension_worst\"]", "column_data_types": "{\"id\": \"int64\", \"diagnosis\": \"object\", \"radius_mean\": \"float64\", \"texture_mean\": \"float64\", \"perimeter_mean\": \"float64\", \"area_mean\": \"float64\", \"smoothness_mean\": \"float64\", \"compactness_mean\": \"float64\", \"concavity_mean\": \"float64\", \"concave points_mean\": \"float64\", \"symmetry_mean\": \"float64\", \"fractal_dimension_mean\": \"float64\", \"radius_se\": \"float64\", \"texture_se\": \"float64\", \"perimeter_se\": \"float64\", \"area_se\": \"float64\", \"smoothness_se\": \"float64\", \"compactness_se\": \"float64\", \"concavity_se\": \"float64\", \"concave points_se\": \"float64\", \"symmetry_se\": \"float64\", \"fractal_dimension_se\": \"float64\", \"radius_worst\": \"float64\", \"texture_worst\": \"float64\", \"perimeter_worst\": \"float64\", \"area_worst\": \"float64\", \"smoothness_worst\": \"float64\", \"compactness_worst\": \"float64\", \"concavity_worst\": \"float64\", \"concave points_worst\": \"float64\", \"symmetry_worst\": \"float64\", \"fractal_dimension_worst\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 569 entries, 0 to 568\nData columns (total 32 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 569 non-null int64 \n 1 diagnosis 569 non-null object \n 2 radius_mean 569 non-null float64\n 3 texture_mean 569 non-null float64\n 4 perimeter_mean 569 non-null float64\n 5 area_mean 569 non-null float64\n 6 smoothness_mean 569 non-null float64\n 7 compactness_mean 569 non-null float64\n 8 concavity_mean 569 non-null float64\n 9 concave points_mean 569 non-null float64\n 10 symmetry_mean 569 non-null float64\n 11 fractal_dimension_mean 569 non-null float64\n 12 radius_se 569 non-null float64\n 13 texture_se 569 non-null float64\n 14 perimeter_se 569 non-null float64\n 15 area_se 569 non-null float64\n 16 smoothness_se 569 non-null float64\n 17 compactness_se 569 non-null float64\n 18 concavity_se 569 non-null float64\n 19 concave points_se 569 non-null float64\n 20 symmetry_se 569 non-null float64\n 21 fractal_dimension_se 569 non-null float64\n 22 radius_worst 569 non-null float64\n 23 texture_worst 569 non-null float64\n 24 perimeter_worst 569 non-null float64\n 25 area_worst 569 non-null float64\n 26 smoothness_worst 569 non-null float64\n 27 compactness_worst 569 non-null float64\n 28 concavity_worst 569 non-null float64\n 29 concave points_worst 569 non-null float64\n 30 symmetry_worst 569 non-null float64\n 31 fractal_dimension_worst 569 non-null float64\ndtypes: float64(30), int64(1), object(1)\nmemory usage: 142.4+ KB\n", "summary": "{\"id\": {\"count\": 569.0, \"mean\": 30371831.432337433, \"std\": 125020585.61222365, \"min\": 8670.0, \"25%\": 869218.0, \"50%\": 906024.0, \"75%\": 8813129.0, \"max\": 911320502.0}, \"radius_mean\": {\"count\": 569.0, \"mean\": 
14.127291739894552, \"std\": 3.5240488262120775, \"min\": 6.981, \"25%\": 11.7, \"50%\": 13.37, \"75%\": 15.78, \"max\": 28.11}, \"texture_mean\": {\"count\": 569.0, \"mean\": 19.289648506151142, \"std\": 4.301035768166949, \"min\": 9.71, \"25%\": 16.17, \"50%\": 18.84, \"75%\": 21.8, \"max\": 39.28}, \"perimeter_mean\": {\"count\": 569.0, \"mean\": 91.96903339191564, \"std\": 24.298981038754906, \"min\": 43.79, \"25%\": 75.17, \"50%\": 86.24, \"75%\": 104.1, \"max\": 188.5}, \"area_mean\": {\"count\": 569.0, \"mean\": 654.8891036906855, \"std\": 351.914129181653, \"min\": 143.5, \"25%\": 420.3, \"50%\": 551.1, \"75%\": 782.7, \"max\": 2501.0}, \"smoothness_mean\": {\"count\": 569.0, \"mean\": 0.0963602811950791, \"std\": 0.01406412813767362, \"min\": 0.05263, \"25%\": 0.08637, \"50%\": 0.09587, \"75%\": 0.1053, \"max\": 0.1634}, \"compactness_mean\": {\"count\": 569.0, \"mean\": 0.10434098418277679, \"std\": 0.052812757932512194, \"min\": 0.01938, \"25%\": 0.06492, \"50%\": 0.09263, \"75%\": 0.1304, \"max\": 0.3454}, \"concavity_mean\": {\"count\": 569.0, \"mean\": 0.0887993158172232, \"std\": 0.07971980870789348, \"min\": 0.0, \"25%\": 0.02956, \"50%\": 0.06154, \"75%\": 0.1307, \"max\": 0.4268}, \"concave points_mean\": {\"count\": 569.0, \"mean\": 0.04891914586994728, \"std\": 0.038802844859153605, \"min\": 0.0, \"25%\": 0.02031, \"50%\": 0.0335, \"75%\": 0.074, \"max\": 0.2012}, \"symmetry_mean\": {\"count\": 569.0, \"mean\": 0.18116186291739894, \"std\": 0.027414281336035715, \"min\": 0.106, \"25%\": 0.1619, \"50%\": 0.1792, \"75%\": 0.1957, \"max\": 0.304}, \"fractal_dimension_mean\": {\"count\": 569.0, \"mean\": 0.06279760984182776, \"std\": 0.007060362795084459, \"min\": 0.04996, \"25%\": 0.0577, \"50%\": 0.06154, \"75%\": 0.06612, \"max\": 0.09744}, \"radius_se\": {\"count\": 569.0, \"mean\": 0.40517205623901575, \"std\": 0.2773127329861039, \"min\": 0.1115, \"25%\": 0.2324, \"50%\": 0.3242, \"75%\": 0.4789, \"max\": 2.873}, \"texture_se\": {\"count\": 569.0, \"mean\": 1.2168534270650264, \"std\": 0.5516483926172023, \"min\": 0.3602, \"25%\": 0.8339, \"50%\": 1.108, \"75%\": 1.474, \"max\": 4.885}, \"perimeter_se\": {\"count\": 569.0, \"mean\": 2.8660592267135327, \"std\": 2.0218545540421076, \"min\": 0.757, \"25%\": 1.606, \"50%\": 2.287, \"75%\": 3.357, \"max\": 21.98}, \"area_se\": {\"count\": 569.0, \"mean\": 40.337079086116, \"std\": 45.49100551613181, \"min\": 6.802, \"25%\": 17.85, \"50%\": 24.53, \"75%\": 45.19, \"max\": 542.2}, \"smoothness_se\": {\"count\": 569.0, \"mean\": 0.007040978910369069, \"std\": 0.0030025179438390656, \"min\": 0.001713, \"25%\": 0.005169, \"50%\": 0.00638, \"75%\": 0.008146, \"max\": 0.03113}, \"compactness_se\": {\"count\": 569.0, \"mean\": 0.025478138840070295, \"std\": 0.017908179325677388, \"min\": 0.002252, \"25%\": 0.01308, \"50%\": 0.02045, \"75%\": 0.03245, \"max\": 0.1354}, \"concavity_se\": {\"count\": 569.0, \"mean\": 0.03189371634446397, \"std\": 0.03018606032298841, \"min\": 0.0, \"25%\": 0.01509, \"50%\": 0.02589, \"75%\": 0.04205, \"max\": 0.396}, \"concave points_se\": {\"count\": 569.0, \"mean\": 0.011796137082601054, \"std\": 0.006170285174046869, \"min\": 0.0, \"25%\": 0.007638, \"50%\": 0.01093, \"75%\": 0.01471, \"max\": 0.05279}, \"symmetry_se\": {\"count\": 569.0, \"mean\": 0.02054229876977153, \"std\": 0.008266371528798399, \"min\": 0.007882, \"25%\": 0.01516, \"50%\": 0.01873, \"75%\": 0.02348, \"max\": 0.07895}, \"fractal_dimension_se\": {\"count\": 569.0, \"mean\": 0.0037949038664323374, \"std\": 0.002646070967089195, 
\"min\": 0.0008948, \"25%\": 0.002248, \"50%\": 0.003187, \"75%\": 0.004558, \"max\": 0.02984}, \"radius_worst\": {\"count\": 569.0, \"mean\": 16.269189806678387, \"std\": 4.833241580469323, \"min\": 7.93, \"25%\": 13.01, \"50%\": 14.97, \"75%\": 18.79, \"max\": 36.04}, \"texture_worst\": {\"count\": 569.0, \"mean\": 25.677223198594024, \"std\": 6.146257623038319, \"min\": 12.02, \"25%\": 21.08, \"50%\": 25.41, \"75%\": 29.72, \"max\": 49.54}, \"perimeter_worst\": {\"count\": 569.0, \"mean\": 107.26121265377857, \"std\": 33.602542269036356, \"min\": 50.41, \"25%\": 84.11, \"50%\": 97.66, \"75%\": 125.4, \"max\": 251.2}, \"area_worst\": {\"count\": 569.0, \"mean\": 880.5831282952548, \"std\": 569.356992669949, \"min\": 185.2, \"25%\": 515.3, \"50%\": 686.5, \"75%\": 1084.0, \"max\": 4254.0}, \"smoothness_worst\": {\"count\": 569.0, \"mean\": 0.13236859402460457, \"std\": 0.022832429404835465, \"min\": 0.07117, \"25%\": 0.1166, \"50%\": 0.1313, \"75%\": 0.146, \"max\": 0.2226}, \"compactness_worst\": {\"count\": 569.0, \"mean\": 0.25426504393673116, \"std\": 0.157336488913742, \"min\": 0.02729, \"25%\": 0.1472, \"50%\": 0.2119, \"75%\": 0.3391, \"max\": 1.058}, \"concavity_worst\": {\"count\": 569.0, \"mean\": 0.27218848330404216, \"std\": 0.2086242806081323, \"min\": 0.0, \"25%\": 0.1145, \"50%\": 0.2267, \"75%\": 0.3829, \"max\": 1.252}, \"concave points_worst\": {\"count\": 569.0, \"mean\": 0.11460622319859401, \"std\": 0.06573234119594207, \"min\": 0.0, \"25%\": 0.06493, \"50%\": 0.09993, \"75%\": 0.1614, \"max\": 0.291}, \"symmetry_worst\": {\"count\": 569.0, \"mean\": 0.2900755711775044, \"std\": 0.061867467537518685, \"min\": 0.1565, \"25%\": 0.2504, \"50%\": 0.2822, \"75%\": 0.3179, \"max\": 0.6638}, \"fractal_dimension_worst\": {\"count\": 569.0, \"mean\": 0.0839458172231986, \"std\": 0.018061267348893986, \"min\": 0.05504, \"25%\": 0.07146, \"50%\": 0.08004, \"75%\": 0.09208, \"max\": 0.2075}}", "examples": "{\"id\":{\"0\":842302,\"1\":842517,\"2\":84300903,\"3\":84348301},\"diagnosis\":{\"0\":\"M\",\"1\":\"M\",\"2\":\"M\",\"3\":\"M\"},\"radius_mean\":{\"0\":17.99,\"1\":20.57,\"2\":19.69,\"3\":11.42},\"texture_mean\":{\"0\":10.38,\"1\":17.77,\"2\":21.25,\"3\":20.38},\"perimeter_mean\":{\"0\":122.8,\"1\":132.9,\"2\":130.0,\"3\":77.58},\"area_mean\":{\"0\":1001.0,\"1\":1326.0,\"2\":1203.0,\"3\":386.1},\"smoothness_mean\":{\"0\":0.1184,\"1\":0.08474,\"2\":0.1096,\"3\":0.1425},\"compactness_mean\":{\"0\":0.2776,\"1\":0.07864,\"2\":0.1599,\"3\":0.2839},\"concavity_mean\":{\"0\":0.3001,\"1\":0.0869,\"2\":0.1974,\"3\":0.2414},\"concave points_mean\":{\"0\":0.1471,\"1\":0.07017,\"2\":0.1279,\"3\":0.1052},\"symmetry_mean\":{\"0\":0.2419,\"1\":0.1812,\"2\":0.2069,\"3\":0.2597},\"fractal_dimension_mean\":{\"0\":0.07871,\"1\":0.05667,\"2\":0.05999,\"3\":0.09744},\"radius_se\":{\"0\":1.095,\"1\":0.5435,\"2\":0.7456,\"3\":0.4956},\"texture_se\":{\"0\":0.9053,\"1\":0.7339,\"2\":0.7869,\"3\":1.156},\"perimeter_se\":{\"0\":8.589,\"1\":3.398,\"2\":4.585,\"3\":3.445},\"area_se\":{\"0\":153.4,\"1\":74.08,\"2\":94.03,\"3\":27.23},\"smoothness_se\":{\"0\":0.006399,\"1\":0.005225,\"2\":0.00615,\"3\":0.00911},\"compactness_se\":{\"0\":0.04904,\"1\":0.01308,\"2\":0.04006,\"3\":0.07458},\"concavity_se\":{\"0\":0.05373,\"1\":0.0186,\"2\":0.03832,\"3\":0.05661},\"concave 
points_se\":{\"0\":0.01587,\"1\":0.0134,\"2\":0.02058,\"3\":0.01867},\"symmetry_se\":{\"0\":0.03003,\"1\":0.01389,\"2\":0.0225,\"3\":0.05963},\"fractal_dimension_se\":{\"0\":0.006193,\"1\":0.003532,\"2\":0.004571,\"3\":0.009208},\"radius_worst\":{\"0\":25.38,\"1\":24.99,\"2\":23.57,\"3\":14.91},\"texture_worst\":{\"0\":17.33,\"1\":23.41,\"2\":25.53,\"3\":26.5},\"perimeter_worst\":{\"0\":184.6,\"1\":158.8,\"2\":152.5,\"3\":98.87},\"area_worst\":{\"0\":2019.0,\"1\":1956.0,\"2\":1709.0,\"3\":567.7},\"smoothness_worst\":{\"0\":0.1622,\"1\":0.1238,\"2\":0.1444,\"3\":0.2098},\"compactness_worst\":{\"0\":0.6656,\"1\":0.1866,\"2\":0.4245,\"3\":0.8663},\"concavity_worst\":{\"0\":0.7119,\"1\":0.2416,\"2\":0.4504,\"3\":0.6869},\"concave points_worst\":{\"0\":0.2654,\"1\":0.186,\"2\":0.243,\"3\":0.2575},\"symmetry_worst\":{\"0\":0.4601,\"1\":0.275,\"2\":0.3613,\"3\":0.6638},\"fractal_dimension_worst\":{\"0\":0.1189,\"1\":0.08902,\"2\":0.08758,\"3\":0.173}}"}}]
| true | 1 |
<start_data_description><data_path>breast-cancer-dataset/breast-cancer.csv:
<column_names>
['id', 'diagnosis', 'radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean', 'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean', 'radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se', 'compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se', 'fractal_dimension_se', 'radius_worst', 'texture_worst', 'perimeter_worst', 'area_worst', 'smoothness_worst', 'compactness_worst', 'concavity_worst', 'concave points_worst', 'symmetry_worst', 'fractal_dimension_worst']
<column_types>
{'id': 'int64', 'diagnosis': 'object', 'radius_mean': 'float64', 'texture_mean': 'float64', 'perimeter_mean': 'float64', 'area_mean': 'float64', 'smoothness_mean': 'float64', 'compactness_mean': 'float64', 'concavity_mean': 'float64', 'concave points_mean': 'float64', 'symmetry_mean': 'float64', 'fractal_dimension_mean': 'float64', 'radius_se': 'float64', 'texture_se': 'float64', 'perimeter_se': 'float64', 'area_se': 'float64', 'smoothness_se': 'float64', 'compactness_se': 'float64', 'concavity_se': 'float64', 'concave points_se': 'float64', 'symmetry_se': 'float64', 'fractal_dimension_se': 'float64', 'radius_worst': 'float64', 'texture_worst': 'float64', 'perimeter_worst': 'float64', 'area_worst': 'float64', 'smoothness_worst': 'float64', 'compactness_worst': 'float64', 'concavity_worst': 'float64', 'concave points_worst': 'float64', 'symmetry_worst': 'float64', 'fractal_dimension_worst': 'float64'}
<dataframe_Summary>
{'id': {'count': 569.0, 'mean': 30371831.432337433, 'std': 125020585.61222365, 'min': 8670.0, '25%': 869218.0, '50%': 906024.0, '75%': 8813129.0, 'max': 911320502.0}, 'radius_mean': {'count': 569.0, 'mean': 14.127291739894552, 'std': 3.5240488262120775, 'min': 6.981, '25%': 11.7, '50%': 13.37, '75%': 15.78, 'max': 28.11}, 'texture_mean': {'count': 569.0, 'mean': 19.289648506151142, 'std': 4.301035768166949, 'min': 9.71, '25%': 16.17, '50%': 18.84, '75%': 21.8, 'max': 39.28}, 'perimeter_mean': {'count': 569.0, 'mean': 91.96903339191564, 'std': 24.298981038754906, 'min': 43.79, '25%': 75.17, '50%': 86.24, '75%': 104.1, 'max': 188.5}, 'area_mean': {'count': 569.0, 'mean': 654.8891036906855, 'std': 351.914129181653, 'min': 143.5, '25%': 420.3, '50%': 551.1, '75%': 782.7, 'max': 2501.0}, 'smoothness_mean': {'count': 569.0, 'mean': 0.0963602811950791, 'std': 0.01406412813767362, 'min': 0.05263, '25%': 0.08637, '50%': 0.09587, '75%': 0.1053, 'max': 0.1634}, 'compactness_mean': {'count': 569.0, 'mean': 0.10434098418277679, 'std': 0.052812757932512194, 'min': 0.01938, '25%': 0.06492, '50%': 0.09263, '75%': 0.1304, 'max': 0.3454}, 'concavity_mean': {'count': 569.0, 'mean': 0.0887993158172232, 'std': 0.07971980870789348, 'min': 0.0, '25%': 0.02956, '50%': 0.06154, '75%': 0.1307, 'max': 0.4268}, 'concave points_mean': {'count': 569.0, 'mean': 0.04891914586994728, 'std': 0.038802844859153605, 'min': 0.0, '25%': 0.02031, '50%': 0.0335, '75%': 0.074, 'max': 0.2012}, 'symmetry_mean': {'count': 569.0, 'mean': 0.18116186291739894, 'std': 0.027414281336035715, 'min': 0.106, '25%': 0.1619, '50%': 0.1792, '75%': 0.1957, 'max': 0.304}, 'fractal_dimension_mean': {'count': 569.0, 'mean': 0.06279760984182776, 'std': 0.007060362795084459, 'min': 0.04996, '25%': 0.0577, '50%': 0.06154, '75%': 0.06612, 'max': 0.09744}, 'radius_se': {'count': 569.0, 'mean': 0.40517205623901575, 'std': 0.2773127329861039, 'min': 0.1115, '25%': 0.2324, '50%': 0.3242, '75%': 0.4789, 'max': 2.873}, 'texture_se': {'count': 569.0, 'mean': 1.2168534270650264, 'std': 0.5516483926172023, 'min': 0.3602, '25%': 0.8339, '50%': 1.108, '75%': 1.474, 'max': 4.885}, 'perimeter_se': {'count': 569.0, 'mean': 2.8660592267135327, 'std': 2.0218545540421076, 'min': 0.757, '25%': 1.606, '50%': 2.287, '75%': 3.357, 'max': 21.98}, 'area_se': {'count': 569.0, 'mean': 40.337079086116, 'std': 45.49100551613181, 'min': 6.802, '25%': 17.85, '50%': 24.53, '75%': 45.19, 'max': 542.2}, 'smoothness_se': {'count': 569.0, 'mean': 0.007040978910369069, 'std': 0.0030025179438390656, 'min': 0.001713, '25%': 0.005169, '50%': 0.00638, '75%': 0.008146, 'max': 0.03113}, 'compactness_se': {'count': 569.0, 'mean': 0.025478138840070295, 'std': 0.017908179325677388, 'min': 0.002252, '25%': 0.01308, '50%': 0.02045, '75%': 0.03245, 'max': 0.1354}, 'concavity_se': {'count': 569.0, 'mean': 0.03189371634446397, 'std': 0.03018606032298841, 'min': 0.0, '25%': 0.01509, '50%': 0.02589, '75%': 0.04205, 'max': 0.396}, 'concave points_se': {'count': 569.0, 'mean': 0.011796137082601054, 'std': 0.006170285174046869, 'min': 0.0, '25%': 0.007638, '50%': 0.01093, '75%': 0.01471, 'max': 0.05279}, 'symmetry_se': {'count': 569.0, 'mean': 0.02054229876977153, 'std': 0.008266371528798399, 'min': 0.007882, '25%': 0.01516, '50%': 0.01873, '75%': 0.02348, 'max': 0.07895}, 'fractal_dimension_se': {'count': 569.0, 'mean': 0.0037949038664323374, 'std': 0.002646070967089195, 'min': 0.0008948, '25%': 0.002248, '50%': 0.003187, '75%': 0.004558, 'max': 0.02984}, 'radius_worst': {'count': 569.0, 'mean': 
16.269189806678387, 'std': 4.833241580469323, 'min': 7.93, '25%': 13.01, '50%': 14.97, '75%': 18.79, 'max': 36.04}, 'texture_worst': {'count': 569.0, 'mean': 25.677223198594024, 'std': 6.146257623038319, 'min': 12.02, '25%': 21.08, '50%': 25.41, '75%': 29.72, 'max': 49.54}, 'perimeter_worst': {'count': 569.0, 'mean': 107.26121265377857, 'std': 33.602542269036356, 'min': 50.41, '25%': 84.11, '50%': 97.66, '75%': 125.4, 'max': 251.2}, 'area_worst': {'count': 569.0, 'mean': 880.5831282952548, 'std': 569.356992669949, 'min': 185.2, '25%': 515.3, '50%': 686.5, '75%': 1084.0, 'max': 4254.0}, 'smoothness_worst': {'count': 569.0, 'mean': 0.13236859402460457, 'std': 0.022832429404835465, 'min': 0.07117, '25%': 0.1166, '50%': 0.1313, '75%': 0.146, 'max': 0.2226}, 'compactness_worst': {'count': 569.0, 'mean': 0.25426504393673116, 'std': 0.157336488913742, 'min': 0.02729, '25%': 0.1472, '50%': 0.2119, '75%': 0.3391, 'max': 1.058}, 'concavity_worst': {'count': 569.0, 'mean': 0.27218848330404216, 'std': 0.2086242806081323, 'min': 0.0, '25%': 0.1145, '50%': 0.2267, '75%': 0.3829, 'max': 1.252}, 'concave points_worst': {'count': 569.0, 'mean': 0.11460622319859401, 'std': 0.06573234119594207, 'min': 0.0, '25%': 0.06493, '50%': 0.09993, '75%': 0.1614, 'max': 0.291}, 'symmetry_worst': {'count': 569.0, 'mean': 0.2900755711775044, 'std': 0.061867467537518685, 'min': 0.1565, '25%': 0.2504, '50%': 0.2822, '75%': 0.3179, 'max': 0.6638}, 'fractal_dimension_worst': {'count': 569.0, 'mean': 0.0839458172231986, 'std': 0.018061267348893986, 'min': 0.05504, '25%': 0.07146, '50%': 0.08004, '75%': 0.09208, 'max': 0.2075}}
<dataframe_info>
RangeIndex: 569 entries, 0 to 568
Data columns (total 32 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
1 diagnosis 569 non-null object
2 radius_mean 569 non-null float64
3 texture_mean 569 non-null float64
4 perimeter_mean 569 non-null float64
5 area_mean 569 non-null float64
6 smoothness_mean 569 non-null float64
7 compactness_mean 569 non-null float64
8 concavity_mean 569 non-null float64
9 concave points_mean 569 non-null float64
10 symmetry_mean 569 non-null float64
11 fractal_dimension_mean 569 non-null float64
12 radius_se 569 non-null float64
13 texture_se 569 non-null float64
14 perimeter_se 569 non-null float64
15 area_se 569 non-null float64
16 smoothness_se 569 non-null float64
17 compactness_se 569 non-null float64
18 concavity_se 569 non-null float64
19 concave points_se 569 non-null float64
20 symmetry_se 569 non-null float64
21 fractal_dimension_se 569 non-null float64
22 radius_worst 569 non-null float64
23 texture_worst 569 non-null float64
24 perimeter_worst 569 non-null float64
25 area_worst 569 non-null float64
26 smoothness_worst 569 non-null float64
27 compactness_worst 569 non-null float64
28 concavity_worst 569 non-null float64
29 concave points_worst 569 non-null float64
30 symmetry_worst 569 non-null float64
31 fractal_dimension_worst 569 non-null float64
dtypes: float64(30), int64(1), object(1)
memory usage: 142.4+ KB
<some_examples>
{'id': {'0': 842302, '1': 842517, '2': 84300903, '3': 84348301}, 'diagnosis': {'0': 'M', '1': 'M', '2': 'M', '3': 'M'}, 'radius_mean': {'0': 17.99, '1': 20.57, '2': 19.69, '3': 11.42}, 'texture_mean': {'0': 10.38, '1': 17.77, '2': 21.25, '3': 20.38}, 'perimeter_mean': {'0': 122.8, '1': 132.9, '2': 130.0, '3': 77.58}, 'area_mean': {'0': 1001.0, '1': 1326.0, '2': 1203.0, '3': 386.1}, 'smoothness_mean': {'0': 0.1184, '1': 0.08474, '2': 0.1096, '3': 0.1425}, 'compactness_mean': {'0': 0.2776, '1': 0.07864, '2': 0.1599, '3': 0.2839}, 'concavity_mean': {'0': 0.3001, '1': 0.0869, '2': 0.1974, '3': 0.2414}, 'concave points_mean': {'0': 0.1471, '1': 0.07017, '2': 0.1279, '3': 0.1052}, 'symmetry_mean': {'0': 0.2419, '1': 0.1812, '2': 0.2069, '3': 0.2597}, 'fractal_dimension_mean': {'0': 0.07871, '1': 0.05667, '2': 0.05999, '3': 0.09744}, 'radius_se': {'0': 1.095, '1': 0.5435, '2': 0.7456, '3': 0.4956}, 'texture_se': {'0': 0.9053, '1': 0.7339, '2': 0.7869, '3': 1.156}, 'perimeter_se': {'0': 8.589, '1': 3.398, '2': 4.585, '3': 3.445}, 'area_se': {'0': 153.4, '1': 74.08, '2': 94.03, '3': 27.23}, 'smoothness_se': {'0': 0.006399, '1': 0.005225, '2': 0.00615, '3': 0.00911}, 'compactness_se': {'0': 0.04904, '1': 0.01308, '2': 0.04006, '3': 0.07458}, 'concavity_se': {'0': 0.05373, '1': 0.0186, '2': 0.03832, '3': 0.05661}, 'concave points_se': {'0': 0.01587, '1': 0.0134, '2': 0.02058, '3': 0.01867}, 'symmetry_se': {'0': 0.03003, '1': 0.01389, '2': 0.0225, '3': 0.05963}, 'fractal_dimension_se': {'0': 0.006193, '1': 0.003532, '2': 0.004571, '3': 0.009208}, 'radius_worst': {'0': 25.38, '1': 24.99, '2': 23.57, '3': 14.91}, 'texture_worst': {'0': 17.33, '1': 23.41, '2': 25.53, '3': 26.5}, 'perimeter_worst': {'0': 184.6, '1': 158.8, '2': 152.5, '3': 98.87}, 'area_worst': {'0': 2019.0, '1': 1956.0, '2': 1709.0, '3': 567.7}, 'smoothness_worst': {'0': 0.1622, '1': 0.1238, '2': 0.1444, '3': 0.2098}, 'compactness_worst': {'0': 0.6656, '1': 0.1866, '2': 0.4245, '3': 0.8663}, 'concavity_worst': {'0': 0.7119, '1': 0.2416, '2': 0.4504, '3': 0.6869}, 'concave points_worst': {'0': 0.2654, '1': 0.186, '2': 0.243, '3': 0.2575}, 'symmetry_worst': {'0': 0.4601, '1': 0.275, '2': 0.3613, '3': 0.6638}, 'fractal_dimension_worst': {'0': 0.1189, '1': 0.08902, '2': 0.08758, '3': 0.173}}
<end_description>
| 806 | 8 | 3,208 | 806 |
129743610
|
# # Import the datasets
import pandas as pd
train = pd.read_csv("/kaggle/input/titanic/train.csv", encoding="utf-8")
test = pd.read_csv("/kaggle/input/titanic/test.csv", encoding="utf-8")
# check df
# train.columns.tolist(), test.columns.tolist()
# # Data preprocessing
# 1. Handle missing values
#    1. Numeric -> fill with the median
#    2. Categorical -> fill with the most frequent category
# 2. One-Hot encoding
# 3. Extract more information from the existing columns
total = pd.concat([train, test], axis=0)
total = total.drop(["PassengerId", "Survived"], axis=1)
total
# Cabin
def cabin_preprocess(c):
if pd.isna(c):
return c
else:
return c[0]
total["Cabin"].apply(cabin_preprocess)
total["Cabin"] = total["Cabin"].apply(cabin_preprocess)
# Ticket
ticket_count = total["Ticket"].value_counts()  # how many passengers hold the same type of ticket
def ticket_preprocess(t):
if pd.isna(t):
return t
else:
return ticket_count[t]
total["Ticket"].apply(ticket_preprocess)
total["Ticket"] = total["Ticket"].apply(ticket_preprocess)
# Name
total["Name"].isna().value_counts()  # confirm Name has no missing values
def name_preprocess(n):
return n.split(".")[0].split(",")[-1]
total["Name"].apply(name_preprocess)
total["Name"] = total["Name"].apply(name_preprocess)
# Find missing values in numeric columns -> fill with the median
total.median()  # whether Pclass is really numeric is worth revisiting (treated as categorical here)
median = total.median().drop("Pclass")
total.fillna(median)
total = total.fillna(median)
# Embarked
# Categorical -> fill with the most frequent value
total["Embarked"].value_counts()
most = total["Embarked"].value_counts().idxmax()
total["Embarked"].fillna(most)
total["Embarked"] = total["Embarked"].fillna(most)
# Categorical data needs extra processing -> One hot encoding
# Is it worth doing for the "Pclass" we just dropped? (need to actually test it to know the effect)
# Sex is binary, so encoding it is optional
# Name -> One hot encoding
name_count = total["Name"].value_counts()
name_count[name_count > 10]
name_reserved = name_count[name_count > 10].index
def name_onehot(n):
if n in name_reserved:
return n
else:
return None
total["Name"].apply(name_onehot)
total["Name"] = total["Name"].apply(name_onehot)
# One hot encoding
total = pd.get_dummies(total)  # one hot encode all string columns at once
# Pclass -> One hot encoding
total = pd.get_dummies(total, columns=["Pclass"])  # Pclass is stored as numbers, so one hot it explicitly
# Add a Family column -> so the decision tree does not only look at SibSp or Parch separately and miss the total family size
total["Family"] = total["SibSp"] + total["Parch"]
# # Scaling
# Used when the model computes distances between data points
# For Titanic the features have different units and very different ranges, so scaling is needed
# For something like iris the ranges are similar and what mainly matters is comparing magnitudes, so it can be skipped
# (in practice you can just always do it)
# 1. MinMax scaling : maps values to 0 - 1 (method : (value - min) / (max - min))
# 2. Standardization : rescales the data to mean 0 and standard deviation 1
# 3. Robust Scaling
# 4. Max Abs Scaling
# 5. Unit Vector Scaling
# ...
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
total_scale = scaler.fit_transform(total)
total_scale = pd.DataFrame(total_scale)
total_scale
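# --- Added sketch (not part of the original notebook) ---
# The "Standardization" option listed above, shown with sklearn's StandardScaler
# (mean 0, standard deviation 1); std_scaler / total_std are new names introduced here
# so the MinMax result above is not overwritten.
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
total_std = pd.DataFrame(std_scaler.fit_transform(total), columns=total.columns)
total_std.describe()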
# # Splitting the data
import numpy as np
x = np.array(total_scale.iloc[: len(train)]) # x_train
y = np.array(train["Survived"]) # y_train
x_predict = np.array(total_scale.iloc[len(train) :])
x.shape, y.shape, x_predict.shape
# # Cross-validation
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
maxk, maxscore = None, -1
for k in range(3, 20):  # what we are doing here is a grid search
clf = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(clf, x, y, cv=10, n_jobs=-1)  # 10-fold cross-validation, giving 10 scores
avg = np.average(scores)
print(k, avg)
if avg > maxscore:
maxk, maxscore = k, avg
print("Best result:", maxk, maxscore)
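# --- Added sketch (not part of the original notebook) ---
# The manual loop above is effectively a grid search over k; the same search written
# with sklearn's GridSearchCV (knn_search is a new name introduced here).
from sklearn.model_selection import GridSearchCV
knn_search = GridSearchCV(
    KNeighborsClassifier(), {"n_neighbors": range(3, 20)}, cv=10, n_jobs=-1
)
knn_search.fit(x, y)
print("GridSearchCV best:", knn_search.best_params_, knn_search.best_score_)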
# # KNN
clf = KNeighborsClassifier(n_neighbors=11)
clf.fit(x, y)
pre = clf.predict(x_predict)
pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre}).to_csv(
"knn_5.csv", encoding="utf-8", index=False
)
total
# # Random forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
params = {"n_estimators": range(21, 101, 2), "max_depth": range(6, 21)}
clf = RandomForestClassifier()
search = GridSearchCV(clf, params, cv=10, n_jobs=-1)
search.fit(x, y)
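# --- Added sketch (not part of the original notebook) ---
# Inspect the random forest grid search and build a submission from the refit best
# estimator; the output filename below is arbitrary.
print(search.best_params_, search.best_score_)
rf_pred = search.best_estimator_.predict(x_predict)
pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": rf_pred}).to_csv(
    "random_forest_best.csv", encoding="utf-8", index=False
)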
# # Extra notes
# 1. Pandas filtering operations
# 2. loc vs iloc
# 3. Ways to split the data
# Pandas filtering operations
test_df = pd.DataFrame([[1, 2], [3, 4], [5, 6]])
test_df[[True, False, True]]
# loc vs iloc
test_df = pd.DataFrame([[1, 2], [3, 4]], index=[0, 0])
test_df.loc[0]  # returns both rows (because both rows have index 0)
test_df.iloc[0]  # returns only the first row at position 0
# Ways to split the data
# Two-way split -> train / test -> tune the model parameters based on the test results
# Three-way split -> train / test / valid -> same as above, but finally validate with a valid set the model has never seen
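# --- Added sketch (not part of the original notebook) ---
# The three-way split described above, built from two calls to train_test_split;
# the split ratios and variable names are illustrative only.
from sklearn.model_selection import train_test_split
x_tr, x_rest, y_tr, y_rest = train_test_split(x, y, test_size=0.4, random_state=0)
x_te, x_val, y_te, y_val = train_test_split(x_rest, y_rest, test_size=0.5, random_state=0)
print(x_tr.shape, x_te.shape, x_val.shape)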
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/743/129743610.ipynb
| null | null |
[{"Id": 129743610, "ScriptId": 38575404, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5948419, "CreationDate": "05/16/2023 07:05:46", "VersionNumber": 2.0, "Title": "Titanic_KNN & Random_Forest", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 200.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 129.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Import the datasets
import pandas as pd
train = pd.read_csv("/kaggle/input/titanic/train.csv", encoding="utf-8")
test = pd.read_csv("/kaggle/input/titanic/test.csv", encoding="utf-8")
# check df
# train.columns.tolist(), test.columns.tolist()
# # Data preprocessing
# 1. Handle missing values
#    1. Numeric -> fill with the median
#    2. Categorical -> fill with the most frequent category
# 2. One-Hot encoding
# 3. Extract more information from the existing columns
total = pd.concat([train, test], axis=0)
total = total.drop(["PassengerId", "Survived"], axis=1)
total
# Cabin
def cabin_preprocess(c):
if pd.isna(c):
return c
else:
return c[0]
total["Cabin"].apply(cabin_preprocess)
total["Cabin"] = total["Cabin"].apply(cabin_preprocess)
# Ticket
ticket_count = total["Ticket"].value_counts()  # how many passengers hold the same type of ticket
def ticket_preprocess(t):
if pd.isna(t):
return t
else:
return ticket_count[t]
total["Ticket"].apply(ticket_preprocess)
total["Ticket"] = total["Ticket"].apply(ticket_preprocess)
# Name
total["Name"].isna().value_counts()  # confirm Name has no missing values
def name_preprocess(n):
return n.split(".")[0].split(",")[-1]
total["Name"].apply(name_preprocess)
total["Name"] = total["Name"].apply(name_preprocess)
# Find missing values in numeric columns -> fill with the median
total.median()  # whether Pclass is really numeric is worth revisiting (treated as categorical here)
median = total.median().drop("Pclass")
total.fillna(median)
total = total.fillna(median)
# Embarked
# Categorical -> fill with the most frequent value
total["Embarked"].value_counts()
most = total["Embarked"].value_counts().idxmax()
total["Embarked"].fillna(most)
total["Embarked"] = total["Embarked"].fillna(most)
# Categorical data needs extra processing -> One hot encoding
# Is it worth doing for the "Pclass" we just dropped? (need to actually test it to know the effect)
# Sex is binary, so encoding it is optional
# Name -> One hot encoding
name_count = total["Name"].value_counts()
name_count[name_count > 10]
name_reserved = name_count[name_count > 10].index
def name_onehot(n):
if n in name_reserved:
return n
else:
return None
total["Name"].apply(name_onehot)
total["Name"] = total["Name"].apply(name_onehot)
# One hot encoding
total = pd.get_dummies(total)  # one hot encode all string columns at once
# Pclass -> One hot encoding
total = pd.get_dummies(total, columns=["Pclass"])  # Pclass is stored as numbers, so one hot it explicitly
# Add a Family column -> so the decision tree does not only look at SibSp or Parch separately and miss the total family size
total["Family"] = total["SibSp"] + total["Parch"]
# # Scaling
# Used when the model computes distances between data points
# For Titanic the features have different units and very different ranges, so scaling is needed
# For something like iris the ranges are similar and what mainly matters is comparing magnitudes, so it can be skipped
# (in practice you can just always do it)
# 1. MinMax scaling : maps values to 0 - 1 (method : (value - min) / (max - min))
# 2. Standardization : rescales the data to mean 0 and standard deviation 1
# 3. Robust Scaling
# 4. Max Abs Scaling
# 5. Unit Vector Scaling
# ...
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
total_scale = scaler.fit_transform(total)
total_scale = pd.DataFrame(total_scale)
total_scale
# # Splitting the data
import numpy as np
x = np.array(total_scale.iloc[: len(train)]) # x_train
y = np.array(train["Survived"]) # y_train
x_predict = np.array(total_scale.iloc[len(train) :])
x.shape, y.shape, x_predict.shape
# # Cross-validation
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
maxk, maxscore = None, -1
for k in range(3, 20):  # what we are doing here is a grid search
clf = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(clf, x, y, cv=10, n_jobs=-1)  # 10-fold cross-validation, giving 10 scores
avg = np.average(scores)
print(k, avg)
if avg > maxscore:
maxk, maxscore = k, avg
print("Best result:", maxk, maxscore)
# # KNN
clf = KNeighborsClassifier(n_neighbors=11)
clf.fit(x, y)
pre = clf.predict(x_predict)
pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre}).to_csv(
"knn_5.csv", encoding="utf-8", index=False
)
total
# # Random forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
params = {"n_estimators": range(21, 101, 2), "max_depth": range(6, 21)}
clf = RandomForestClassifier()
search = GridSearchCV(clf, params, cv=10, n_jobs=-1)
search.fit(x, y)
# # Extra notes
# 1. Pandas filtering operations
# 2. loc vs iloc
# 3. Ways to split the data
# Pandas filtering operations
test_df = pd.DataFrame([[1, 2], [3, 4], [5, 6]])
test_df[[True, False, True]]
# loc vs iloc
test_df = pd.DataFrame([[1, 2], [3, 4]], index=[0, 0])
test_df.loc[0]  # returns both rows (because both rows have index 0)
test_df.iloc[0]  # returns only the first row at position 0
# Ways to split the data
# Two-way split -> train / test -> tune the model parameters based on the test results
# Three-way split -> train / test / valid -> same as above, but finally validate with a valid set the model has never seen
| false | 0 | 1,881 | 0 | 1,881 | 1,881 |
||
129734489
|
import numpy as np # linear algebra
import pandas as pd # train_data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
train_data = pd.read_csv("/kaggle/input/playground-series-s3e5/train.csv")
submission = pd.read_csv("/kaggle/input/playground-series-s3e5/sample_submission.csv")
# ### Preprocessing and analysing the train_data
train_data.sample(5)
train_data.describe(include="all")
# checking the distribution of values in 'quality' and counting them
train_data.quality.value_counts()
# Checking the null values
train_data.isnull().sum().sum()
# checking the duplicated values
train_data.duplicated().sum()
# correlation of quality with all the other factors used in wine
train_data.corr()["quality"].sort_values(ascending=True)
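# --- Added sketch (not part of the original notebook) ---
# One way to act on the correlation check above: list the features whose absolute
# correlation with quality exceeds a hypothetical 0.2 threshold.
corr_with_quality = train_data.corr()["quality"].drop(["quality", "Id"])
print(corr_with_quality[corr_with_quality.abs() > 0.2].sort_values())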
# test_data.shape
# ### Visualizing the train_data
sns_plot = sns.clustermap(train_data.corr(), cmap="rocket_r")
sns.distplot(train_data["quality"])
fig, ax = plt.subplots(figsize=(8, 8))
sns.heatmap(
train_data[
[
"Id",
"fixed acidity",
"volatile acidity",
"citric acid",
"residual sugar",
"chlorides",
"free sulfur dioxide",
"total sulfur dioxide",
"density",
"pH",
"sulphates",
"alcohol",
"quality",
]
]
)
plt.show()
sns.histplot(data=train_data, x="total sulfur dioxide")
sns.distplot(train_data["citric acid"])
# ### Test Data
test_data = pd.read_csv("/kaggle/input/playground-series-s3e5/test.csv")
test_data.head(4)
test_data.shape
test_data.isnull().sum().sum()
test_data.describe()
# ### Training the data
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
x = train_data.drop("quality", axis=1)
y = train_data["quality"]
print(x)
print(y)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.4, random_state=42
)
# normalizing the data
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logreg = LogisticRegression()
logreg.fit(x_train, y_train)
y_pred = logreg.predict(x_test)
accuracy = round(accuracy_score(y_test, y_pred) * 100, 2)
print("Accuracy: {}%".format(accuracy))
randomforest = RandomForestClassifier()
randomforest.fit(x_train, y_train)
y_pred = randomforest.predict(x_test)
accuracy_randomforest = round(accuracy_score(y_pred, y_test) * 100, 2)
print(accuracy_randomforest)
# The original assigned a length-1 Series here, which only aligns with row 0 of the
# submission and leaves NaNs; taking the scalar modal class keeps the intent intact.
logit_preds_test = pd.DataFrame(y_pred).mode(axis=0).iloc[0, 0]
submission["quality"] = int(logit_preds_test)
submission.to_csv("randomsubmission3.csv", index=False)
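# --- Added sketch (not part of the original notebook) ---
# A row-by-row submission would normally come from predicting on the test file rather
# than a single modal class; this assumes test_data has the same columns as x (Id plus
# the physicochemical features) in the same order, and the filename is arbitrary.
x_submit = scaler.transform(test_data)
submission["quality"] = randomforest.predict(x_submit).astype(int)
submission.to_csv("randomforest_testset_submission.csv", index=False)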
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/734/129734489.ipynb
| null | null |
[{"Id": 129734489, "ScriptId": 38538382, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11565882, "CreationDate": "05/16/2023 05:38:32", "VersionNumber": 1.0, "Title": "Ordinal Regression Wine Quality Tabular Data", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 96.0, "LinesInsertedFromPrevious": 96.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # train_data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
train_data = pd.read_csv("/kaggle/input/playground-series-s3e5/train.csv")
submission = pd.read_csv("/kaggle/input/playground-series-s3e5/sample_submission.csv")
# ### Preprocessing and analysing the train_data
train_data.sample(5)
train_data.describe(include="all")
# checking the distribution of values in 'quality' and counting them
train_data.quality.value_counts()
# Checking the null values
train_data.isnull().sum().sum()
# checking the duplicated values
train_data.duplicated().sum()
# correlation of quality with all the other factors used in wine
train_data.corr()["quality"].sort_values(ascending=True)
# test_data.shape
# ### Visualizing the train_data
sns_plot = sns.clustermap(train_data.corr(), cmap="rocket_r")
sns.distplot(train_data["quality"])
fig, ax = plt.subplots(figsize=(8, 8))
sns.heatmap(
train_data[
[
"Id",
"fixed acidity",
"volatile acidity",
"citric acid",
"residual sugar",
"chlorides",
"free sulfur dioxide",
"total sulfur dioxide",
"density",
"pH",
"sulphates",
"alcohol",
"quality",
]
]
)
plt.show()
sns.histplot(data=train_data, x="total sulfur dioxide")
sns.distplot(train_data["citric acid"])
# ### Test Data
test_data = pd.read_csv("/kaggle/input/playground-series-s3e5/test.csv")
test_data.head(4)
test_data.shape
test_data.isnull().sum().sum()
test_data.describe()
# ### Training the data
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
x = train_data.drop("quality", axis=1)
y = train_data["quality"]
print(x)
print(y)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.4, random_state=42
)
# normalizing the data
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logreg = LogisticRegression()
logreg.fit(x_train, y_train)
y_pred = logreg.predict(x_test)
accuracy = round(accuracy_score(y_test, y_pred) * 100, 2)
print("Accuracy: {}%".format(accuracy))
randomforest = RandomForestClassifier()
randomforest.fit(x_train, y_train)
y_pred = randomforest.predict(x_test)
accuracy_randomforest = round(accuracy_score(y_pred, y_test) * 100, 2)
print(accuracy_randomforest)
# The original assigned a length-1 Series here, which only aligns with row 0 of the
# submission and leaves NaNs; taking the scalar modal class keeps the intent intact.
logit_preds_test = pd.DataFrame(y_pred).mode(axis=0).iloc[0, 0]
submission["quality"] = int(logit_preds_test)
submission.to_csv("randomsubmission3.csv", index=False)
| false | 0 | 891 | 0 | 891 | 891 |
||
129734803
|
<jupyter_start><jupyter_text>Meta Kaggle
Kaggle dataset identifier: meta-kaggle
<jupyter_script># # Your Username = Your Own Personal Progression Dashboard 📊
# 
# 1. Import **libraries** and **datasets**
# 2. Retrieving **AuthorUserId**
# 3. Displaying User **Achievement**
# 4. Displaying User **Kernels**
# 5. Displaying User **Kernels Vs Up-Votes**
# 6. Displaying User **Kernel Up-Votes History (only the days the user got up-votes)**
# 7. Displaying User **Kernels Vs Views**
# 9. Displaying **Datasets Vs TotalVotes**
# 10. Displaying **Datasets Vs TotalViews**
# 11. Displaying **Datasets Vs TotalDownloads**
# 12. Displaying User **Messages**
# 13. Displaying User **Messages Votes History**
# 14. Displaying User **Followers History (only the days the user got followers)**
# **NB : For this to work you need to insert your username:**
# # Enter your UserName Here :
# ------> Enter Your User Name Here >-----#
user_name = "pardeep19singh"
# ------> Enter Your User Name Here >-----#
# # Refs
# - https://www.kaggle.com/code/desalegngeb/plotly-guide-customize-for-better-visualizations
# # 1. Import libraries and datasets
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from itertools import cycle, islice
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import warnings
sns.set()
warnings.filterwarnings(action="ignore")
bg_color = "#EFEAE0"
custom_params = {
"axes.spines.right": False,
"axes.spines.top": False,
"grid.alpha": 0.3,
"figure.figsize": (16, 6),
"axes.titlesize": "Large",
"axes.labelsize": "Large",
"figure.facecolor": bg_color,
"axes.facecolor": bg_color,
}
sns.set_theme(style="whitegrid", rc=custom_params)
colors = [
"#495867",
"#50723C",
"#D19C1D",
"#FE5F55",
"#5D2A42",
"#4CC26E",
"#7CB4B8",
"#FF9F1C",
"#BCCC6D",
"#EC6346",
"#6A4545",
"#3979D3",
"#E95DA6",
"#FFBE0B",
"#577399",
"#6B7A55",
"#b23a48",
"#90B860",
"#EDA84C",
]
palette = sns.color_palette(palette=colors)
sns.palplot(palette, size=1)
plt.show()
# DIR_PATH = 'data'
DIR_PATH = "../input/meta-kaggle"
users_df = pd.read_csv(f"{DIR_PATH}/Users.csv")
acheivements_df = pd.read_csv(f"{DIR_PATH}/UserAchievements.csv")
# datasets = pd.read_csv('../input/meta-kaggle/Datasets.csv')
# datasetsVersions = pd.read_csv('../input/meta-kaggle/DatasetVersions.csv')
# ForumMessages = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')
# ForumMessagesVotes = pd.read_csv('../input/meta-kaggle/ForumMessageVotes.csv')
# Followers = pd.read_csv('../input/meta-kaggle/UserFollowers.csv')
# # 2. Retrieving AuthorUserId
display(users_df.head())
current_user_df = users_df[users_df["UserName"] == user_name]
display(current_user_df)
current_user_id = current_user_df["Id"].values[0]
# current_user_id = current_user.item()
print(current_user_id)
# # 3. Displaying User Achievement
user_acheivements_df = acheivements_df[acheivements_df["UserId"] == current_user_id]
user_acheivements_df = user_acheivements_df[
["UserId", "AchievementType", "Tier", "TierAchievementDate", "CurrentRanking"]
]
user_acheivements_df = user_acheivements_df.sort_values(
by=["Tier", "CurrentRanking"], ascending=[False, True]
)
user_acheivements_df
TIER_TO_COLOR = {
"Novice": "#5ac995",
"Contributor": "#0bf",
"Expert": "#95628f",
"Master": "#f96517",
"Grandmaster": "#dca917",
}
TIERS_MAPPING = {
0: "Novice",
1: "Contributor",
2: "Expert",
3: "Master",
4: "Grandmaster",
}
user_acheivements_df = user_acheivements_df.sort_values(
by=["Tier", "CurrentRanking"], ascending=[True, True]
)
fig = go.Figure(
data=[
go.Bar(
y=user_acheivements_df["AchievementType"],
x=user_acheivements_df["CurrentRanking"],
text=user_acheivements_df["CurrentRanking"],
marker_color=user_acheivements_df["Tier"].apply(
lambda tier: TIER_TO_COLOR[TIERS_MAPPING[tier]]
),
orientation="h",
name="Ranking",
textposition="outside",
texttemplate="%{text}",
textfont_size=14,
hovertemplate="Ranking: %{x} <extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
showlegend=False,
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Novice",
marker=dict(size=10, color=TIER_TO_COLOR["Novice"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Contributor",
marker=dict(size=10, color=TIER_TO_COLOR["Contributor"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Expert",
marker=dict(size=10, color=TIER_TO_COLOR["Expert"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Master",
marker=dict(size=10, color=TIER_TO_COLOR["Master"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Grandmaster",
marker=dict(size=10, color=TIER_TO_COLOR["Grandmaster"], symbol="square"),
),
]
)
fig.update_layout(
template="simple_white",
title="Ranking across Categories",
width=1600,
height=600,
xaxis_title="Votes",
yaxis_title="Kernels",
legend_title="Tiers",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
# # 4. Displaying User Kernels
kernels_df = pd.read_csv(f"{DIR_PATH}/Kernels.csv")
kernel_votes_df = pd.read_csv(f"{DIR_PATH}/KernelVotes.csv")
kernel_versions_df = pd.read_csv(f"{DIR_PATH}/KernelVersions.csv")
user_kernels_df = kernels_df[kernels_df["AuthorUserId"] == current_user_id]
# display(CurrentUserKernels.head(10))
user_kernel_votes_df = kernel_votes_df[kernel_votes_df["UserId"] == current_user_id]
display(user_kernels_df.head())
user_kernels_df = user_kernels_df[
["Id", "CurrentUrlSlug", "TotalVotes", "TotalViews", "Medal", "MadePublicDate"]
]
user_kernels_df = user_kernels_df.sort_values(by="TotalVotes")
kernel_votes_received_data = []
for index, row in user_kernels_df.iterrows():
kernel_version_ids = kernel_versions_df[
kernel_versions_df["ScriptId"] == row["Id"]
]["Id"].values
kernel_votes_received = kernel_votes_df[
kernel_votes_df["KernelVersionId"].isin(kernel_version_ids)
]
for index, votes_row in kernel_votes_received.iterrows():
d = {
"kernel_id": row["Id"],
"kernel_url_slug": row["CurrentUrlSlug"],
"voter_user_id": votes_row["UserId"],
"kernel_version_id": votes_row["KernelVersionId"],
"vote_date": votes_row["VoteDate"],
}
kernel_votes_received_data.append(d)
kernel_votes_received_df = pd.DataFrame(kernel_votes_received_data)
kernel_votes_received_df["vote_date"] = pd.to_datetime(
kernel_votes_received_df["vote_date"]
)
kernel_votes_received_grouped_df = (
kernel_votes_received_df.groupby(["kernel_url_slug", "vote_date"])["kernel_id"]
.count()
.reset_index()
.rename(columns={"kernel_id": "votes_count"})
.sort_values(by=["vote_date", "kernel_url_slug"])
)
kernel_votes_received_grouped_df["votes_cum_sum"] = (
kernel_votes_received_grouped_df.groupby(["kernel_url_slug"])["votes_count"]
.cumsum()
.values
)
kernel_votes_received_grouped_by_date = (
kernel_votes_received_df.groupby("vote_date")["kernel_id"]
.count()
.reset_index()
.rename(columns={"kernel_id": "vote_counts"})
.sort_values(by="vote_date")
)
kernel_votes_received_grouped_by_date[
"cum_sum_votes"
] = kernel_votes_received_grouped_by_date["vote_counts"].cumsum()
display(user_kernels_df)
display(user_kernel_votes_df.head())
display(kernel_votes_received_df.head())
display(kernel_votes_received_grouped_df.head())
# # 5. Displaying User Kernels Vs Up-Votes
MEDAL_RANKING_TO_MEDAL = {
1: "Gold",
2: "Silver",
3: "Bronze",
}
MEDAL_TO_COLOR = {
"Gold": "#b88121",
"Silver": "#838280",
"Bronze": "#8e5b3d",
"No Medel": "white",
}
fig = go.Figure(
data=[
go.Bar(
name="Up Votes",
x=user_kernels_df["TotalVotes"],
y=user_kernels_df["CurrentUrlSlug"],
orientation="h",
marker=dict(
color=user_kernels_df["Medal"]
.map(
lambda d: MEDAL_TO_COLOR[MEDAL_RANKING_TO_MEDAL[d]]
if not np.isnan(d)
else MEDAL_TO_COLOR["No Medel"]
)
.values
),
text=user_kernels_df["TotalVotes"],
textposition="outside",
texttemplate="%{text:.2s}",
textfont_size=14,
hovertemplate="Votes: %{x}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
showlegend=False,
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="No Medel",
marker=dict(size=10, color=MEDAL_TO_COLOR["No Medel"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Gold",
marker=dict(size=10, color=MEDAL_TO_COLOR["Gold"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Silver",
marker=dict(size=10, color=MEDAL_TO_COLOR["Silver"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Bronze",
marker=dict(size=10, color=MEDAL_TO_COLOR["Bronze"], symbol="square"),
),
]
)
fig.update_layout(
barmode="stack",
template="simple_white",
title="Votes on Kernels",
width=1600,
height=600,
xaxis_title="Votes",
yaxis_title="Kernels",
legend_title="Medals",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
fig = go.Figure(
data=[
go.Bar(
name="Up Votes",
x=user_kernels_df["TotalVotes"],
y=user_kernels_df["CurrentUrlSlug"],
orientation="h",
text=user_kernels_df["TotalVotes"],
textposition="outside",
hovertemplate="Votes: %{x}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
),
go.Bar(
name="Views",
x=user_kernels_df["TotalViews"],
y=user_kernels_df["CurrentUrlSlug"],
orientation="h",
texttemplate=user_kernels_df.apply(
lambda row: f"{row['TotalVotes']} / {row['TotalViews']}", axis=1
),
textposition="outside",
textfont_size=14,
text=user_kernels_df["TotalVotes"],
hovertemplate="Views: %{x}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
),
]
)
fig.update_layout(
barmode="stack",
template="simple_white",
title="Votes vs Views on Kernels",
width=1600,
height=600,
xaxis_title="Votes & Views",
yaxis_title="Kernels",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
KERNEL_COLORS = {}
for index, kernel in enumerate(user_kernels_df["CurrentUrlSlug"].values):
KERNEL_COLORS[kernel] = colors[index]
fig = go.Figure(
data=[
go.Scatter(
x=kernel_votes_received_grouped_by_date["vote_date"].apply(
lambda x: x.date()
),
y=kernel_votes_received_grouped_by_date["cum_sum_votes"],
mode="lines+markers",
marker=dict(size=10),
text=kernel_votes_received_grouped_by_date["cum_sum_votes"],
textposition="top center",
textfont_size=14,
hovertemplate="Votes: %{y}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
line=dict(width=3),
opacity=0.9,
name="Cumulative Votes",
),
go.Scatter(
x=[None],
y=[None],
mode="lines",
name="Notebook Released",
marker=dict(size=10, color="green", symbol="square"),
),
],
)
for index, kernel_url_slug in enumerate(
kernel_votes_received_grouped_df["kernel_url_slug"].unique()
):
df = kernel_votes_received_grouped_df[
kernel_votes_received_grouped_df["kernel_url_slug"] == kernel_url_slug
]
fig.add_vline(
x=pd.to_datetime(df["vote_date"]).apply(lambda x: x.date()).values[0],
line_width=3,
line_dash="dash",
line_color="green",
opacity=0.9,
)
fig.update_layout(
template="simple_white",
title="Cumulative Votes received overtime",
width=1600,
height=600,
xaxis_title="Date",
yaxis_title="Votes",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
showlegend=True,
legend=dict(
font=dict(size=14), orientation="h", yanchor="bottom", y=1, xanchor="right", x=1
),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
type="category",
categoryorder="category ascending",
tickangle=45,
automargin=True,
tickmode="linear",
tick0=0,
dtick=1,
ticklen=10,
tickwidth=2,
tickcolor="#000",
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
fig = go.Figure()
for index, kernel_url_slug in enumerate(
kernel_votes_received_grouped_df["kernel_url_slug"].unique()
):
df = kernel_votes_received_grouped_df[
kernel_votes_received_grouped_df["kernel_url_slug"] == kernel_url_slug
]
df["vote_date"] = pd.to_datetime(df["vote_date"]).apply(lambda x: x.date())
fig.add_trace(
go.Bar(
x=df["vote_date"],
y=df["votes_count"],
hovertext=df.apply(
lambda x: f"Votes: {x['votes_count']}<br>Kernel: {x['kernel_url_slug']}<br>Date: {x['vote_date']} <br>Kernel Created Date: {df['vote_date'].min()}",
axis=1,
),
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
opacity=0.9,
name="-".join(kernel_url_slug.split("-")[:4]),
orientation="v",
marker_color=colors[index],
)
)
fig.add_vline(
x=df["vote_date"].min(),
line_width=3,
line_dash="dash",
line_color="green",
opacity=0.9,
)
fig.update_layout(
barmode="stack",
template="simple_white",
title="Votes on Kernels over Time",
width=1600,
height=600,
xaxis_title="Date",
yaxis_title="Votes",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
showlegend=True,
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
type="category",
categoryorder="category ascending",
tickangle=45,
automargin=True,
tickmode="linear",
tick0=0,
dtick=1,
ticklen=10,
tickwidth=2,
tickcolor="#000",
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
# TODO: Plot for Votes by distinct user and users from different tiers
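# --- Added sketch (not part of the original notebook): one way to start on the TODO ---
# Count votes per distinct voter and attach each voter's tier; this assumes Users.csv
# exposes Id, UserName and PerformanceTier columns (0=Novice ... 4=Grandmaster).
voter_counts = (
    kernel_votes_received_df.groupby("voter_user_id")["kernel_id"]
    .count()
    .reset_index()
    .rename(columns={"kernel_id": "votes_given"})
)
voter_counts = voter_counts.merge(
    users_df[["Id", "UserName", "PerformanceTier"]],
    left_on="voter_user_id",
    right_on="Id",
    how="left",
)
votes_by_tier = voter_counts.groupby("PerformanceTier")["votes_given"].sum().reset_index()
display(voter_counts.sort_values("votes_given", ascending=False).head())
display(votes_by_tier)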
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/734/129734803.ipynb
|
meta-kaggle
| null |
[{"Id": 129734803, "ScriptId": 38540820, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 532385, "CreationDate": "05/16/2023 05:42:09", "VersionNumber": 2.0, "Title": "Personal Progress Dashboard", "EvaluationDate": "05/16/2023", "IsChange": false, "TotalLines": 347.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 347.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186080865, "KernelVersionId": 129734803, "SourceDatasetVersionId": 5689391}]
|
[{"Id": 5689391, "DatasetId": 9, "DatasourceVersionId": 5764991, "CreatorUserId": 1, "LicenseName": "Other (specified in description)", "CreationDate": "05/15/2023 10:05:07", "VersionNumber": NaN, "Title": "Meta Kaggle", "Slug": "meta-kaggle", "Subtitle": "Kaggle's public data on competitions, users, submission scores, and kernels", "Description": NaN, "VersionNotes": "Data update up to 20230515", "TotalCompressedBytes": NaN, "TotalUncompressedBytes": NaN}]
|
[{"Id": 9, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 4.0, "CurrentDatasetVersionId": 6266290.0, "CurrentDatasourceVersionId": 6346140.0, "ForumId": 794, "Type": 2, "CreationDate": "09/08/2015 19:01:00", "LastActivityDate": "02/05/2018", "TotalViews": 260053, "TotalDownloads": 17602, "TotalVotes": 813, "TotalKernels": 917}]
| null |
# # Your Username = Your Own Personal Progression Dashboard 📊
# 
# 1. Import **libraries** and **datasets**
# 2. Retrieving **AuthorUserId**
# 3. Displaying User **Achievement**
# 4. Displaying User **Kernels**
# 5. Displaying User **Kernels Vs Up-Votes**
# 6. Displaying User **Kernel Up-Votes History (only the days the user got up-votes)**
# 7. Displaying User **Kernels Vs Views**
# 9. Displaying **Datasets Vs TotalVotes**
# 10. Displaying **Datasets Vs TotalViews**
# 11. Displaying **Datasets Vs TotalDownloads**
# 12. Displaying User **Messages**
# 13. Displaying User **Messages Votes History**
# 14. Displaying User **Followers History (only the days the user got followers)**
# **NB : For this to work you need to insert your username:**
# # Enter your UserName Here :
# ------> Enter Your User Name Here >-----#
user_name = "pardeep19singh"
# ------> Enter Your User Name Here >-----#
# # Refs
# - https://www.kaggle.com/code/desalegngeb/plotly-guide-customize-for-better-visualizations
# # 1. Import libraries and datasets
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from itertools import cycle, islice
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import warnings
sns.set()
warnings.filterwarnings(action="ignore")
bg_color = "#EFEAE0"
custom_params = {
"axes.spines.right": False,
"axes.spines.top": False,
"grid.alpha": 0.3,
"figure.figsize": (16, 6),
"axes.titlesize": "Large",
"axes.labelsize": "Large",
"figure.facecolor": bg_color,
"axes.facecolor": bg_color,
}
sns.set_theme(style="whitegrid", rc=custom_params)
colors = [
"#495867",
"#50723C",
"#D19C1D",
"#FE5F55",
"#5D2A42",
"#4CC26E",
"#7CB4B8",
"#FF9F1C",
"#BCCC6D",
"#EC6346",
"#6A4545",
"#3979D3",
"#E95DA6",
"#FFBE0B",
"#577399",
"#6B7A55",
"#b23a48",
"#90B860",
"#EDA84C",
]
palette = sns.color_palette(palette=colors)
sns.palplot(palette, size=1)
plt.show()
# DIR_PATH = 'data'
DIR_PATH = "../input/meta-kaggle"
users_df = pd.read_csv(f"{DIR_PATH}/Users.csv")
acheivements_df = pd.read_csv(f"{DIR_PATH}/UserAchievements.csv")
# datasets = pd.read_csv('../input/meta-kaggle/Datasets.csv')
# datasetsVersions = pd.read_csv('../input/meta-kaggle/DatasetVersions.csv')
# ForumMessages = pd.read_csv('../input/meta-kaggle/ForumMessages.csv')
# ForumMessagesVotes = pd.read_csv('../input/meta-kaggle/ForumMessageVotes.csv')
# Followers = pd.read_csv('../input/meta-kaggle/UserFollowers.csv')
# # 2. Retrieving AuthorUserId
display(users_df.head())
current_user_df = users_df[users_df["UserName"] == user_name]
display(current_user_df)
current_user_id = current_user_df["Id"].values[0]
# current_user_id = current_user.item()
print(current_user_id)
# # 3. Displaying User Achievement
user_acheivements_df = acheivements_df[acheivements_df["UserId"] == current_user_id]
user_acheivements_df = user_acheivements_df[
["UserId", "AchievementType", "Tier", "TierAchievementDate", "CurrentRanking"]
]
user_acheivements_df = user_acheivements_df.sort_values(
by=["Tier", "CurrentRanking"], ascending=[False, True]
)
user_acheivements_df
TIER_TO_COLOR = {
"Novice": "#5ac995",
"Contributor": "#0bf",
"Expert": "#95628f",
"Master": "#f96517",
"Grandmaster": "#dca917",
}
TIERS_MAPPING = {
0: "Novice",
1: "Contributor",
2: "Expert",
3: "Master",
4: "Grandmaster",
}
user_acheivements_df = user_acheivements_df.sort_values(
by=["Tier", "CurrentRanking"], ascending=[True, True]
)
fig = go.Figure(
data=[
go.Bar(
y=user_acheivements_df["AchievementType"],
x=user_acheivements_df["CurrentRanking"],
text=user_acheivements_df["CurrentRanking"],
marker_color=user_acheivements_df["Tier"].apply(
lambda tier: TIER_TO_COLOR[TIERS_MAPPING[tier]]
),
orientation="h",
name="Ranking",
textposition="outside",
texttemplate="%{text}",
textfont_size=14,
hovertemplate="Ranking: %{x} <extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
showlegend=False,
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Novice",
marker=dict(size=10, color=TIER_TO_COLOR["Novice"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Contributor",
marker=dict(size=10, color=TIER_TO_COLOR["Contributor"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Expert",
marker=dict(size=10, color=TIER_TO_COLOR["Expert"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Master",
marker=dict(size=10, color=TIER_TO_COLOR["Master"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Grandmaster",
marker=dict(size=10, color=TIER_TO_COLOR["Grandmaster"], symbol="square"),
),
]
)
fig.update_layout(
template="simple_white",
title="Ranking across Categories",
width=1600,
height=600,
xaxis_title="Votes",
yaxis_title="Kernels",
legend_title="Tiers",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
# # 4. Displaying User Kernels
kernels_df = pd.read_csv(f"{DIR_PATH}/Kernels.csv")
kernel_votes_df = pd.read_csv(f"{DIR_PATH}/KernelVotes.csv")
kernel_versions_df = pd.read_csv(f"{DIR_PATH}/KernelVersions.csv")
user_kernels_df = kernels_df[kernels_df["AuthorUserId"] == current_user_id]
# display(CurrentUserKernels.head(10))
user_kernel_votes_df = kernel_votes_df[kernel_votes_df["UserId"] == current_user_id]
display(user_kernels_df.head())
user_kernels_df = user_kernels_df[
["Id", "CurrentUrlSlug", "TotalVotes", "TotalViews", "Medal", "MadePublicDate"]
]
user_kernels_df = user_kernels_df.sort_values(by="TotalVotes")
kernel_votes_received_data = []
for index, row in user_kernels_df.iterrows():
kernel_version_ids = kernel_versions_df[
kernel_versions_df["ScriptId"] == row["Id"]
]["Id"].values
kernel_votes_received = kernel_votes_df[
kernel_votes_df["KernelVersionId"].isin(kernel_version_ids)
]
for index, votes_row in kernel_votes_received.iterrows():
d = {
"kernel_id": row["Id"],
"kernel_url_slug": row["CurrentUrlSlug"],
"voter_user_id": votes_row["UserId"],
"kernel_version_id": votes_row["KernelVersionId"],
"vote_date": votes_row["VoteDate"],
}
kernel_votes_received_data.append(d)
kernel_votes_received_df = pd.DataFrame(kernel_votes_received_data)
kernel_votes_received_df["vote_date"] = pd.to_datetime(
kernel_votes_received_df["vote_date"]
)
kernel_votes_received_grouped_df = (
kernel_votes_received_df.groupby(["kernel_url_slug", "vote_date"])["kernel_id"]
.count()
.reset_index()
.rename(columns={"kernel_id": "votes_count"})
.sort_values(by=["vote_date", "kernel_url_slug"])
)
kernel_votes_received_grouped_df["votes_cum_sum"] = (
kernel_votes_received_grouped_df.groupby(["kernel_url_slug"])["votes_count"]
.cumsum()
.values
)
kernel_votes_received_grouped_by_date = (
kernel_votes_received_df.groupby("vote_date")["kernel_id"]
.count()
.reset_index()
.rename(columns={"kernel_id": "vote_counts"})
.sort_values(by="vote_date")
)
kernel_votes_received_grouped_by_date[
"cum_sum_votes"
] = kernel_votes_received_grouped_by_date["vote_counts"].cumsum()
display(user_kernels_df)
display(user_kernel_votes_df.head())
display(kernel_votes_received_df.head())
display(kernel_votes_received_grouped_df.head())
# # 5. Displaying User Kernels Vs Up-Votes
MEDAL_RANKING_TO_MEDAL = {
1: "Gold",
2: "Silver",
3: "Bronze",
}
MEDAL_TO_COLOR = {
"Gold": "#b88121",
"Silver": "#838280",
"Bronze": "#8e5b3d",
"No Medel": "white",
}
fig = go.Figure(
data=[
go.Bar(
name="Up Votes",
x=user_kernels_df["TotalVotes"],
y=user_kernels_df["CurrentUrlSlug"],
orientation="h",
marker=dict(
color=user_kernels_df["Medal"]
.map(
lambda d: MEDAL_TO_COLOR[MEDAL_RANKING_TO_MEDAL[d]]
if not np.isnan(d)
else MEDAL_TO_COLOR["No Medel"]
)
.values
),
text=user_kernels_df["TotalVotes"],
textposition="outside",
texttemplate="%{text:.2s}",
textfont_size=14,
hovertemplate="Votes: %{x}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
showlegend=False,
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="No Medel",
marker=dict(size=10, color=MEDAL_TO_COLOR["No Medel"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Gold",
marker=dict(size=10, color=MEDAL_TO_COLOR["Gold"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Silver",
marker=dict(size=10, color=MEDAL_TO_COLOR["Silver"], symbol="square"),
),
go.Scatter(
x=[None],
y=[None],
mode="markers",
name="Bronze",
marker=dict(size=10, color=MEDAL_TO_COLOR["Bronze"], symbol="square"),
),
]
)
fig.update_layout(
barmode="stack",
template="simple_white",
title="Votes on Kernels",
width=1600,
height=600,
xaxis_title="Votes",
yaxis_title="Kernels",
legend_title="Medals",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
fig = go.Figure(
data=[
go.Bar(
name="Up Votes",
x=user_kernels_df["TotalVotes"],
y=user_kernels_df["CurrentUrlSlug"],
orientation="h",
text=user_kernels_df["TotalVotes"],
textposition="outside",
hovertemplate="Votes: %{x}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
),
go.Bar(
name="Views",
x=user_kernels_df["TotalViews"],
y=user_kernels_df["CurrentUrlSlug"],
orientation="h",
texttemplate=user_kernels_df.apply(
lambda row: f"{row['TotalVotes']} / {row['TotalViews']}", axis=1
),
textposition="outside",
textfont_size=14,
text=user_kernels_df["TotalVotes"],
hovertemplate="Views: %{x}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
width=0.5,
marker_line_color="rgb(0,0,0)",
marker_line_width=1.5,
opacity=0.9,
),
]
)
fig.update_layout(
barmode="stack",
template="simple_white",
title="Votes vs Views on Kernels",
width=1600,
height=600,
xaxis_title="Votes & Views",
yaxis_title="Kernels",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
KERNEL_COLORS = {}
for index, kernel in enumerate(user_kernels_df["CurrentUrlSlug"].values):
KERNEL_COLORS[kernel] = colors[index]
fig = go.Figure(
data=[
go.Scatter(
x=kernel_votes_received_grouped_by_date["vote_date"].apply(
lambda x: x.date()
),
y=kernel_votes_received_grouped_by_date["cum_sum_votes"],
mode="lines+markers",
marker=dict(size=10),
text=kernel_votes_received_grouped_by_date["cum_sum_votes"],
textposition="top center",
textfont_size=14,
hovertemplate="Votes: %{y}<extra></extra>",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
line=dict(width=3),
opacity=0.9,
name="Cumulative Votes",
),
go.Scatter(
x=[None],
y=[None],
mode="lines",
name="Notebook Released",
marker=dict(size=10, color="green", symbol="square"),
),
],
)
for index, kernel_url_slug in enumerate(
kernel_votes_received_grouped_df["kernel_url_slug"].unique()
):
df = kernel_votes_received_grouped_df[
kernel_votes_received_grouped_df["kernel_url_slug"] == kernel_url_slug
]
fig.add_vline(
x=pd.to_datetime(df["vote_date"]).apply(lambda x: x.date()).values[0],
line_width=3,
line_dash="dash",
line_color="green",
opacity=0.9,
)
fig.update_layout(
template="simple_white",
title="Cumulative Votes received overtime",
width=1600,
height=600,
xaxis_title="Date",
yaxis_title="Votes",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
showlegend=True,
legend=dict(
font=dict(size=14), orientation="h", yanchor="bottom", y=1, xanchor="right", x=1
),
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
type="category",
categoryorder="category ascending",
tickangle=45,
automargin=True,
tickmode="linear",
tick0=0,
dtick=1,
ticklen=10,
tickwidth=2,
tickcolor="#000",
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
fig = go.Figure()
for index, kernel_url_slug in enumerate(
kernel_votes_received_grouped_df["kernel_url_slug"].unique()
):
df = kernel_votes_received_grouped_df[
kernel_votes_received_grouped_df["kernel_url_slug"] == kernel_url_slug
]
df["vote_date"] = pd.to_datetime(df["vote_date"]).apply(lambda x: x.date())
fig.add_trace(
go.Bar(
x=df["vote_date"],
y=df["votes_count"],
hovertext=df.apply(
lambda x: f"Votes: {x['votes_count']}<br>Kernel: {x['kernel_url_slug']}<br>Date: {x['vote_date']} <br>Kernel Created Date: {df['vote_date'].min()}",
axis=1,
),
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Rockwell"),
opacity=0.9,
name="-".join(kernel_url_slug.split("-")[:4]),
orientation="v",
marker_color=colors[index],
)
)
fig.add_vline(
x=df["vote_date"].min(),
line_width=3,
line_dash="dash",
line_color="green",
opacity=0.9,
)
fig.update_layout(
barmode="stack",
template="simple_white",
title="Votes on Kernels over Time",
width=1600,
height=600,
xaxis_title="Date",
yaxis_title="Votes",
paper_bgcolor="lightgray",
plot_bgcolor="lightgray",
title_x=0.5,
title_font_size=20,
legend=dict(font=dict(size=14), orientation="v"),
showlegend=True,
xaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
type="category",
categoryorder="category ascending",
tickangle=45,
automargin=True,
tickmode="linear",
tick0=0,
dtick=1,
ticklen=10,
tickwidth=2,
tickcolor="#000",
),
yaxis=dict(
showgrid=False,
zeroline=False,
showticklabels=True,
tickfont=dict(size=14, family="Rockwell"),
),
)
fig.show()
# TODO: Plot for Votes by distinct user and users from different tiers
| false | 0 | 5,491 | 0 | 5,511 | 5,491 |
||
129644879
|
<jupyter_start><jupyter_text>Eurovision Song Contest scores 1975-2019
### Context
Scores presented at eurovision.tv in usable format.
### Content
All votes/scores given during all editions of the (semi) finals from the Eurovision Song Contest 1975-2019, televoting and jury split. Per country (to/from).
Kaggle dataset identifier: eurovision-song-contest-scores-19752019
<jupyter_script>from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel("/kaggle/input/eurovision_song_contest_1975_2019.xlsx")
from_df = df[df["From country"] == "Finland"]
to_df = from_df[from_df["To country"] == "Hungary"]
f_only = to_df[to_df["(semi-) final"] == "f"]
f_only
f_only.plot.bar(x="Year", y="Points ")
from_df = df[df["From country"] == "Hungary"]
to_df = from_df[from_df["To country"] == "Finland"]
f_only = to_df[to_df["(semi-) final"] == "f"]
f_only
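# --- Added sketch (not part of the original notebook) ---
# Mirror of the earlier chart: points given by Hungary to Finland in the finals
# (assumes the filtered frame is non-empty; note the trailing space in "Points ").
f_only.plot.bar(x="Year", y="Points ")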
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/644/129644879.ipynb
|
eurovision-song-contest-scores-19752019
|
datagraver
|
[{"Id": 129644879, "ScriptId": 38546466, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7012016, "CreationDate": "05/15/2023 12:55:21", "VersionNumber": 1.0, "Title": "Starter: Eurovision Song Contest scores 6c151311-d", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 25.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 13.0, "LinesInsertedFromFork": 12.0, "LinesDeletedFromFork": 72.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 13.0, "TotalVotes": 0}]
|
[{"Id": 185926602, "KernelVersionId": 129644879, "SourceDatasetVersionId": 437942}]
|
[{"Id": 437942, "DatasetId": 198138, "DatasourceVersionId": 453484, "CreatorUserId": 1457252, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "05/19/2019 16:14:50", "VersionNumber": 1.0, "Title": "Eurovision Song Contest scores 1975-2019", "Slug": "eurovision-song-contest-scores-19752019", "Subtitle": "All votes/scores given during the (semi) finals from jury and televoting", "Description": "### Context\n\nScores presented at eurovision.tv in usable format. \n\n\n### Content\n\nAll votes/scores given during all editions of the (semi) finals from the Eurovision Song Contest 1975-2019, televoting and jury split. Per country (to/from).\n\n\n### Acknowledgements\n\n[Eurovision.tv](https://eurovision.tv) and [Datagraver.com](https://datagraver.com) for data cleansing and structuring", "VersionNotes": "Initial release", "TotalCompressedBytes": 1809527.0, "TotalUncompressedBytes": 1344879.0}]
|
[{"Id": 198138, "CreatorUserId": 1457252, "OwnerUserId": 1457252.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 437942.0, "CurrentDatasourceVersionId": 453484.0, "ForumId": 209087, "Type": 2, "CreationDate": "05/19/2019 16:14:50", "LastActivityDate": "05/19/2019", "TotalViews": 17520, "TotalDownloads": 1794, "TotalVotes": 28, "TotalKernels": 4}]
|
[{"Id": 1457252, "UserName": "datagraver", "DisplayName": "Datagraver", "RegisterDate": "11/29/2017", "PerformanceTier": 0}]
|
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel("/kaggle/input/eurovision_song_contest_1975_2019.xlsx")
from_df = df[df["From country"] == "Finland"]
to_df = from_df[from_df["To country"] == "Hungary"]
f_only = to_df[to_df["(semi-) final"] == "f"]
f_only
f_only.plot.bar(x="Year", y="Points ")
from_df = df[df["From country"] == "Hungary"]
to_df = from_df[from_df["To country"] == "Finland"]
f_only = to_df[to_df["(semi-) final"] == "f"]
f_only
| false | 0 | 261 | 0 | 383 | 261 |
||
129644281
|
<jupyter_start><jupyter_text>MNIST Dataset
### Context
MNIST is a subset of a larger set available from NIST (it's copied from http://yann.lecun.com/exdb/mnist/)
### Content
The MNIST database of handwritten digits has a training set of 60,000 examples, and a test set of 10,000 examples. .
Four files are available:
- train-images-idx3-ubyte.gz: training set images (9912422 bytes)
- train-labels-idx1-ubyte.gz: training set labels (28881 bytes)
- t10k-images-idx3-ubyte.gz: test set images (1648877 bytes)
- t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes)
### How to read
See [sample MNIST reader][1]
Kaggle dataset identifier: mnist-dataset
<jupyter_script>from pathlib import Path
from array import array
import struct
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
def read_image_data(path):
data_dir = Path("/kaggle/input/mnist-dataset")
with open(data_dir / path, "rb") as f:
# IDX file format
magic, size, rows, cols = struct.unpack(">IIII", f.read(16))
image_data = array("B", f.read())
images = []
for i in range(size):
image = np.array(image_data[i * rows * cols : (i + 1) * rows * cols]).reshape(
28, 28
)
images.append(image)
return np.array(images)
def read_labels(path):
data_dir = Path("/kaggle/input/mnist-dataset")
with open(data_dir / path, "rb") as f:
magic, size = struct.unpack(">II", f.read(8))
if magic != 2049:
raise ValueError(
"Magic number mismatch, expected 2049, got {}".format(magic)
)
labels = np.array(array("B", f.read()))
return labels
train_val_labels = read_labels("train-labels.idx1-ubyte")
train_val_images = read_image_data("train-images.idx3-ubyte")
fig = plt.figure(figsize=(12.0, 4.0))
grid = ImageGrid(
fig,
111,
nrows_ncols=(5, 20),
axes_pad=0.1, # pad between axes in inch.
)
indices = np.arange(len(train_val_labels))
for ax, digit in zip(grid, range(100)):
# Iterating over the grid returns the Axes.
i = digit % 10
ax.imshow(
Image.fromarray(train_val_images[indices[train_val_labels == digit // 10][i]]),
cmap="bone",
)
ax.axis("off")
plt.savefig("mnist-sample-100.eps", dpi=300)
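# --- Added sketch (not part of the original notebook) ---
# The same helpers should also read the 10k test split; the file names below assume
# the dataset uses the same dot-separated naming as the training files.
test_labels = read_labels("t10k-labels.idx1-ubyte")
test_images = read_image_data("t10k-images.idx3-ubyte")
print(test_images.shape, test_labels.shape)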
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/644/129644281.ipynb
|
mnist-dataset
|
hojjatk
|
[{"Id": 129644281, "ScriptId": 38544598, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3115179, "CreationDate": "05/15/2023 12:51:21", "VersionNumber": 1.0, "Title": "Exploring MNIST", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 47.0, "LinesInsertedFromPrevious": 47.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 185925373, "KernelVersionId": 129644281, "SourceDatasetVersionId": 242592}]
|
[{"Id": 242592, "DatasetId": 102285, "DatasourceVersionId": 254413, "CreatorUserId": 1840515, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/08/2019 13:01:57", "VersionNumber": 1.0, "Title": "MNIST Dataset", "Slug": "mnist-dataset", "Subtitle": "The MNIST database of handwritten digits (http://yann.lecun.com)", "Description": "### Context\n\nMNIST is a subset of a larger set available from NIST (it's copied from http://yann.lecun.com/exdb/mnist/)\n\n\n### Content\nThe MNIST database of handwritten digits has a training set of 60,000 examples, and a test set of 10,000 examples. .\nFour files are available:\n\n - train-images-idx3-ubyte.gz: training set images (9912422 bytes) \n - train-labels-idx1-ubyte.gz: training set labels (28881 bytes)\n - t10k-images-idx3-ubyte.gz: test set images (1648877 bytes) \n - t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes)\n\n### How to read\nSee [sample MNIST reader][1]\n\n### Acknowledgements\n* Yann LeCun, Courant Institute, NYU\n* Corinna Cortes, Google Labs, New York\n* Christopher J.C. Burges, Microsoft Research, Redmond\n\n### Inspiration\nMany methods have been tested with this training set and test set (see http://yann.lecun.com/exdb/mnist/ for more details)\n\n\n [1]: https://www.kaggle.com/hojjatk/read-mnist-dataset", "VersionNotes": "Initial release", "TotalCompressedBytes": 11594722.0, "TotalUncompressedBytes": 11594722.0}]
|
[{"Id": 102285, "CreatorUserId": 1840515, "OwnerUserId": 1840515.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 242592.0, "CurrentDatasourceVersionId": 254413.0, "ForumId": 111993, "Type": 2, "CreationDate": "01/08/2019 13:01:57", "LastActivityDate": "01/08/2019", "TotalViews": 113247, "TotalDownloads": 16600, "TotalVotes": 111, "TotalKernels": 67}]
|
[{"Id": 1840515, "UserName": "hojjatk", "DisplayName": "Hojjat Khodabakhsh", "RegisterDate": "04/20/2018", "PerformanceTier": 0}]
|
| false | 0 | 535 | 3 | 768 | 535 |
||
129203514
|
# ### Prepare
from datasets import load_from_disk, Dataset, concatenate_datasets
from sklearn.feature_extraction.text import CountVectorizer
from transformers import DataCollatorWithPadding
import sys
import os
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import torch.utils.data as data_utils
import torch
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
get_linear_schedule_with_warmup,
T5ForConditionalGeneration,
T5Tokenizer,
)
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
import random
import warnings
import gc
import json
import evaluate
from transformers import (
AutoTokenizer,
AutoModelForSeq2SeqLM,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
DataCollatorForSeq2Seq,
)
from transformers import LongformerTokenizer, EncoderDecoderModel
from transformers import AutoTokenizer, AutoModel
import torch
from tqdm import tqdm
from torch import nn
warnings.filterwarnings("ignore")
import pickle
from collections import defaultdict
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
def format_sample(sample):
question = sample["question"]["text"]
context = sample["document"]["tokens"]["token"]
is_html = sample["document"]["tokens"]["is_html"]
long_answers = sample["annotations"]["long_answer"]
short_answers = sample["annotations"]["short_answers"]
context_string = " ".join(
[context[i] for i in range(len(context)) if not is_html[i]]
)
for answer in sample["annotations"]["yes_no_answer"]:
if answer == 0 or answer == 1:
return {
"question": question,
"context": context_string,
"short_answers": [],
"long_answers": [],
"category": "no" if answer == 0 else "yes",
}
short_targets = []
for s in short_answers:
short_targets.extend(s["text"])
short_targets = list(set(short_targets))
long_targets = []
for s in long_answers:
if s["start_token"] == -1:
continue
answer = context[s["start_token"] : s["end_token"]]
html = is_html[s["start_token"] : s["end_token"]]
new_answer = " ".join([answer[i] for i in range(len(answer)) if not html[i]])
if new_answer not in long_targets:
long_targets.append(new_answer)
category = "long_short" if len(short_targets + long_targets) > 0 else "null"
return {
"question": question,
"context": context_string,
"short_answers": short_targets,
"long_answers": long_targets,
"category": category,
}
def format_validation_dataset(dataset_path, answer_type="long"):
dataset = load_from_disk(dataset_path)
dataset = dataset.map(format_sample).remove_columns(
["annotations", "document", "id"]
)
dataset = dataset.filter(lambda x: (x["category"] == "long_short")).remove_columns(
["category"]
)
dataset = dataset.filter(lambda x: len(x["context"]) <= 50000)
return dataset
PUNCTUATION_SET_TO_EXCLUDE = set("".join(["‘", "’", "´", "`", ".", ",", "-", '"']))
def get_sub_answers(answers, begin=0, end=None):
return [" ".join(x.split(" ")[begin:end]) for x in answers if len(x.split(" ")) > 1]
def expand_to_aliases(given_answers, make_sub_answers=False):
if make_sub_answers:
given_answers = (
given_answers
+ get_sub_answers(given_answers, begin=1)
+ get_sub_answers(given_answers, end=-1)
)
answers = []
for answer in given_answers:
alias = answer.replace("_", " ").lower()
alias = "".join(
c if c not in PUNCTUATION_SET_TO_EXCLUDE else " " for c in alias
)
answers.append(" ".join(alias.split()).strip())
return list(set(answers))
def format_answer(answer):
alias = answer.replace("_", " ").lower()
alias = "".join(c if c not in PUNCTUATION_SET_TO_EXCLUDE else " " for c in alias)
return " ".join(alias.split()).strip() + " </s>"
def preprocess_function(
dataset,
tokenizer,
max_length=512,
max_length_answer=512,
answer_type="short_answer",
make_sub_answers=False,
):
model_inputs = tokenizer(
dataset["text_input"], max_length=max_length, truncation=True
)
if answer_type == "has_answer":
return model_inputs
if answer_type == "short_answer":
text_target = [format_answer(x) for x in dataset[answer_type]]
elif answer_type == "long_answer":
text_target = [x + " </s>" for x in dataset[answer_type]]
else:
raise ValueError(f"Wrong answer_type {answer_type}")
labels = tokenizer(text_target, max_length=max_length_answer, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
rouge = evaluate.load("rouge")
bleu = evaluate.load("bleu")
exact_match = evaluate.load("exact_match")
accuracy = evaluate.load("accuracy")
class ActiveQA:
def __init__(self, config):
self.config = config
self._reset_models()
self.models_are_trained = False
def _reset_models(self):
self.model_answer = T5ForConditionalGeneration.from_pretrained(
self.config["checkpoint_answer"]
).to(self.config["device"])
self.tokenizer_answer = T5Tokenizer.from_pretrained(
self.config["checkpoint_answer"], padding=True
)
self.data_collator_answer = DataCollatorForSeq2Seq(
tokenizer=self.tokenizer_answer,
model=self.model_answer,
max_length=self.config["max_length"],
)
        self.model_label = AutoModelForSequenceClassification.from_pretrained(
            self.config["checkpoint_label"], num_labels=2
        ).to(self.config["device"])
self.tokenizer_label = AutoTokenizer.from_pretrained(
self.config["checkpoint_label"]
)
self.data_collator_label = DataCollatorWithPadding(
tokenizer=self.tokenizer_label
)
def load_from_disk(self, path):
with open(path, "rb") as f:
self.__dict__ = pickle.load(f)
def save_to_disk(self, path):
with open(path, "wb") as f:
pickle.dump(self.__dict__, f)
def get_compute_metrics(self, metrics_type):
def compute_metrics_answer(eval_pred):
predictions, labels = eval_pred
decoded_preds = self.tokenizer_answer.batch_decode(
predictions, skip_special_tokens=True
)
labels = np.where(
labels != -100, labels, self.tokenizer_answer.pad_token_id
)
decoded_labels = self.tokenizer_answer.batch_decode(
labels, skip_special_tokens=True
)
result = dict()
rouge_result = rouge.compute(
predictions=decoded_preds, references=decoded_labels
)
result["rouge1"] = rouge_result["rouge1"]
result["rouge2"] = rouge_result["rouge2"]
bleu_result = bleu.compute(
predictions=decoded_preds, references=decoded_labels
)
result["Bleu"] = bleu_result["bleu"]
em_result = exact_match.compute(
predictions=decoded_preds,
references=decoded_labels,
regexes_to_ignore=["the "],
ignore_case=True,
ignore_punctuation=True,
)
result["EM"] = em_result["exact_match"]
prediction_lens = [
np.count_nonzero(pred != self.tokenizer_answer.pad_token_id)
for pred in predictions
]
result["gen_len"] = np.mean(prediction_lens)
return {k: round(v, 4) for k, v in result.items()}
def compute_metrics_label(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return accuracy.compute(predictions=predictions, references=labels)
if metrics_type == "answer":
return compute_metrics_answer
elif metrics_type == "label":
return compute_metrics_label
else:
raise ValueError(f"Wrong metrics type {metrics_type}")
def train_answer(self, train_dataset, test_dataset):
args = {
"tokenizer": self.tokenizer_answer,
"max_length": self.config["max_length"],
"max_length_answer": self.config["max_length_answer"],
"answer_type": self.config["answer_type"],
"make_sub_answers": self.config["make_sub_answers"],
}
train_dataset = train_dataset.filter(
lambda x: x[self.config["answer_type"]] is not None
).remove_columns("label")
test_dataset = test_dataset.filter(
lambda x: x[self.config["answer_type"]] is not None
).remove_columns("label")
train_dataset = train_dataset.map(
preprocess_function, batched=True, fn_kwargs=args
)
test_dataset = test_dataset.map(
preprocess_function, batched=True, fn_kwargs=args
)
training_args = Seq2SeqTrainingArguments(
output_dir="answer_model",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=self.config["learning_rate_answer"],
per_device_train_batch_size=self.config[
"per_device_train_batch_size_answer"
],
per_device_eval_batch_size=self.config["per_device_eval_batch_size_answer"],
weight_decay=self.config["weight_decay_answer"],
num_train_epochs=self.config["num_train_epochs_answer"],
predict_with_generate=True,
generation_max_length=self.config["max_length_answer"],
report_to="none",
push_to_hub=False,
)
trainer = Seq2SeqTrainer(
model=self.model_answer,
args=training_args,
train_dataset=train_dataset,
eval_dataset=test_dataset,
tokenizer=self.tokenizer_answer,
data_collator=self.data_collator_answer,
compute_metrics=self.get_compute_metrics("answer"),
)
trainer.train()
def train_label(self, train_dataset, test_dataset):
args = {
"tokenizer": self.tokenizer_label,
"max_length": self.config["max_length"],
"answer_type": "has_answer",
}
train_dataset = train_dataset.map(
preprocess_function, batched=True, fn_kwargs=args
)
test_dataset = test_dataset.map(
preprocess_function, batched=True, fn_kwargs=args
)
training_args = TrainingArguments(
output_dir="label_model",
learning_rate=self.config["learning_rate_label"],
per_device_train_batch_size=self.config[
"per_device_train_batch_size_label"
],
per_device_eval_batch_size=self.config["per_device_eval_batch_size_label"],
num_train_epochs=self.config["num_train_epochs_label"],
weight_decay=self.config["weight_decay_label"],
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
report_to="none",
push_to_hub=False,
)
trainer = Trainer(
model=self.model_label,
args=training_args,
train_dataset=train_dataset,
eval_dataset=test_dataset,
tokenizer=self.tokenizer_label,
data_collator=self.data_collator_label,
compute_metrics=self.get_compute_metrics("label"),
)
trainer.train()
def train(self, train_dataset, test_dataset):
self.train_label(train_dataset, test_dataset)
self.train_answer(train_dataset, test_dataset)
self.models_are_trained = True
    def score_answer(self, input_ids, labels):
        # probability of the generated sequence: product of per-token probabilities
        with torch.no_grad():
            logits = self.model_answer(input_ids=input_ids, labels=labels).logits[0].detach()
        probs = torch.softmax(logits, -1)
        p = 1
        for idx, token in enumerate(labels[0]):
            p *= probs[idx][token]
        return p.item()
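    # (Added sketch, not in the original notebook) Multiplying many per-token
    # probabilities as in score_answer above can underflow for long sequences;
    # a numerically safer variant accumulates log-probabilities instead.
    def score_answer_logprob(self, input_ids, labels):
        with torch.no_grad():
            logits = self.model_answer(input_ids=input_ids, labels=labels).logits[0]
        log_probs = torch.log_softmax(logits, dim=-1)
        total = sum(log_probs[idx][token] for idx, token in enumerate(labels[0]))
        return total.item()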
    def _predict_with_probs(self, test_dataset):
        answers = []
        probs = []
        document_ids = []
        with torch.no_grad():
            for row in test_dataset:
                input_ids = torch.tensor([row["input_ids"]]).to(self.config["device"])
                labels = self.model_answer.generate(input_ids)
                probs.append(self.score_answer(input_ids, labels))
                answers.append(labels[0])
                document_ids.append(row["document_id"])
        return pd.DataFrame(
            {"answer": answers, "prob": probs, "document_id": document_ids}
        )
    def evaluate(self, test_dataset, test_text):
        df = self._predict_with_probs(test_dataset)
        df = (
            df.sort_values("prob", ascending=False)
            .groupby("document_id", as_index=False)
            .first()
        )
        df = df.sort_values("document_id")
        answers = df["answer"].values
        metric = self.get_compute_metrics("answer")
        return metric((answers, test_text))
def calculate_probs_score(probs):
return max(probs)
    def predict(self, input_text):
        input_ids = self.tokenizer_answer(
            input_text, return_tensors="pt"
        ).input_ids.to(self.config["device"])
        with torch.no_grad():
            labels = self.model_answer.generate(input_ids)
        return self.tokenizer_answer.batch_decode(labels, skip_special_tokens=True)[0]
    def answer(self, context, question, part_len=3, step=2):
        # NOTE: this helper assumes a sentence-embedding `tokenizer`/`model` pair
        # (e.g. loaded via AutoTokenizer/AutoModel) plus `mean_pooling` and a cosine
        # similarity `cos` defined at module level; they are not attributes of this
        # class. `part_len` and `step` control the overlapping sentence chunks.
        sentences = context.split(".")
        texts = [
            ".".join(sentences[idx : idx + part_len])
            for idx in range(0, len(sentences), step)
        ] + [question]
        encoded_input = tokenizer(
            texts, padding=True, truncation=True, max_length=512, return_tensors="pt"
        )
text_dataset = data_utils.TensorDataset(
encoded_input["input_ids"], encoded_input["attention_mask"]
)
text_dataloader = data_utils.DataLoader(
text_dataset, batch_size=32, shuffle=False
)
token_embeddings_list = []
with torch.no_grad():
for inputs, masks in text_dataloader:
inputs, masks = inputs.to(device), masks.to(device)
model_output = model(inputs, masks)
token_embeddings = model_output[0]
token_embeddings_list.append(token_embeddings)
token_embeddings = torch.concat(token_embeddings_list, axis=0)
parts_embeddings = mean_pooling(
token_embeddings, encoded_input["attention_mask"].to(device)
)
scores = torch.tensor(
[
cos(parts_embeddings[i, :], parts_embeddings[-1, :])
for i in range(len(texts) - 1)
]
)
idx = torch.argmax(scores)
return texts[idx]
def emulate_active_learning(
self, text_dataset, full_dataset, val_dataset, test_dataset
):
document_ids = text_dataset["document_id"]
ids_in_train = set(
random.sample(document_ids, self.config["start_document_cnt"])
)
        full_dataset = full_dataset.add_column(
            "in_train", [x in ids_in_train for x in full_dataset["document_id"]]
        )
        train_dataset = full_dataset.filter(lambda x: x["document_id"] in ids_in_train)
self.train(train_dataset, test_dataset)
metrics = dict()
metrics["val"] = [self.evaluate(val_dataset)]
for step in range(self.config["al_steps_cnt"]):
print(f"step {step + 1}")
ids_to_add = self.choose_ids(
text_dataset.filter(lambda x: x["document_id"] not in ids_in_train)
)
ids_in_train = ids_in_train.union(ids_to_add)
train_part = full_dataset.filter(lambda x: x["document_id"] in ids_to_add)
train_dataset = concatenate_datasets([train_dataset, train_part])
self.train(train_dataset, test_dataset)
metrics["val"].append(self.evaluate(val_dataset))
return metrics
config = {
"checkpoint_answer": "t5-small",
"checkpoint_label": "distilbert-base-uncased",
"max_length": 512,
"max_length_answer": 512,
"learning_rate_answer": 1e-5,
"weight_decay_answer": 1e-2,
"per_device_train_batch_size_answer": 4,
"per_device_eval_batch_size_answer": 4,
"num_train_epochs_answer": 1,
"make_sub_answers": False,
"learning_rate_label": 1e-5,
"weight_decay_label": 1e-2,
"per_device_train_batch_size_label": 4,
"per_device_eval_batch_size_label": 16,
"num_train_epochs_label": 1,
"answer_type": "long_answer",
"device": device,
"train_size": 0.95,
}
torch.cuda.empty_cache()
import gc
gc.collect()
df_path = "/kaggle/input/nq-prepared-8-4/nq_prepared_8_4.csv"
train_dataset, test_dataset = get_data(df_path, "short_answer", sample_cnt=40)
train_dataset
np.mean(train_dataset["label"])
qa = ActiveQA(config)
test_dataset["label"]
qa.train_label(train_dataset, test_dataset)
qa.train_answer(train_dataset, test_dataset)
import transformers
transformers.__version__
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/203/129203514.ipynb
| null | null |
[{"Id": 129203514, "ScriptId": 37978823, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6054920, "CreationDate": "05/11/2023 19:38:12", "VersionNumber": 19.0, "Title": "active learning v1", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 410.0, "LinesInsertedFromPrevious": 28.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 382.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 4,625 | 0 | 4,625 | 4,625 |
||
129203939
|
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("dark_background") # вкл тёмную тему для графики
from phik.report import plot_correlation_matrix
from catboost import CatBoostRegressor
# Only the steps that actually improved the result are published here; the many
# assorted and not-so-graceful attempts to improve the model in other directions are omitted
# load the data
df = pd.read_csv("/kaggle/input/leopard-challenge-regression/train.csv")
df.head()
len(df)
# look at the available columns
df.columns
# and where the categorical variables are
df.select_dtypes(include="object").head()
# look at the price distribution
df["Price"].hist()
# check where the missing values are
df.isna().mean()
# locate the rows with missing values in Regionname, Propertycount, CouncilArea
df[
df["CouncilArea"].isnull()
| df["Regionname"].isnull()
| df["Propertycount"].isnull()
][["Suburb", "Postcode", "CouncilArea", "Regionname", "Propertycount"]]
# so Regionname is Western Metropolitan, Propertycount is 7570.0, CouncilArea is Maribyrnong City Council
df[(df["Postcode"] == 3011.0) & (df["Suburb"] == "Footscray")][
["Suburb", "Postcode", "CouncilArea", "Regionname", "Propertycount"]
]
# fill in the missing values
df["CouncilArea"].fillna("Maribyrnong City Council", inplace=True)
df["Regionname"].fillna("Western Metropolitan", inplace=True)
df["Propertycount"].fillna(7570.0, inplace=True)
# check
df.isna().mean()
# There was also an attempt to fill the missing geodata with median coordinates per postcode and Suburb,
# but it did not improve the model. Filling the gaps in BuildingArea with a regression on Rooms
# (the two are strongly correlated) did not help either; a sketch of that attempt is shown below
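# (Added sketch, not from the original notebook) roughly how the BuildingArea-from-Rooms
# imputation mentioned above could look; the fit is shown but the imputation itself is
# left commented out because it did not improve the model
from sklearn.linear_model import LinearRegression
known_area = df.dropna(subset=["BuildingArea", "Rooms"])
area_lr = LinearRegression().fit(known_area[["Rooms"]], known_area["BuildingArea"])
# df.loc[df["BuildingArea"].isna(), "BuildingArea"] = area_lr.predict(df.loc[df["BuildingArea"].isna(), ["Rooms"]])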
# take a small sample of the data to estimate feature influence with phik
df_exp = df.sample(frac=0.05, random_state=42)
phik_overview = df_exp.phik_matrix()
# look at the phik dependence with Price
phik_overview["Price"].sort_values(ascending=False)
# build the correlation matrix
df_exp.corr()
# The train/validation split check and the CV-based parameter tuning are omitted here
# (a minimal sketch of the CV step is shown right after the model definition below).
# While studying feature influence I ended up dropping 'Address' (weak influence, better without it),
# 'Bedroom2' (strongly correlated with Rooms, better without it), 'Propertycount' and 'id' (weak influence, better without them)
X = [
"Suburb",
"Rooms",
"Type",
"Method",
"SellerG",
"Date",
"Distance",
"Postcode",
"Bathroom",
"Car",
"Landsize",
"BuildingArea",
"YearBuilt",
"CouncilArea",
"Lattitude",
"Longtitude",
"Regionname",
]
cat_features = [
"Suburb",
"Type",
"Method",
"SellerG",
"Date",
"CouncilArea",
"Regionname",
]
y = ["Price"]
# set the model parameters
parameters = {
"cat_features": cat_features,
"loss_function": "MAE",
"eval_metric": "MAPE",
"learning_rate": 0.1,
"random_seed": 42,
"verbose": 130,
}
model = CatBoostRegressor(**parameters)
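# (Added sketch, not from the original notebook) a minimal version of the CV check
# mentioned earlier; the fold count is an assumption. cat_features are supplied via
# Pool, so they are stripped from the params dict here.
from catboost import Pool, cv
cv_pool = Pool(df[X], df["Price"], cat_features=cat_features)
cv_params = {k: v for k, v in parameters.items() if k != "cat_features"}
cv_scores = cv(cv_pool, cv_params, fold_count=5)
cv_scores.tail(1)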
# train on the full sample
model.fit(df[X], df[y])
# load the test data
test_df = pd.read_csv("/kaggle/input/leopard-challenge-regression/test.csv")
test_df.head()
# select the required columns
test_df_test = test_df.loc[
:,
[
"Suburb",
"Rooms",
"Type",
"Method",
"SellerG",
"Date",
"Distance",
"Postcode",
"Bathroom",
"Car",
"Landsize",
"BuildingArea",
"YearBuilt",
"CouncilArea",
"Lattitude",
"Longtitude",
"Regionname",
],
]
# check the missing values
test_df_test.isna().mean()
# locate the rows with missing Regionname and CouncilArea values
test_df_test[
test_df_test["CouncilArea"].isnull() | test_df_test["Regionname"].isnull()
][["Suburb", "Postcode", "CouncilArea", "Regionname"]]
# so Regionname is Southern Metropolitan and CouncilArea is Boroondara City Council
test_df_test[
(test_df_test["Postcode"] == 3124.0) & (test_df_test["Suburb"] == "Camberwell")
][["Suburb", "Postcode", "CouncilArea", "Regionname"]]
# check that this Suburb/Postcode combination corresponds to exactly one region
test_df_test[
(test_df_test["Postcode"] == 3124.0) & (test_df_test["Suburb"] == "Camberwell")
]["Regionname"].nunique()
# fill in (CouncilArea per the note above: Boroondara City Council)
test_df_test.loc[
    (test_df_test["Postcode"] == 3124.0) & (test_df_test["Suburb"] == "Camberwell"),
    ["CouncilArea"],
] = "Boroondara City Council"
test_df_test.loc[
(test_df_test["Postcode"] == 3124.0) & (test_df_test["Suburb"] == "Camberwell"),
["Regionname"],
] = "Southern Metropolitan"
# and here there is no data to fill these from
test_df_test[(test_df_test["Suburb"] == "Fawkner Lot")][
["Suburb", "Postcode", "CouncilArea", "Regionname"]
]
# fill the remaining values with no_info
test_df_test["CouncilArea"].fillna("no_info", inplace=True)
test_df_test["Regionname"].fillna("no_info", inplace=True)
# check
test_df_test.isna().mean()
# apply the model
test_df["Price"] = model.predict(test_df_test)
test_df.head(3)
# export the results
result_df = test_df.loc[:, ["id", "Price"]]
result_df.to_csv("ptv_submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/203/129203939.ipynb
| null | null |
[{"Id": 129203939, "ScriptId": 38411630, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14702712, "CreationDate": "05/11/2023 19:44:10", "VersionNumber": 1.0, "Title": "First price prediction", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 130.0, "LinesInsertedFromPrevious": 130.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
| false | 0 | 1,958 | 2 | 1,958 | 1,958 |
||
129203627
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from lazypredict.Supervised import LazyClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
results = {}
# def train_df(filename):
df = pd.read_csv(
"/kaggle/input/ddos-evaluation-dataset-cic-ddos2019/CSV-01-12/01-12/UDPLag.csv",
index_col=0,
)
m = list(df[" Label"].value_counts().index)
j = {}
h = 1
for i in m:
if i == "BENIGN":
j[i] = 0
else:
j[i] = h
h += 1
df[" Label"] = [j[x] for x in df[" Label"]]
df = df.dropna()
y = df[" Label"]
X = df.drop(
columns=[
"Flow ID",
" Label",
" Source IP",
" Destination IP",
" Timestamp",
"SimillarHTTP",
]
).clip(-1e6, 1e6)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=123
)
# print(X_train.to_numpy().shape)
# print(y_train.to_numpy().shape)
# print(y_train.head())
clf = LazyClassifier(verbose=0, ignore_warnings=False)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
models
# print(predictions)
# results[filename.split('/')[-1].replace('.csv', '')] = models
# train_df('/kaggle/input/ddos-evaluation-dataset-cic-ddos2019/CSV-01-12/01-12/UDPLag.csv')
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/203/129203627.ipynb
| null | null |
[{"Id": 129203627, "ScriptId": 38409016, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6678451, "CreationDate": "05/11/2023 19:39:44", "VersionNumber": 1.0, "Title": "notebook5001e360d2", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 66.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 629 | 0 | 629 | 629 |
||
129203181
|
<jupyter_start><jupyter_text>Financial Sentiment Analysis
### Data
The following data is intended for advancing financial sentiment analysis research. It's two datasets (FiQA, Financial PhraseBank) combined into one easy-to-use CSV file. It provides financial sentences with sentiment labels.
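A minimal way to load the combined CSV with pandas (the exact file name inside the Kaggle dataset folder is an assumption):
import pandas as pd
fin_df = pd.read_csv("/kaggle/input/financial-sentiment-analysis/data.csv")
fin_df.head()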
### Citations
Malo, Pekka, et al. "Good debt or bad debt: Detecting semantic orientations in economic texts." *Journal of the Association for Information Science and Technology* 65.4 (2014): 782-796.
Kaggle dataset identifier: financial-sentiment-analysis
<jupyter_script># 
# Sentiment Analysis on CU Tweets
# **SNSCRAPE**
# > snscrape is a scraper for social networking services (SNS). It scrapes things like user profiles, hashtags, or searches and returns the discovered items, e.g. the relevant posts.
# Required Libraries
import re
import numpy as np
import string
import warnings
import pandas as pd
from tqdm import tqdm
from tqdm.notebook import tqdm
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Configuration
tqdm.pandas()
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", None)
nltk.download("stopwords")
nltk.download("wordnet")
# > The data extraction itself is handled by the snscrape CLI call shown below.
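# (Added example, not from the original notebook) a typical snscrape CLI call that
# would produce the JSON file read below; the query text and result limit are assumptions.
!snscrape --jsonl --max-results 5000 twitter-search "Chandigarh University" > /kaggle/working/text-query-tweets.json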
# > Data loading and processing begins with removing the irrelevant columns, as shown below.
# Reading the json file using pandas
tweets_df = pd.read_json("/kaggle/working/text-query-tweets.json", lines=True)
# Dropping irrelevant columns
dropping_columns = [
"_type",
"url",
"date",
"rawContent",
"id",
"user",
"quoteCount",
"conversationId",
"lang",
"source",
"sourceUrl",
"sourceLabel",
"links",
"media",
"retweetedTweet",
"quotedTweet",
"inReplyToTweetId",
"inReplyToUser",
"mentionedUsers",
"coordinates",
"place",
"cashtags",
"card",
"vibe",
"bookmarkCount",
"content",
"outlinks",
"outlinksss",
"tcooutlinks",
"tcooutlinksss",
]
tweets_df.drop(dropping_columns, axis=1, inplace=True)
tweets_df.head()
tweets_df.info()
# > The tweets are sorted by likeCount to prioritise the most significant tweets about the organisation; other factors such as view count, retweets or replies could be considered as well (in later versions).
tweets_df = tweets_df.sort_values("likeCount", ascending=False)
# Function for the full text preprocessing
def tPreprocessing(tweet):
wordnet_lemmatizer = WordNetLemmatizer()
tweet = re.sub(r"https?:\/\/.*[\r\n]*", "", tweet)
tweet = re.sub("\n", " ", tweet)
tweet = re.sub(r"#", "", tweet)
tweet = re.sub(r"[^a-zA-Z0-9.?! ]+", "", tweet)
tweet = tweet.lower()
tweet_tokens = nltk.word_tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
if (
word not in set(stopwords.words("english"))
and word not in string.punctuation
and len(word) > 2
):
s = wordnet_lemmatizer.lemmatize(word)
tweets_clean.append(s)
return np.array(tweets_clean)
tweets_df["renderedContent"] = tweets_df["renderedContent"].progress_apply(
lambda x: tPreprocessing(x)
)
tweets_df
# > The model is a TensorFlow BERT encoder with a default (untrained) classification head, kept as-is because no sentiment labels are available for this text data.
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
bert_preprocessor = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
)
bert_encoder = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4"
)
def build_model():
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name="text_input")
encoder_input = bert_preprocessor(text_input)
output = bert_encoder(encoder_input)
o = tf.keras.layers.Dropout(0.1, name="Dropout")(output["pooled_output"])
o = tf.keras.layers.Dense(units=1, activation="sigmoid")(o)
model = tf.keras.Model(inputs=[text_input], outputs=[o])
return model
model = build_model()
Y = np.zeros(len(tweets_df))
for i, x in tqdm(enumerate(tweets_df["renderedContent"])):
Y[i] = model.predict(x).mean()
prediction = np.where(Y >= 0.5, "negative", "positive")
tweets_df.to_csv("cu_tweets.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/203/129203181.ipynb
|
financial-sentiment-analysis
|
sbhatti
|
[{"Id": 129203181, "ScriptId": 37517405, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8945642, "CreationDate": "05/11/2023 19:33:26", "VersionNumber": 2.0, "Title": "Sentiment Analysis on Chandigarh University", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 89.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 26.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185039136, "KernelVersionId": 129203181, "SourceDatasetVersionId": 3205803}]
|
[{"Id": 3205803, "DatasetId": 1918992, "DatasourceVersionId": 3255590, "CreatorUserId": 9271896, "LicenseName": "CC0: Public Domain", "CreationDate": "02/19/2022 21:23:44", "VersionNumber": 4.0, "Title": "Financial Sentiment Analysis", "Slug": "financial-sentiment-analysis", "Subtitle": "Financial sentences with sentiment labels", "Description": "### Data\nThe following data is intended for advancing financial sentiment analysis research. It's two datasets (FiQA, Financial PhraseBank) combined into one easy-to-use CSV file. It provides financial sentences with sentiment labels.\n\n\n### Citations\nMalo, Pekka, et al. \"Good debt or bad debt: Detecting semantic orientations in economic texts.\" *Journal of the Association for Information Science and Technology* 65.4 (2014): 782-796.", "VersionNotes": "Fix unnamed column issues", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1918992, "CreatorUserId": 9271896, "OwnerUserId": 9271896.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3205803.0, "CurrentDatasourceVersionId": 3255590.0, "ForumId": 1942512, "Type": 2, "CreationDate": "02/07/2022 20:19:58", "LastActivityDate": "02/07/2022", "TotalViews": 63930, "TotalDownloads": 7850, "TotalVotes": 91, "TotalKernels": 44}]
|
[{"Id": 9271896, "UserName": "sbhatti", "DisplayName": "sbhatti", "RegisterDate": "12/28/2021", "PerformanceTier": 2}]
|
# 
# Sentiment Analysis on CU Tweets
# **SNSCRAPE**
# > snscrape is a scraper for social networking services (SNS). It scrapes things like user profiles, hashtags, or searches and returns the discovered items, e.g. the relevant posts.
# Required Libraries
import re
import numpy as np
import string
import warnings
import pandas as pd
from tqdm import tqdm
from tqdm.notebook import tqdm
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Configuration
tqdm.pandas()
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", None)
nltk.download("stopwords")
nltk.download("wordnet")
# > The main theme of Data Extraction is handled by the below cli statement.
# > The Process of Data Loading and Processing begins with removing irrevalant column as stated below
# Reading the json file using pandas
tweets_df = pd.read_json("/kaggle/working/text-query-tweets.json", lines=True)
# Dropping Irrevalant Columns
dropping_columns = [
"_type",
"url",
"date",
"rawContent",
"id",
"user",
"quoteCount",
"conversationId",
"lang",
"source",
"sourceUrl",
"sourceLabel",
"links",
"media",
"retweetedTweet",
"quotedTweet",
"inReplyToTweetId",
"inReplyToUser",
"mentionedUsers",
"coordinates",
"place",
"cashtags",
"card",
"vibe",
"bookmarkCount",
"content",
"outlinks",
"outlinksss",
"tcooutlinks",
"tcooutlinksss",
]
tweets_df.drop(dropping_columns, axis=1, inplace=True)
tweets_df.head()
tweets_df.info()
# > The Tweets are sorted on the basis of Likecount to achive the priorrity and signifance of a particular tweet about organization other factors such as view_count and retweet or reply can be considered as well. (Later in next versions)
tweets_df = tweets_df.sort_values("likeCount", ascending=False)
# Function for whole Processing of texts
def tPreprocessing(tweet):
wordnet_lemmatizer = WordNetLemmatizer()
tweet = re.sub(r"https?:\/\/.*[\r\n]*", "", tweet)
tweet = re.sub("\n", " ", tweet)
tweet = re.sub(r"#", "", tweet)
tweet = re.sub(r"[^a-zA-Z0-9.?! ]+", "", tweet)
tweet = tweet.lower()
tweet_tokens = nltk.word_tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
if (
word not in set(stopwords.words("english"))
and word not in string.punctuation
and len(word) > 2
):
s = wordnet_lemmatizer.lemmatize(word)
tweets_clean.append(s)
return np.array(tweets_clean)
tweets_df["renderedContent"] = tweets_df["renderedContent"].progress_apply(
lambda x: tPreprocessing(x)
)
tweets_df
# > The Model selection part consist of Tensorflow Bert and the default weight are maintained due to unavailibilty of labels on the textual data
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
bert_preprocessor = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
)
bert_encoder = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4"
)
def build_model():
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name="text_input")
encoder_input = bert_preprocessor(text_input)
output = bert_encoder(encoder_input)
o = tf.keras.layers.Dropout(0.1, name="Dropout")(output["pooled_output"])
o = tf.keras.layers.Dense(units=1, activation="sigmoid")(o)
model = tf.keras.Model(inputs=[text_input], outputs=[o])
return model
model = build_model()
Y = np.zeros(len(tweets_df))
for i, x in tqdm(enumerate(tweets_df["renderedContent"])):
Y[i] = model.predict(x).mean()
prediction = np.where(Y >= 0.5, "negative", "positive")
tweets_df.to_csv("cu_tweets.csv", index=False)
| false | 0 | 1,163 | 0 | 1,302 | 1,163 |
||
129397965
|
# Summary of the Competition
# 🥅 The goal of the competition is to create a summary of the advancements of AI in the past 2 years, in one of the broad cathegories that Kaggle provides.
# 🧑⚖️ The submissions will be graded by a panel of Kaggle grandmasters, while participants are urged to peer review at least 3 other teams to be eligible for grading and prizes.
# The proposed cathegories that Kaggle gives are:
# Text data.
# Image and/or video data.
# Tabular and/or time series data.
# Kaggle Competitions.
# Generative AI.
# AI ethics.
# Other.
#
# Purpose of this Notebook
# The purpose of this notebook is to present some interesting subgroups to the broader groups given by Kaggle, so that both experienced and more novice users can read about niche topics that appeal to them, without having to go through all of the papers in each cathegory. This notebook is still a Work In Progress, so more topics will be added in the future.
# ✏️ Comments/Suggestions greatly appreciated!
# # Imports
import pandas as pd
import numpy as np
import json
from datetime import datetime
# # Basic preparation of the dataset
df = pd.read_csv("/kaggle/input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv")
df["Competition Launch Date"] = pd.to_datetime(df["Competition Launch Date"])
df["Date of Writeup"] = pd.to_datetime(df["Date of Writeup"])
df.drop(df[df["Date of Writeup"] < "2021-01-01"].index, inplace=True)
df.head()
# # Arxiv Articles
# Following the approach of [@leonydkulik ](https://www.kaggle.com/leonidkulyk) in his notebook ( [Link to Notebook](https://www.kaggle.com/code/leonidkulyk/kaggle-ai-report-topic-selection), definitely give it a look, it is **amazing** ) we will be filtering the massive arxiv dataset firstly by date of publication, and after, by our different subtopics.
# import JSON data
dict_arxiv = []
for line in open(
"/kaggle/input/2023-kaggle-ai-report/arxiv_metadata_20230510.json", "r"
):
dict_arxiv.append(json.loads(line))
date_format = "%a, %d %b %Y %H:%M:%S %Z"
limit_year = datetime.strptime("2021", "%Y")
filtered_arxiv = []
for article_meta in dict_arxiv:
parsed_date = datetime.strptime(article_meta["versions"][0]["created"], date_format)
if parsed_date >= limit_year:
filtered_arxiv.append(article_meta)
# # Subtopics
# # Finance and Investing
# We will first be filtering by the different Arxiv cathegories, the main cathegory will be Quantitative Finance.
fincat = [
"q-fin.CP",
"q-fin.EC",
"q-fin.GN",
"q-fin.MF",
"q-fin.PM",
"q-fin.PR",
"q-fin.RM",
"q-fin.ST",
"q-fin.TR",
]
fin_articles = []
aicat = ["cs.AI", "cs.DS", "cs.LG", "cs.NE", "cs.RO", "cs.CL"]
for article in filtered_arxiv:
print(article["categories"])
if article["categories"] in fincat:
fin_articles.append(article)
for article in fin_articles:
print(article)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/397/129397965.ipynb
| null | null |
[{"Id": 129397965, "ScriptId": 38467267, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13141352, "CreationDate": "05/13/2023 12:52:33", "VersionNumber": 1.0, "Title": "Kaggle AI Report | Topic Suggestions", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 78.0, "LinesInsertedFromPrevious": 78.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Summary of the Competition
# 🥅 The goal of the competition is to create a summary of the advancements of AI in the past 2 years, in one of the broad cathegories that Kaggle provides.
# 🧑⚖️ The submissions will be graded by a panel of Kaggle grandmasters, while participants are urged to peer review at least 3 other teams to be eligible for grading and prizes.
# The proposed cathegories that Kaggle gives are:
# Text data.
# Image and/or video data.
# Tabular and/or time series data.
# Kaggle Competitions.
# Generative AI.
# AI ethics.
# Other.
#
# Purpose of this Notebook
# The purpose of this notebook is to present some interesting subgroups to the broader groups given by Kaggle, so that both experienced and more novice users can read about niche topics that appeal to them, without having to go through all of the papers in each cathegory. This notebook is still a Work In Progress, so more topics will be added in the future.
# ✏️ Comments/Suggestions greatly appreciated!
# # Imports
import pandas as pd
import numpy as np
import json
from datetime import datetime
# # Basic preparation of the dataset
df = pd.read_csv("/kaggle/input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv")
df["Competition Launch Date"] = pd.to_datetime(df["Competition Launch Date"])
df["Date of Writeup"] = pd.to_datetime(df["Date of Writeup"])
df.drop(df[df["Date of Writeup"] < "2021-01-01"].index, inplace=True)
df.head()
# # Arxiv Articles
# Following the approach of [@leonydkulik ](https://www.kaggle.com/leonidkulyk) in his notebook ( [Link to Notebook](https://www.kaggle.com/code/leonidkulyk/kaggle-ai-report-topic-selection), definitely give it a look, it is **amazing** ) we will be filtering the massive arxiv dataset firstly by date of publication, and after, by our different subtopics.
# import JSON data
dict_arxiv = []
for line in open(
"/kaggle/input/2023-kaggle-ai-report/arxiv_metadata_20230510.json", "r"
):
dict_arxiv.append(json.loads(line))
date_format = "%a, %d %b %Y %H:%M:%S %Z"
limit_year = datetime.strptime("2021", "%Y")
filtered_arxiv = []
for article_meta in dict_arxiv:
parsed_date = datetime.strptime(article_meta["versions"][0]["created"], date_format)
if parsed_date >= limit_year:
filtered_arxiv.append(article_meta)
# # Subtopics
# # Finance and Investing
# We will first be filtering by the different Arxiv cathegories, the main cathegory will be Quantitative Finance.
fincat = [
"q-fin.CP",
"q-fin.EC",
"q-fin.GN",
"q-fin.MF",
"q-fin.PM",
"q-fin.PR",
"q-fin.RM",
"q-fin.ST",
"q-fin.TR",
]
fin_articles = []
aicat = ["cs.AI", "cs.DS", "cs.LG", "cs.NE", "cs.RO", "cs.CL"]
for article in filtered_arxiv:
print(article["categories"])
if article["categories"] in fincat:
fin_articles.append(article)
for article in fin_articles:
print(article)
| false | 0 | 910 | 0 | 910 | 910 |
||
129397540
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import (
train_test_split,
KFold,
StratifiedKFold,
GridSearchCV,
)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.metrics import (
balanced_accuracy_score,
make_scorer,
log_loss,
roc_auc_score,
f1_score,
)
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
import xgboost as xgb
## load data
def load_data(path: str, train: bool = True):
df = pd.read_csv(path)
if train:
return df.drop(columns="Class"), df.Class
else:
return df, pd.Series()
raw_feats, labels = load_data(
"/kaggle/input/icr-identify-age-related-conditions/train.csv"
)
raw_feats
labels.value_counts()
raw_feats["EJ"].value_counts()
# Perform one-hot encoding on the 'EJ' column in raw_feats with integer dtype
raw_feats = pd.get_dummies(raw_feats, columns=["EJ"], dtype=int)
def plot_correlation_heatmap(dataframe: pd.DataFrame, top_n: int = 10) -> None:
"""
Plots a correlation heatmap for the top N most correlated columns in the given dataframe,
excluding the first column.
Parameters:
dataframe (pd.DataFrame): The input dataframe.
top_n (int): The number of most correlated columns to display in the heatmap. Default is 10.
Returns:
None
"""
# Exclude the first column (ID column) from correlation analysis
dataframe = dataframe.iloc[:, 1:]
# Compute the column-wise Pearson correlation
correlation_matrix = dataframe.corr(method="pearson").abs()
# Get the top N most correlated columns
top_cols = correlation_matrix.nlargest(top_n, correlation_matrix.columns).index
# Create a heatmap using matplotlib
plt.figure(figsize=(12, 10))
plt.imshow(
correlation_matrix.loc[top_cols, top_cols],
cmap="coolwarm",
interpolation="nearest",
)
plt.colorbar(label="Pearson Correlation")
# Add correlation values in the heatmap cells
for i in range(len(top_cols)):
for j in range(len(top_cols)):
text = plt.text(
j,
i,
"{:.2f}".format(correlation_matrix.loc[top_cols[i], top_cols[j]]),
ha="center",
va="center",
color="w",
)
plt.xticks(np.arange(len(top_cols)), top_cols, rotation=90)
plt.yticks(np.arange(len(top_cols)), top_cols)
plt.title("Top {} Pearson Correlation Heatmap".format(top_n))
plt.show()
# Call the function to plot the correlation heatmap for the top 10 columns
plot_correlation_heatmap(raw_feats, top_n=20)
raw_feats.iloc[:, 1:].corrwith(labels).sort_values(ascending=False)
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")[
["Id", "Epsilon"]
]
## date maybe usefull for splitting
raw_feats = raw_feats.merge(greeks)
raw_feats["Epsilon"] = pd.to_datetime(raw_feats["Epsilon"], errors="coerce")
# TTS for local validation
holdout_sizes = 10, 50
hold_out_ids = pd.concat(
[
raw_feats[labels == 1]["Epsilon"].sort_values().dropna().tail(holdout_sizes[0]),
raw_feats[labels == 0]["Epsilon"].sort_values().dropna().tail(holdout_sizes[1]),
]
).index
hold_in_ids = raw_feats[~raw_feats.index.isin(hold_out_ids)].index
X_train, y_train = raw_feats.loc[hold_in_ids].iloc[:, 1:-1], labels.loc[hold_in_ids]
X_test, y_test = raw_feats.loc[hold_out_ids].iloc[:, 1:-1], labels.loc[hold_out_ids]
def train_model(
model, param_grid, X_train, X_test, y_train, y_test, impute=False, scale=True
):
# Define the balanced log loss scorer
def balanced_log_loss(y_true, y_pred):
class_weights = class_weight.compute_class_weight(
"balanced", classes=np.unique(y_train), y=y_train
)
weights = class_weights[y_true.astype(int)]
loss = log_loss(y_true, y_pred, sample_weight=weights)
return loss
# Create the pipeline
steps = []
if impute:
steps.append(("imputer", SimpleImputer()))
if scale:
steps.append(("scaler", StandardScaler()))
steps.append(("classifier", model))
pipeline = Pipeline(steps)
# Define the scoring functions for grid search
scoring = {
"Balanced Log Loss": make_scorer(balanced_log_loss, greater_is_better=False),
"ROC-AUC": make_scorer(roc_auc_score),
"F1 Score": make_scorer(f1_score),
}
# Perform grid search
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=42).split(
X_train, y_train
)
grid_search = GridSearchCV(
pipeline,
param_grid=param_grid,
scoring=scoring,
refit="Balanced Log Loss",
cv=cv,
n_jobs=-1,
)
grid_search.fit(X_train, y_train)
# Get the best estimator and its parameters
best_estimator = grid_search.best_estimator_
best_params = grid_search.best_params_
# Estimate the best model on the test set
y_pred_test = best_estimator.predict(X_test)
balanced_accuracy = balanced_accuracy_score(y_test, y_pred_test)
logloss = log_loss(y_test, best_estimator.predict_proba(X_test))
roc_auc = roc_auc_score(y_test, best_estimator.predict_proba(X_test)[:, 1])
f1 = f1_score(y_test, y_pred_test)
return best_estimator, best_params, balanced_accuracy, logloss, roc_auc, f1
# xgboost
# Define the parameter grid for the grid search
param_grid = {
"classifier__n_estimators": [200, 500, 750, 1000, 1500],
"classifier__max_depth": [3, 4, 5, 6],
"classifier__learning_rate": [0.03, 0.01, 0.005],
"classifier__reg_alpha": [1e-5, 1e-3, 0.1],
}
# Calculate the class weights
class_weights = class_weight.compute_class_weight(
"balanced", classes=np.unique(y_train), y=y_train
)
model = xgb.XGBClassifier(
random_state=42,
objective="binary:logistic",
scale_pos_weight=class_weights[1],
n_jobs=-1,
)
best_estimator, best_params, balanced_accuracy, logloss, roc_auc, f1 = train_model(
model, param_grid, X_train, X_test, y_train, y_test
)
best_params
best_estimator
# Print the evaluation metrics
print("Best Parameters:", best_params)
print("Balanced Accuracy on Test Set:", balanced_accuracy)
print("Log Loss on Test Set:", logloss)
print("ROC-AUC on Test Set:", roc_auc)
print("F1 Score on Test Set:", f1)
importance_values = best_estimator.named_steps["classifier"].feature_importances_
feature_names = X_train.columns
# Plot feature importances
plt.figure(figsize=(8, 12))
plt.barh(range(len(importance_values)), importance_values, tick_label=feature_names)
plt.xlabel("Feature Importance")
plt.ylabel("Feature")
plt.title("Feature Importances")
plt.show()
# Sort feature importances in descending order
sorted_indices = np.argsort(importance_values)[::-1]
# Select the top 30 most important features
top_features = feature_names[sorted_indices[:30]]
top_features
# random forest
# Define the parameter grid for the grid search
param_grid = {
"classifier__n_estimators": [100, 200, 300],
"classifier__max_depth": [3, 4, 5, 6, 8, 10],
"classifier__criterion": ["gini", "log_loss"],
"classifier__max_features": ["sqrt", 0.5, 0.7, 0.9],
}
model = RandomForestClassifier(
class_weight="balanced_subsample", n_jobs=-1, random_state=42
)
(
best_estimator_rf,
best_params_rf,
balanced_accuracy_rf,
logloss_rf,
roc_auc_rf,
f1_rf,
) = train_model(
model,
param_grid,
X_train,
X_test,
y_train,
y_test,
impute=True,
)
# Print the evaluation metrics
print("Best Parameters:", best_params_rf)
print("Balanced Accuracy on Test Set:", balanced_accuracy_rf)
print("Log Loss on Test Set:", logloss_rf)
print("ROC-AUC on Test Set:", roc_auc_rf)
print("F1 Score on Test Set:", f1_rf)
# KNN
# Define the parameter grid for the grid search
param_grid = {
"classifier__n_neighbors": [2, 3, 5, 7, 10],
"classifier__weights": ["uniform", "distance"],
}
model = KNeighborsClassifier(n_jobs=-1)
(
best_estimator_knn,
best_params_knn,
balanced_accuracy_knn,
logloss_knn,
roc_auc_knn,
f1_knn,
) = train_model(
model,
param_grid,
X_train,
X_test,
y_train,
y_test,
impute=True,
)
# Print the evaluation metrics
print("Best Parameters:", best_params_knn)
print("Balanced Accuracy on Test Set:", balanced_accuracy_knn)
print("Log Loss on Test Set:", logloss_knn)
print("ROC-AUC on Test Set:", roc_auc_knn)
print("F1 Score on Test Set:", f1_knn)
# Voting ensemble
# Define the parameter grid for the grid search
param_grid = {
"classifier__voting": ["soft"],
}
estimators = [
("xgb", best_estimator),
("rf", best_estimator_rf),
("knn", best_estimator_knn),
]
model = VotingClassifier(estimators, n_jobs=-1)
(
best_estimator_ens,
best_params_ens,
balanced_accuracy_ens,
logloss_ens,
roc_auc_ens,
f1_ens,
) = train_model(
model,
param_grid,
X_train,
X_test,
y_train,
y_test,
impute=True,
)
# Print the evaluation metrics
print("Best Parameters:", best_params_ens)
print("Balanced Accuracy on Test Set:", balanced_accuracy_ens)
print("Log Loss on Test Set:", logloss_ens)
print("ROC-AUC on Test Set:", roc_auc_ens)
print("F1 Score on Test Set:", f1_ens)
# final fit to all available data
best_estimator_ens.fit(raw_feats.iloc[:, 1:-1], labels)
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
test = pd.get_dummies(test, columns=["EJ"], dtype=int)
if "EJ_B" not in test.columns:
test["EJ_B"] = 0
probas = best_estimator_ens.predict_proba(test.iloc[:, 1:])
out = pd.DataFrame(
{
"Id": test.iloc[:, 0],
"class_0": probas[:, 0],
"class_1": probas[:, 1],
}
)
out.to_csv(r"submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/397/129397540.ipynb
| null | null |
[{"Id": 129397540, "ScriptId": 38473905, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12961935, "CreationDate": "05/13/2023 12:49:13", "VersionNumber": 1.0, "Title": "\ud83d\udcab Voting Ensemble [xgb, rf, knn]\ud83e\udd47", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 292.0, "LinesInsertedFromPrevious": 292.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 8}]
| null | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import (
train_test_split,
KFold,
StratifiedKFold,
GridSearchCV,
)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.metrics import (
balanced_accuracy_score,
make_scorer,
log_loss,
roc_auc_score,
f1_score,
)
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
import xgboost as xgb
## load data
def load_data(path: str, train: bool = True):
df = pd.read_csv(path)
if train:
return df.drop(columns="Class"), df.Class
else:
return df, pd.Series()
raw_feats, labels = load_data(
"/kaggle/input/icr-identify-age-related-conditions/train.csv"
)
raw_feats
labels.value_counts()
raw_feats["EJ"].value_counts()
# Perform one-hot encoding on the 'EJ' column in raw_feats with integer dtype
raw_feats = pd.get_dummies(raw_feats, columns=["EJ"], dtype=int)
def plot_correlation_heatmap(dataframe: pd.DataFrame, top_n: int = 10) -> None:
"""
Plots a correlation heatmap for the top N most correlated columns in the given dataframe,
excluding the first column.
Parameters:
dataframe (pd.DataFrame): The input dataframe.
top_n (int): The number of most correlated columns to display in the heatmap. Default is 10.
Returns:
None
"""
# Exclude the first column (ID column) from correlation analysis
dataframe = dataframe.iloc[:, 1:]
# Compute the column-wise Pearson correlation
correlation_matrix = dataframe.corr(method="pearson").abs()
# Get the top N most correlated columns
top_cols = correlation_matrix.nlargest(top_n, correlation_matrix.columns).index
# Create a heatmap using matplotlib
plt.figure(figsize=(12, 10))
plt.imshow(
correlation_matrix.loc[top_cols, top_cols],
cmap="coolwarm",
interpolation="nearest",
)
plt.colorbar(label="Pearson Correlation")
# Add correlation values in the heatmap cells
for i in range(len(top_cols)):
for j in range(len(top_cols)):
text = plt.text(
j,
i,
"{:.2f}".format(correlation_matrix.loc[top_cols[i], top_cols[j]]),
ha="center",
va="center",
color="w",
)
plt.xticks(np.arange(len(top_cols)), top_cols, rotation=90)
plt.yticks(np.arange(len(top_cols)), top_cols)
plt.title("Top {} Pearson Correlation Heatmap".format(top_n))
plt.show()
# Call the function to plot the correlation heatmap for the top 10 columns
plot_correlation_heatmap(raw_feats, top_n=20)
raw_feats.iloc[:, 1:].corrwith(labels).sort_values(ascending=False)
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")[
["Id", "Epsilon"]
]
## date maybe usefull for splitting
raw_feats = raw_feats.merge(greeks)
raw_feats["Epsilon"] = pd.to_datetime(raw_feats["Epsilon"], errors="coerce")
# TTS for local validation
holdout_sizes = 10, 50
hold_out_ids = pd.concat(
[
raw_feats[labels == 1]["Epsilon"].sort_values().dropna().tail(holdout_sizes[0]),
raw_feats[labels == 0]["Epsilon"].sort_values().dropna().tail(holdout_sizes[1]),
]
).index
hold_in_ids = raw_feats[~raw_feats.index.isin(hold_out_ids)].index
X_train, y_train = raw_feats.loc[hold_in_ids].iloc[:, 1:-1], labels.loc[hold_in_ids]
X_test, y_test = raw_feats.loc[hold_out_ids].iloc[:, 1:-1], labels.loc[hold_out_ids]
def train_model(
model, param_grid, X_train, X_test, y_train, y_test, impute=False, scale=True
):
# Define the balanced log loss scorer
def balanced_log_loss(y_true, y_pred):
class_weights = class_weight.compute_class_weight(
"balanced", classes=np.unique(y_train), y=y_train
)
weights = class_weights[y_true.astype(int)]
loss = log_loss(y_true, y_pred, sample_weight=weights)
return loss
# Create the pipeline
steps = []
if impute:
steps.append(("imputer", SimpleImputer()))
if scale:
steps.append(("scaler", StandardScaler()))
steps.append(("classifier", model))
pipeline = Pipeline(steps)
# Define the scoring functions for grid search
scoring = {
"Balanced Log Loss": make_scorer(balanced_log_loss, greater_is_better=False),
"ROC-AUC": make_scorer(roc_auc_score),
"F1 Score": make_scorer(f1_score),
}
# Perform grid search
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=42).split(
X_train, y_train
)
grid_search = GridSearchCV(
pipeline,
param_grid=param_grid,
scoring=scoring,
refit="Balanced Log Loss",
cv=cv,
n_jobs=-1,
)
grid_search.fit(X_train, y_train)
# Get the best estimator and its parameters
best_estimator = grid_search.best_estimator_
best_params = grid_search.best_params_
# Estimate the best model on the test set
y_pred_test = best_estimator.predict(X_test)
balanced_accuracy = balanced_accuracy_score(y_test, y_pred_test)
logloss = log_loss(y_test, best_estimator.predict_proba(X_test))
roc_auc = roc_auc_score(y_test, best_estimator.predict_proba(X_test)[:, 1])
f1 = f1_score(y_test, y_pred_test)
return best_estimator, best_params, balanced_accuracy, logloss, roc_auc, f1
# xgboost
# Define the parameter grid for the grid search
param_grid = {
"classifier__n_estimators": [200, 500, 750, 1000, 1500],
"classifier__max_depth": [3, 4, 5, 6],
"classifier__learning_rate": [0.03, 0.01, 0.005],
"classifier__reg_alpha": [1e-5, 1e-3, 0.1],
}
# Calculate the class weights
class_weights = class_weight.compute_class_weight(
"balanced", classes=np.unique(y_train), y=y_train
)
model = xgb.XGBClassifier(
random_state=42,
objective="binary:logistic",
scale_pos_weight=class_weights[1],
n_jobs=-1,
)
best_estimator, best_params, balanced_accuracy, logloss, roc_auc, f1 = train_model(
model, param_grid, X_train, X_test, y_train, y_test
)
best_params
best_estimator
# Print the evaluation metrics
print("Best Parameters:", best_params)
print("Balanced Accuracy on Test Set:", balanced_accuracy)
print("Log Loss on Test Set:", logloss)
print("ROC-AUC on Test Set:", roc_auc)
print("F1 Score on Test Set:", f1)
importance_values = best_estimator.named_steps["classifier"].feature_importances_
feature_names = X_train.columns
# Plot feature importances
plt.figure(figsize=(8, 12))
plt.barh(range(len(importance_values)), importance_values, tick_label=feature_names)
plt.xlabel("Feature Importance")
plt.ylabel("Feature")
plt.title("Feature Importances")
plt.show()
# Sort feature importances in descending order
sorted_indices = np.argsort(importance_values)[::-1]
# Select the top 30 most important features
top_features = feature_names[sorted_indices[:30]]
top_features
# random forest
# Define the parameter grid for the grid search
param_grid = {
"classifier__n_estimators": [100, 200, 300],
"classifier__max_depth": [3, 4, 5, 6, 8, 10],
"classifier__criterion": ["gini", "log_loss"],
"classifier__max_features": ["sqrt", 0.5, 0.7, 0.9],
}
model = RandomForestClassifier(
class_weight="balanced_subsample", n_jobs=-1, random_state=42
)
(
best_estimator_rf,
best_params_rf,
balanced_accuracy_rf,
logloss_rf,
roc_auc_rf,
f1_rf,
) = train_model(
model,
param_grid,
X_train,
X_test,
y_train,
y_test,
impute=True,
)
# Print the evaluation metrics
print("Best Parameters:", best_params_rf)
print("Balanced Accuracy on Test Set:", balanced_accuracy_rf)
print("Log Loss on Test Set:", logloss_rf)
print("ROC-AUC on Test Set:", roc_auc_rf)
print("F1 Score on Test Set:", f1_rf)
# KNN
# Define the parameter grid for the grid search
param_grid = {
"classifier__n_neighbors": [2, 3, 5, 7, 10],
"classifier__weights": ["uniform", "distance"],
}
model = KNeighborsClassifier(n_jobs=-1)
(
best_estimator_knn,
best_params_knn,
balanced_accuracy_knn,
logloss_knn,
roc_auc_knn,
f1_knn,
) = train_model(
model,
param_grid,
X_train,
X_test,
y_train,
y_test,
impute=True,
)
# Print the evaluation metrics
print("Best Parameters:", best_params_knn)
print("Balanced Accuracy on Test Set:", balanced_accuracy_knn)
print("Log Loss on Test Set:", logloss_knn)
print("ROC-AUC on Test Set:", roc_auc_knn)
print("F1 Score on Test Set:", f1_knn)
# Voting ensemble
# Define the parameter grid for the grid search
param_grid = {
"classifier__voting": ["soft"],
}
estimators = [
("xgb", best_estimator),
("rf", best_estimator_rf),
("knn", best_estimator_knn),
]
model = VotingClassifier(estimators, n_jobs=-1)
(
best_estimator_ens,
best_params_ens,
balanced_accuracy_ens,
logloss_ens,
roc_auc_ens,
f1_ens,
) = train_model(
model,
param_grid,
X_train,
X_test,
y_train,
y_test,
impute=True,
)
# Print the evaluation metrics
print("Best Parameters:", best_params_ens)
print("Balanced Accuracy on Test Set:", balanced_accuracy_ens)
print("Log Loss on Test Set:", logloss_ens)
print("ROC-AUC on Test Set:", roc_auc_ens)
print("F1 Score on Test Set:", f1_ens)
# final fit to all available data
best_estimator_ens.fit(raw_feats.iloc[:, 1:-1], labels)
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
test = pd.get_dummies(test, columns=["EJ"], dtype=int)
if "EJ_B" not in test.columns:
test["EJ_B"] = 0
probas = best_estimator_ens.predict_proba(test.iloc[:, 1:])
out = pd.DataFrame(
{
"Id": test.iloc[:, 0],
"class_0": probas[:, 0],
"class_1": probas[:, 1],
}
)
out.to_csv(r"submission.csv", index=False)
| false | 0 | 3,163 | 8 | 3,163 | 3,163 |
||
129274071
|
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia)
### Context
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5

Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
### Content
The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
Kaggle dataset identifier: chest-xray-pneumonia
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import cv2
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
if "PNEUMONIA" not in dirname and filename.endswith(".jpeg"):
img = cv2.imread(os.path.join(dirname, filename))
flipped_img = cv2.flip(img, 1)
print("/kaggle/working/" + filename + "_flipped.jpeg")
cv2.imwrite(r"/kaggle/working/" + filename + "_flipped.jpeg", flipped_img)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import random
import tensorflow as tf
from tensorflow.python.keras.models import Sequential, Model
from tensorflow.python.keras.layers import (
Input,
Conv2D,
Dense,
Flatten,
Dropout,
MaxPooling2D,
UpSampling2D,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.callbacks import ReduceLROnPlateau
total_images_train_normal = os.listdir(
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/NORMAL"
)
total_images_train_pneumonia = os.listdir(
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/PNEUMONIA"
)
image_height = 150
image_width = 150
batch_size = 10
no_of_epochs = 20
model = Sequential()
model.add(
Conv2D(32, (3, 3), input_shape=(image_height, image_width, 3), activation="relu")
)
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(units=128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(units=1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
train_datagen = ImageDataGenerator(
rescale=1.0 / 255, rotation_range=15, shear_range=0.2, zoom_range=0.2
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/train",
target_size=(image_width, image_height),
batch_size=batch_size,
class_mode="binary",
)
test_set = test_datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/test",
target_size=(image_width, image_height),
batch_size=batch_size,
class_mode="binary",
)
reduce_learning_rate = ReduceLROnPlateau(
monitor="loss", factor=0.1, patience=2, cooldown=2, min_lr=0.00001, verbose=1
)
callbacks = [reduce_learning_rate]
history = model.fit_generator(
training_set,
steps_per_epoch=5216 // batch_size,
epochs=no_of_epochs,
validation_data=test_set,
validation_steps=624 // batch_size,
callbacks=callbacks,
)
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(16, 9))
plt.subplot(1, 2, 1)
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
no_steps = len(test_set)
result = model.evaluate_generator(test_set, steps=no_steps)
print("Test-set classification accuracy: {0:.2%}".format(result[1]))
import glob
from pathlib import Path
from tensorflow.keras.utils import to_categorical
normal_cases_dir = Path("/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL")
pneumonia_cases_dir = Path(
"/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA"
)
normal_cases = normal_cases_dir.glob("*.jpeg")
pneumonia_cases = pneumonia_cases_dir.glob("*.jpeg")
test_data = []
test_labels = []
for img in normal_cases:
img = cv2.imread(str(img))
img = cv2.resize(img, (image_width, image_height))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
label = [0]
test_data.append(img)
test_labels.append(label)
for img in pneumonia_cases:
img = cv2.imread(str(img))
img = cv2.resize(img, (image_width, image_height))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
label = [1]
test_data.append(img)
test_labels.append(label)
test_data = np.array(test_data)
test_labels = np.array(test_labels)
print("Total number of test examples: ", test_data.shape)
print("Total number of labels:", test_labels.shape)
# Evaluation on test dataset
test_loss, test_score = model.evaluate(test_data, test_labels, batch_size=16)
print("Loss on test set: ", test_loss)
print("Accuracy on test set: ", test_score)
# Get predictions
preds = model.predict(test_data, batch_size=16, verbose=1)
preds = np.around(preds)
orig_test_labels = test_labels
print(preds.shape)
print(orig_test_labels.shape)
# Get the confusion matrix
from sklearn.metrics import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
cm = confusion_matrix(orig_test_labels, preds)
plt.figure()
plot_confusion_matrix(cm, figsize=(12, 8), hide_ticks=True, cmap=plt.cm.Blues)
plt.xticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.yticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.show()
# Calculate Precision and Recall
tn, fp, fn, tp = cm.ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
specificity = tn / (tn + fp)
print("Sensitivity (Recall) of the model is {:.2f}".format(recall))
print("Specificity of the model is {:.2f}".format(specificity))
print("Precision of the model is {:.2f}".format(precision))
# Create an ImageDataGenerator for data loading and augmentation
datagen = ImageDataGenerator(rescale=1.0 / 255.0)
# Load and preprocess your dataset using the ImageDataGenerator
batch_size = 32
target_size = (152, 152)
train_generator = datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/train", # Replace with the path to your image directory
target_size=target_size,
batch_size=batch_size,
class_mode=None,
shuffle=True,
)
validation_generator = datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/val", # Replace with the path to your validation image directory
target_size=target_size,
batch_size=batch_size,
class_mode=None,
shuffle=False,
)
# Define the denoising convolutional autoencoder architecture
input_img = Input(shape=(target_size[0], target_size[1], 3))
# Encoder
x = Conv2D(32, (3, 3), activation="relu", padding="same")(input_img)
x = MaxPooling2D((2, 2), padding="same")(x)
x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
encoded = MaxPooling2D((2, 2), padding="same")(x)
# Decoder
x = Conv2D(64, (3, 3), activation="relu", padding="same")(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation="sigmoid", padding="same")(x)
# Create the autoencoder model
autoencoder = Model(input_img, decoded)
# Compile the model
autoencoder.compile(optimizer="sgd", loss="mse", metrics=["accuracy"])
# Preprocess and normalize the images, and add noise
def preprocess_images(x):
x = x.astype("float32") / 255.0
noise_factor = 0.5
x_noisy = x + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x.shape)
x_noisy = np.clip(x_noisy, 0.0, 1.0)
return x_noisy, x
autoencoder.summary()
from tqdm import tqdm
# Training loop
epochs = 20
steps_per_epoch = len(train_generator)
validation_steps = len(validation_generator)
history = {"loss": [], "val_loss": [], "accuracy": [], "val_accuracy": []}
for epoch in range(epochs):
print(f"Epoch {epoch + 1}/{epochs}")
train_loss = 0.0
train_accuracy = 0.0
with tqdm(total=steps_per_epoch, desc="Training") as pbar:
# Training
for step in range(steps_per_epoch):
x_batch = next(train_generator)
x_noisy, x_clean = preprocess_images(x_batch)
loss_step, accuracy_step = autoencoder.train_on_batch(x_noisy, x_clean)
train_loss += loss_step
train_accuracy += accuracy_step
pbar.update(1)
pbar.set_postfix({"Loss": loss_step, "Accuracy": accuracy_step})
val_loss = 0.0
val_accuracy = 0.0
with tqdm(total=validation_steps, desc="Validation") as pbar:
# Validation
for step in range(validation_steps):
x_batch = next(validation_generator)
x_noisy, x_clean = preprocess_images(x_batch)
val_loss_step, val_accuracy_step = autoencoder.evaluate(
x_noisy, x_clean, verbose=0
)
val_loss += val_loss_step
val_accuracy += val_accuracy_step
pbar.update(1)
pbar.set_postfix({"Loss": val_loss_step, "Accuracy": val_accuracy_step})
# Calculate average losses and accuracies for the epoch
train_loss /= steps_per_epoch
train_accuracy /= steps_per_epoch
val_loss /= validation_steps
val_accuracy /= validation_steps
# Print the results for the epoch
print(
"Train Loss: {:.4f} - Train Accuracy: {:.4f} - Val Loss: {:.4f} - Val Accuracy: {:.4f}".format(
train_loss, train_accuracy, val_loss, val_accuracy
)
)
# Store the history
history["loss"].append(train_loss)
history["val_loss"].append(val_loss)
history["accuracy"].append(train_accuracy)
history["val_accuracy"].append(val_accuracy)
# Plot the loss
plt.figure(figsize=(10, 5))
plt.plot(history["loss"], label="Train Loss")
plt.plot(history["val_loss"], label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Loss")
plt.legend()
plt.show()
# Plot the accuracy
plt.figure(figsize=(10, 5))
plt.plot(history["accuracy"], label="Train Accuracy")
plt.plot(history["val_accuracy"], label="Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Train and Validation Accuracy")
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/274/129274071.ipynb
|
chest-xray-pneumonia
|
paultimothymooney
|
[{"Id": 129274071, "ScriptId": 38425032, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12661578, "CreationDate": "05/12/2023 11:09:04", "VersionNumber": 2.0, "Title": "notebook3f6d2f72d1", "EvaluationDate": "05/12/2023", "IsChange": false, "TotalLines": 327.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 327.0, "LinesInsertedFromFork": 139.0, "LinesDeletedFromFork": 18.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 188.0, "TotalVotes": 0}]
|
[{"Id": 185170725, "KernelVersionId": 129274071, "SourceDatasetVersionId": 23812}]
|
[{"Id": 23812, "DatasetId": 17810, "DatasourceVersionId": 23851, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2018 19:41:59", "VersionNumber": 2.0, "Title": "Chest X-Ray Images (Pneumonia)", "Slug": "chest-xray-pneumonia", "Subtitle": "5,863 images, 2 categories", "Description": "### Context\n\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n\n\nFigure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6\nThe normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse \u2018\u2018interstitial\u2019\u2019 pattern in both lungs.\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n### Content\n\nThe dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). \n\nChest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children\u2019s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients\u2019 routine clinical care. \n\nFor the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.\n\n### Acknowledgements\n\nData: https://data.mendeley.com/datasets/rscbjbr9sj/2\n\nLicense: [CC BY 4.0][1]\n\nCitation: http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n![enter image description here][2]\n\n\n### Inspiration\n\nAutomated methods to detect and classify human diseases from medical images.\n\n\n [1]: https://creativecommons.org/licenses/by/4.0/\n [2]: https://i.imgur.com/8AUJkin.png", "VersionNotes": "train/test/val", "TotalCompressedBytes": 1237249419.0, "TotalUncompressedBytes": 1237249419.0}]
|
[{"Id": 17810, "CreatorUserId": 1314380, "OwnerUserId": 1314380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23812.0, "CurrentDatasourceVersionId": 23851.0, "ForumId": 25540, "Type": 2, "CreationDate": "03/22/2018 05:42:41", "LastActivityDate": "03/22/2018", "TotalViews": 2063138, "TotalDownloads": 237932, "TotalVotes": 5834, "TotalKernels": 2058}]
|
[{"Id": 1314380, "UserName": "paultimothymooney", "DisplayName": "Paul Mooney", "RegisterDate": "10/05/2017", "PerformanceTier": 5}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import cv2
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
if "PNEUMONIA" not in dirname and filename.endswith(".jpeg"):
img = cv2.imread(os.path.join(dirname, filename))
flipped_img = cv2.flip(img, 1)
print("/kaggle/working/" + filename + "_flipped.jpeg")
cv2.imwrite(r"/kaggle/working/" + filename + "_flipped.jpeg", flipped_img)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import random
import tensorflow as tf
from tensorflow.python.keras.models import Sequential, Model
from tensorflow.python.keras.layers import (
Input,
Conv2D,
Dense,
Flatten,
Dropout,
MaxPooling2D,
UpSampling2D,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.callbacks import ReduceLROnPlateau
total_images_train_normal = os.listdir(
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/NORMAL"
)
total_images_train_pneumonia = os.listdir(
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/PNEUMONIA"
)
image_height = 150
image_width = 150
batch_size = 10
no_of_epochs = 20
model = Sequential()
model.add(
Conv2D(32, (3, 3), input_shape=(image_height, image_width, 3), activation="relu")
)
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(units=128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(units=1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
train_datagen = ImageDataGenerator(
rescale=1.0 / 255, rotation_range=15, shear_range=0.2, zoom_range=0.2
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/train",
target_size=(image_width, image_height),
batch_size=batch_size,
class_mode="binary",
)
test_set = test_datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/test",
target_size=(image_width, image_height),
batch_size=batch_size,
class_mode="binary",
)
reduce_learning_rate = ReduceLROnPlateau(
monitor="loss", factor=0.1, patience=2, cooldown=2, min_lr=0.00001, verbose=1
)
callbacks = [reduce_learning_rate]
history = model.fit_generator(
training_set,
steps_per_epoch=5216 // batch_size,
epochs=no_of_epochs,
validation_data=test_set,
validation_steps=624 // batch_size,
callbacks=callbacks,
)
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(16, 9))
plt.subplot(1, 2, 1)
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
no_steps = len(test_set)
result = model.evaluate_generator(test_set, steps=no_steps)
print("Test-set classification accuracy: {0:.2%}".format(result[1]))
import glob
from pathlib import Path
from tensorflow.keras.utils import to_categorical
normal_cases_dir = Path("/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL")
pneumonia_cases_dir = Path(
"/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA"
)
normal_cases = normal_cases_dir.glob("*.jpeg")
pneumonia_cases = pneumonia_cases_dir.glob("*.jpeg")
test_data = []
test_labels = []
for img in normal_cases:
img = cv2.imread(str(img))
img = cv2.resize(img, (image_width, image_height))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
label = [0]
test_data.append(img)
test_labels.append(label)
for img in pneumonia_cases:
img = cv2.imread(str(img))
img = cv2.resize(img, (image_width, image_height))
if img.shape[2] == 1:
img = np.dstack([img, img, img])
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255.0
label = [1]
test_data.append(img)
test_labels.append(label)
test_data = np.array(test_data)
test_labels = np.array(test_labels)
print("Total number of test examples: ", test_data.shape)
print("Total number of labels:", test_labels.shape)
# Evaluation on test dataset
test_loss, test_score = model.evaluate(test_data, test_labels, batch_size=16)
print("Loss on test set: ", test_loss)
print("Accuracy on test set: ", test_score)
# Get predictions
preds = model.predict(test_data, batch_size=16, verbose=1)
preds = np.around(preds)
orig_test_labels = test_labels
print(preds.shape)
print(orig_test_labels.shape)
# Get the confusion matrix
from sklearn.metrics import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
cm = confusion_matrix(orig_test_labels, preds)
plt.figure()
plot_confusion_matrix(cm, figsize=(12, 8), hide_ticks=True, cmap=plt.cm.Blues)
plt.xticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.yticks(range(2), ["Normal", "Pneumonia"], fontsize=16)
plt.show()
# Calculate Precision and Recall
tn, fp, fn, tp = cm.ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
specificity = tn / (tn + fp)
print("Sensitivity (Recall) of the model is {:.2f}".format(recall))
print("Specificity of the model is {:.2f}".format(specificity))
print("Precision of the model is {:.2f}".format(precision))
# Create an ImageDataGenerator for data loading and augmentation
datagen = ImageDataGenerator(rescale=1.0 / 255.0)
# Load and preprocess your dataset using the ImageDataGenerator
batch_size = 32
target_size = (152, 152)
train_generator = datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/train", # Replace with the path to your image directory
target_size=target_size,
batch_size=batch_size,
class_mode=None,
shuffle=True,
)
validation_generator = datagen.flow_from_directory(
"/kaggle/input/chest-xray-pneumonia/chest_xray/val", # Replace with the path to your validation image directory
target_size=target_size,
batch_size=batch_size,
class_mode=None,
shuffle=False,
)
# Define the denoising convolutional autoencoder architecture
input_img = Input(shape=(target_size[0], target_size[1], 3))
# Encoder
x = Conv2D(32, (3, 3), activation="relu", padding="same")(input_img)
x = MaxPooling2D((2, 2), padding="same")(x)
x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
encoded = MaxPooling2D((2, 2), padding="same")(x)
# Decoder
x = Conv2D(64, (3, 3), activation="relu", padding="same")(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation="sigmoid", padding="same")(x)
# Create the autoencoder model
autoencoder = Model(input_img, decoded)
# Compile the model
autoencoder.compile(optimizer="sgd", loss="mse", metrics=["accuracy"])
# Preprocess and normalize the images, and add noise
def preprocess_images(x):
x = x.astype("float32") / 255.0
noise_factor = 0.5
x_noisy = x + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x.shape)
x_noisy = np.clip(x_noisy, 0.0, 1.0)
return x_noisy, x
autoencoder.summary()
from tqdm import tqdm
# Training loop
epochs = 20
steps_per_epoch = len(train_generator)
validation_steps = len(validation_generator)
history = {"loss": [], "val_loss": [], "accuracy": [], "val_accuracy": []}
for epoch in range(epochs):
print(f"Epoch {epoch + 1}/{epochs}")
train_loss = 0.0
train_accuracy = 0.0
with tqdm(total=steps_per_epoch, desc="Training") as pbar:
# Training
for step in range(steps_per_epoch):
x_batch = next(train_generator)
x_noisy, x_clean = preprocess_images(x_batch)
loss_step, accuracy_step = autoencoder.train_on_batch(x_noisy, x_clean)
train_loss += loss_step
train_accuracy += accuracy_step
pbar.update(1)
pbar.set_postfix({"Loss": loss_step, "Accuracy": accuracy_step})
val_loss = 0.0
val_accuracy = 0.0
with tqdm(total=validation_steps, desc="Validation") as pbar:
# Validation
for step in range(validation_steps):
x_batch = next(validation_generator)
x_noisy, x_clean = preprocess_images(x_batch)
val_loss_step, val_accuracy_step = autoencoder.evaluate(
x_noisy, x_clean, verbose=0
)
val_loss += val_loss_step
val_accuracy += val_accuracy_step
pbar.update(1)
pbar.set_postfix({"Loss": val_loss_step, "Accuracy": val_accuracy_step})
# Calculate average losses and accuracies for the epoch
train_loss /= steps_per_epoch
train_accuracy /= steps_per_epoch
val_loss /= validation_steps
val_accuracy /= validation_steps
# Print the results for the epoch
print(
"Train Loss: {:.4f} - Train Accuracy: {:.4f} - Val Loss: {:.4f} - Val Accuracy: {:.4f}".format(
train_loss, train_accuracy, val_loss, val_accuracy
)
)
# Store the history
history["loss"].append(train_loss)
history["val_loss"].append(val_loss)
history["accuracy"].append(train_accuracy)
history["val_accuracy"].append(val_accuracy)
# Plot the loss
plt.figure(figsize=(10, 5))
plt.plot(history["loss"], label="Train Loss")
plt.plot(history["val_loss"], label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Loss")
plt.legend()
plt.show()
# Plot the accuracy
plt.figure(figsize=(10, 5))
plt.plot(history["accuracy"], label="Train Accuracy")
plt.plot(history["val_accuracy"], label="Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Train and Validation Accuracy")
plt.legend()
plt.show()
| false | 0 | 3,680 | 0 | 4,157 | 3,680 |
# This notebook will be summarising data from a Stack Overflow developer survey!
# We will mostly be tidying/cleaning data here and then exporting it to Tableau for visualizations, but we will do some EDA here to get a better idea of what this dataset represents
# import all modules required for analysis/cleaning
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels
import matplotlib.pyplot as plt
import math
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.model_selection import train_test_split
pd.set_option("mode.chained_assignment", None)
sns.set_style("whitegrid")
sns.set_palette("bright")
# # Stack overflow survey
# ## Inspecting/cleaning/dealing with null values
# We'll run some simple functions to get a basic overview of the dataset
df = pd.read_csv("/kaggle/input/dev-dataset/developer_dataset.csv", low_memory=False)
df.describe()
# We can see that some columns have more values than others, so we'll have Null values
df.head()
maxRows = df["RespondentID"].count()
print("% Missing Data:")
print((1 - df.count() / maxRows) * 100)
# This will give the percentage of missing data for each column
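# As a quick optional cross-check (a sketch), df.isnull().mean() gives essentially the
# same percentages directly - this is just a sanity check and doesn't change the data
print((df.isnull().mean() * 100).round(2))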
# These columns are so empty we are unlikely to be able to draw any conclusions about the overall dataset from them, so we can drop them
df.drop(["NEWJobHunt", "NEWJobHuntResearch", "NEWLearn"], axis=1, inplace=True)
df.head()
df.to_csv("developer_dataset_clean.csv")
# Lets look at the missing data by country
df[["RespondentID", "Country"]].groupby("Country").count()
missingData = (
df[["Employment", "DevType"]].isnull().groupby(df["Country"]).sum().reset_index()
)
missingData
df[["RespondentID", "Country"]].groupby("Country").count()
missingData = (
df[["Employment", "DevType"]].isnull().groupby(df["Country"]).sum().reset_index()
)
# We should consider how the missing values in the Employment and DevType columns are distributed by Country before we do any analysis
A = sns.catplot(
data=missingData, kind="bar", x="Country", y="Employment", height=6, aspect=2
)
B = sns.catplot(
data=missingData, kind="bar", x="Country", y="DevType", height=6, aspect=2
)
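# Supplementary check (sketch): missingness *rates* per country rather than raw counts,
# since countries with more respondents will naturally have more missing rows
missing_rates = df.groupby("Country")[["Employment", "DevType"]].apply(
    lambda g: (g.isnull().mean() * 100).round(2)
)
print(missing_rates)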
# The missing data seems to be MCAR (Missing Completely At Random), so we are safe to delete the rows with null values
df.dropna(subset=["Employment", "DevType"], inplace=True, how="any")
df.head()
# ## Let's consider employment and dev types by country
devdf = df[["Country", "DevType"]].copy()
devdf.loc[devdf["DevType"].str.contains("back-end"), "BackEnd"] = True
devdf.loc[devdf["DevType"].str.contains("front-end"), "FrontEnd"] = True
devdf.loc[devdf["DevType"].str.contains("full-stack"), "FullStack"] = True
devdf.loc[devdf["DevType"].str.contains("mobile"), "Mobile"] = True
devdf.loc[devdf["DevType"].str.contains("administrator"), "Admin"] = True
empdf = df[["Country", "Employment"]]
empdf.loc[empdf["Employment"].str.contains("full-time"), "FullTime"] = True
empdf.loc[empdf["Employment"].str.contains("part-time"), "PartTime"] = True
empdf.loc[
empdf["Employment"].str.contains("contractor"), "Contractor/SelfEmployed"
] = True
empdf.loc[empdf["Employment"].str.contains("but looking for work"), "LFW"] = True
empdf.loc[empdf["Employment"].str.contains("not looking for work"), "NE-NLFW"] = True
empdf.loc[empdf["Employment"].str.contains("Retired"), "Retired"] = True
# We can melt or pivot this new dataframe for Dev type to make it more readable and easier to analyse
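# A tiny toy illustration (toy data, not the survey) of what melt does here: wide
# True/NaN flag columns become a long (Country, Dev Cat, Dev Flag) table
toy = pd.DataFrame(
    {"Country": ["A", "B"], "BackEnd": [True, None], "Mobile": [None, True]}
)
print(
    pd.melt(toy, id_vars="Country", var_name="Dev Cat", value_name="Dev Flag").dropna()
)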
devdf = pd.melt(
frame=devdf,
id_vars="Country",
value_vars=["BackEnd", "FrontEnd", "FullStack", "Mobile", "Admin"],
var_name="Dev Cat",
value_name="Dev Flag",
)
devdf.dropna(how="any", inplace=True)
devdf.to_csv("DevTypeByCountry.csv")
# We can use the above clean dataframe in Tableau for further analysis
devdf
# We can repeat the above step for the employment column
empdf = pd.melt(
frame=empdf,
id_vars="Country",
value_vars=[
"FullTime",
"PartTime",
"Contractor/SelfEmployed",
"LFW",
"NE-NLFW",
"Retired",
],
var_name="Employment Cat",
value_name="Employment Flag",
)
empdf.dropna(how="any", inplace=True)
empdf.to_csv("EmpTypeByCountry.csv")
# We can use the above clean dataframe in Tableau for further analysis
devFig = sns.catplot(
x="Country", col="Dev Cat", data=devdf, kind="count", height=6, aspect=1.5
)
empFig = sns.catplot(
x="Country", col="Employment Cat", data=empdf, kind="count", height=6, aspect=1.5
)
# You see that the vast majority of respondents are employed full-time. Since these developers are employed, this data will be relevant for a client who wants to see what developers look for in a potential job. You also see that the majority of developers will have skill sets in front-end, back-end, or full-stack development. This is interesting, and shows that the market values developers who can excel in at least a major part of the development lifecycle, if not the entire stack.
# Now it's time to investigate undergrad majors - let's see the different kinds of educational backgrounds we have!
# Consider how much null data we have for UndergradMajor; as seen above, it should be around 11.47%
MissingUnderGrad = df["UndergradMajor"].isnull().groupby(df.Year).sum().reset_index()
MissingUnderGrad
sns.catplot(x="Year", y="UndergradMajor", data=MissingUnderGrad, kind="bar")
# All 2020 data is filled in, while we are missing large amounts of data for 2018 and 2019. This suggests respondents often didn't declare their undergrad major until a later survey year (we have multiple years for each respondent). We can carry each respondent's later value back to the earlier years, as it only matters what major they ended up doing! So we can use NOCB (Next Observation Carried Backward)
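# A toy sketch of NOCB/backfill: a value reported in a later year is carried back to
# the earlier, unanswered years
toy_major = pd.Series([None, None, "Computer science"], index=[2018, 2019, 2020])
print(toy_major.bfill())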
df.sort_values(["RespondentID", "Year"])
df.UndergradMajor.bfill(axis=0, inplace=True)
# now lets get a view of what values we have now - we are only missing 2 entries in 2018, much better!
MissingUnderGrad = df["UndergradMajor"].isnull().groupby(df.Year).sum().reset_index()
MissingUnderGrad
sns.catplot(x="Year", y="UndergradMajor", data=MissingUnderGrad, kind="bar")
edudf = df[["Year", "UndergradMajor", "ConvertedComp"]]
edudf = edudf.dropna(how="any")
edudf.loc[
edudf.UndergradMajor.str.contains("(?i)social science"), "SocialScience"
] = True
edudf.loc[
edudf.UndergradMajor.str.contains("(?i)natural science"), "NaturalScience"
] = True
edudf.loc[edudf.UndergradMajor.str.contains("(?i)computer science"), "CompSci"] = True
edudf.loc[edudf.UndergradMajor.str.contains("(?i)development"), "CompSci"] = True
edudf.loc[
edudf.UndergradMajor.str.contains("(?i)another engineering"), "OtherEng"
] = True
edudf.loc[edudf.UndergradMajor.str.contains("(?i)never declared"), "NoMajor"] = True
edudf = edudf.melt(
id_vars=["Year"],
value_vars=["SocialScience", "NaturalScience", "CompSci", "OtherEng", "NoMajor"],
var_name="Major",
value_name="MajorFlag",
)
edudf.dropna(how="any", inplace=True)
edudf = edudf.groupby(["Year", "Major"]).count().reset_index()
edudf.columns = ["Year", "Major", "Count"]
print(edudf)
eduFig = sns.catplot(x="Year", y="Count", col="Major", data=edudf, kind="bar")
# Now lets consider the relationship between years of experience and compensation!
compdf = df[["Year", "YearsCodePro", "ConvertedComp"]]
D = sns.catplot(x="Year", y="YearsCodePro", data=compdf, kind="boxen")
E = sns.catplot(x="Year", y="ConvertedComp", data=compdf, kind="boxen")
# The overall distribution is fairly consistent year-over-year! This indicates that there is a strong correlation between the data points, which should tell a good story about how experience can translate into compensation. Since there is a clear trend with the data points, the best method for filling in the missing data for these two columns is through Multiple Imputation
MIdf = df[["YearsCodePro", "ConvertedComp"]]
traindf, testdf = train_test_split(MIdf, train_size=0.1)
# Create the IterativeImputer model
imp = IterativeImputer(max_iter=20, random_state=0)
# Fit model to our data
imp.fit(MIdf)
compdf = pd.DataFrame(
np.round(imp.transform(MIdf), 0), columns=["YearsCodePro", "ConvertedComp"]
)
# Now we've filled in our data, lets investigate the relationship between these two!
CompPlotdf = MIdf[MIdf.ConvertedComp <= 200000]
CompPlotdf["CodeYearBins"] = pd.qcut(CompPlotdf["YearsCodePro"], q=5)
F = sns.boxplot(x="CodeYearBins", y="ConvertedComp", data=CompPlotdf)
plt.hist(CompPlotdf.ConvertedComp[CompPlotdf.YearsCodePro.isnull()])
CompPlotdf.YearsCodePro[CompPlotdf.ConvertedComp.isnull()].count()
# From the above we can see that the distribution of missing years of experience values is uniform across all compensation levels (there's no correlation), and all empty compensation values fall within the set of empty years of experience. The data seems to be MAR meaning we are safe to drop these null values
CompPlotdf.dropna(inplace=True)
# Calculate mean/percentiles for
CompPlotdf.to_csv("ExperienceVsPay.csv")
CompPlotTS = (
CompPlotdf.ConvertedComp.groupby(CompPlotdf.YearsCodePro).mean().reset_index()
)
CompPlotTS["75th"] = CompPlotdf.groupby("YearsCodePro").ConvertedComp.apply(
lambda x: np.percentile(x, 75)
)
CompPlotTS["25th"] = CompPlotdf.groupby("YearsCodePro").ConvertedComp.apply(
lambda x: np.percentile(x, 25)
)
CompPlotTS.head()
plt.plot(CompPlotTS.YearsCodePro, CompPlotTS.ConvertedComp)
plt.fill_between(
CompPlotTS.YearsCodePro, CompPlotTS["25th"], CompPlotTS["75th"], alpha=0.2
)
plt.xlabel("Years coding experience")
plt.ylabel("Annual compensation ($)")
plt.title("Average salary by experience")
plt.show()
# It could be useful to see what undergraduate degrees the top and bottom earners studied
top_percentile = 20
topcomp = df[["UndergradMajor", "ConvertedComp"]]
topcomp
top_percentile = 90
top_comp = df[["UndergradMajor", "ConvertedComp"]]
top_comp = top_comp.dropna(how="any")
top_comp = top_comp[
top_comp.ConvertedComp >= np.percentile(top_comp.ConvertedComp, top_percentile)
]
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)social science"), "SocialScience"
] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)natural science"), "NaturalScience"
] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)computer science"), "CompSci"
] = True
top_comp.loc[top_comp.UndergradMajor.str.contains("(?i)development"), "CompSci"] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)another engineering"), "OtherEng"
] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)never declared"), "NoMajor"
] = True
top_comp = top_comp.melt(
id_vars=["UndergradMajor"],
value_vars=["SocialScience", "NaturalScience", "CompSci", "OtherEng", "NoMajor"],
var_name="Major",
value_name="MajorFlag",
)
top_comp = top_comp.groupby("Major").count().reset_index()
top_comp = top_comp[["Major", "MajorFlag"]]
top_comp.columns = ["Major", "Count"]
top_comp
G = sns.catplot(x="Major", y="Count", data=top_comp, kind="bar")
plt.title("Which majors did the top {}% of earners study?".format(100 - top_percentile))
bot_percentile = 10
bot_comp = df[["UndergradMajor", "ConvertedComp"]]
bot_comp = bot_comp.dropna(how="any")
bot_comp = bot_comp[
bot_comp.ConvertedComp <= np.percentile(bot_comp.ConvertedComp, bot_percentile)
]
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)social science"), "SocialScience"
] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)natural science"), "NaturalScience"
] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)computer science"), "CompSci"
] = True
bot_comp.loc[bot_comp.UndergradMajor.str.contains("(?i)development"), "CompSci"] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)another engineering"), "OtherEng"
] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)never declared"), "NoMajor"
] = True
bot_comp = bot_comp.melt(
id_vars=["UndergradMajor"],
value_vars=["SocialScience", "NaturalScience", "CompSci", "OtherEng", "NoMajor"],
var_name="Major",
value_name="MajorFlag",
)
bot_comp = bot_comp.groupby("Major").count().reset_index()
bot_comp = bot_comp[["Major", "MajorFlag"]]
bot_comp.columns = ["Major", "Count"]
bot_comp
H = sns.catplot(x="Major", y="Count", data=bot_comp, kind="bar")
plt.title("Which majors did the bottom {}% of earners study?".format(bot_percentile))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/274/129274067.ipynb
| null | null |
[{"Id": 129274067, "ScriptId": 38435125, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11698170, "CreationDate": "05/12/2023 11:09:01", "VersionNumber": 1.0, "Title": "Stack overflow survey", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 275.0, "LinesInsertedFromPrevious": 275.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# This data set will be summarising data from a stack overflow survey!
# We will mostly be tidying/cleaning data here then exporting to Tableau for visualizations, but we will so some EDA here to get a better idea of what this dataset represents
# import all modules required for analysis/cleaning
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels
import matplotlib.pyplot as plt
import math
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.model_selection import train_test_split
pd.set_option("mode.chained_assignment", None)
sns.set_style("whitegrid")
sns.set_palette("bright")
# # Stack overflow survey
# ## Inspecting/cleaning/dealing with null values
# We'll run some simple functions to get a basic overview of the dataset
df = pd.read_csv("/kaggle/input/dev-dataset/developer_dataset.csv", low_memory=False)
df.describe()
# We can see that some columns have more values than others, so we'll have Null values
df.head()
maxRows = df["RespondentID"].count()
print("% Missing Data:")
print((1 - df.count() / maxRows) * 100)
# This will give the percentage of missing data for each column
# These columns are so empty we are unlikely to be able to draw any conclusions about the overall dataset from them, so we can drop them
df.drop(["NEWJobHunt", "NEWJobHuntResearch", "NEWLearn"], axis=1, inplace=True)
df.head()
df.to_csv("developer_dataset_clean.csv")
# Lets look at the missing data by country
df[["RespondentID", "Country"]].groupby("Country").count()
missingData = (
df[["Employment", "DevType"]].isnull().groupby(df["Country"]).sum().reset_index()
)
missingData
df[["RespondentID", "Country"]].groupby("Country").count()
missingData = (
df[["Employment", "DevType"]].isnull().groupby(df["Country"]).sum().reset_index()
)
# We should consider how the distribution of missing values by Country for the employment and devtype columns before we do any analysis
A = sns.catplot(
data=missingData, kind="bar", x="Country", y="Employment", height=6, aspect=2
)
B = sns.catplot(
data=missingData, kind="bar", x="Country", y="DevType", height=6, aspect=2
)
# Missing data seems to be MCAR so we are safe to delete these missing null rows
df.dropna(subset=["Employment", "DevType"], inplace=True, how="any")
df.head()
# ## Lets consider employment and dev types by country
devdf = df[["Country", "DevType"]]
devdf.loc[devdf["DevType"].str.contains("back-end"), "BackEnd"] = True
devdf.loc[devdf["DevType"].str.contains("front-end"), "FrontEnd"] = True
devdf.loc[devdf["DevType"].str.contains("full-stack"), "FullStack"] = True
devdf.loc[devdf["DevType"].str.contains("mobile"), "Mobile"] = True
devdf.loc[devdf["DevType"].str.contains("administrator"), "Admin"] = True
empdf = df[["Country", "Employment"]]
empdf.loc[empdf["Employment"].str.contains("full-time"), "FullTime"] = True
empdf.loc[empdf["Employment"].str.contains("part-time"), "PartTime"] = True
empdf.loc[
empdf["Employment"].str.contains("contractor"), "Contractor/SelfEmployed"
] = True
empdf.loc[empdf["Employment"].str.contains("but looking for work"), "LFW"] = True
empdf.loc[empdf["Employment"].str.contains("not looking for work"), "NE-NLFW"] = True
empdf.loc[empdf["Employment"].str.contains("Retired"), "Retired"] = True
# We can melt or pivot this new dataframe for Dev type to make it more readable and easier to analyse
devdf = pd.melt(
frame=devdf,
id_vars="Country",
value_vars=["BackEnd", "FrontEnd", "FullStack", "Mobile", "Admin"],
var_name="Dev Cat",
value_name="Dev Flag",
)
devdf.dropna(how="any", inplace=True)
devdf.to_csv("DevTypeByCountry.csv")
# We can use the above clean dataframe in Tableau for further analysis
devdf
# We can repeat the above step for the employment column
empdf = pd.melt(
frame=empdf,
id_vars="Country",
value_vars=[
"FullTime",
"PartTime",
"Contractor/SelfEmployed",
"LFW",
"NE-NLFW",
"Retired",
],
var_name="Employment Cat",
value_name="Employment Flag",
)
empdf.dropna(how="any", inplace=True)
empdf.to_csv("EmpTypeByCountry.csv")
# We can use the above clean dataframe in Tableau for further analysis
devFig = sns.catplot(
x="Country", col="Dev Cat", data=devdf, kind="count", height=6, aspect=1.5
)
empFig = sns.catplot(
x="Country", col="Employment Cat", data=empdf, kind="count", height=6, aspect=1.5
)
# You see that the vast majority of respondents are employed full-time. Since these developers are employed, this data will be relevant for a client who wants to see what developers look for in a potential job. You also see that the majority of developers will have skill sets in front-end, back-end, or full-stack development. This is interesting, and shows that the market values developers who can excel in at least a major part of the development lifecycle, if not the entire stack.
# Now it's time to investigate undergrad majors, lets see the different kind of educational backgrounds we have!
# Consider how much null data we have for undergrad major, as seen above it should be 11.47%
MissingUnderGrad = df["UndergradMajor"].isnull().groupby(df.Year).sum().reset_index()
MissingUnderGrad
sns.catplot(x="Year", y="UndergradMajor", data=MissingUnderGrad, kind="bar")
# All 2020 data is filled in, while we are missing large amounts of data for 2019, and even more for 2020. This means that people likely didn't know what undergrad they were doing until later in the survey (as we have multiple years for each respondant) we can carry the values for each respondant back to clean up the data, as it only matter what major they ended up doing! So we can use NOCB (Next Observation Carried Backward)
df.sort_values(["RespondentID", "Year"])
df.UndergradMajor.bfill(axis=0, inplace=True)
# now lets get a view of what values we have now - we are only missing 2 entries in 2018, much better!
MissingUnderGrad = df["UndergradMajor"].isnull().groupby(df.Year).sum().reset_index()
MissingUnderGrad
sns.catplot(x="Year", y="UndergradMajor", data=MissingUnderGrad, kind="bar")
edudf = df[["Year", "UndergradMajor", "ConvertedComp"]]
edudf = edudf.dropna(how="any")
edudf.loc[
edudf.UndergradMajor.str.contains("(?i)social science"), "SocialScience"
] = True
edudf.loc[
edudf.UndergradMajor.str.contains("(?i)natural science"), "NaturalScience"
] = True
edudf.loc[edudf.UndergradMajor.str.contains("(?i)computer science"), "CompSci"] = True
edudf.loc[edudf.UndergradMajor.str.contains("(?i)development"), "CompSci"] = True
edudf.loc[
edudf.UndergradMajor.str.contains("(?i)another engineering"), "OtherEng"
] = True
edudf.loc[edudf.UndergradMajor.str.contains("(?i)never declared"), "NoMajor"] = True
edudf = edudf.melt(
id_vars=["Year"],
value_vars=["SocialScience", "NaturalScience", "CompSci", "OtherEng", "NoMajor"],
var_name="Major",
value_name="MajorFlag",
)
edudf.dropna(how="any", inplace=True)
edudf = edudf.groupby(["Year", "Major"]).count().reset_index()
edudf.columns = ["Year", "Major", "Count"]
print(edudf)
eduFig = sns.catplot(x="Year", y="Count", col="Major", data=edudf, kind="bar")
# Now lets consider the relationship between years of experience and compensation!
compdf = df[["Year", "YearsCodePro", "ConvertedComp"]]
D = sns.catplot(x="Year", y="YearsCodePro", data=compdf, kind="boxen")
E = sns.catplot(x="Year", y="ConvertedComp", data=compdf, kind="boxen")
# The overall distribution is fairly consistent year-over-year! This indicates that there is a strong correlation between the data points, which should tell a good story about how experience can translate into compensation. Since there is a clear trend with the data points, the best method for filling in the missing data for these two columns is through Multiple Imputation
MIdf = df[["YearsCodePro", "ConvertedComp"]]
traindf, testdf = train_test_split(MIdf, train_size=0.1)
# Create the IterativeImputer model
imp = IterativeImputer(max_iter=20, random_state=0)
# Fit model to our data
imp.fit(MIdf)
compdf = pd.DataFrame(
np.round(imp.transform(MIdf), 0), columns=["YearsCodePro", "ConvertedComp"]
)
# Now we've filled in our data, lets investigate the relationship between these two!
CompPlotdf = MIdf[MIdf.ConvertedComp <= 200000]
CompPlotdf["CodeYearBins"] = pd.qcut(CompPlotdf["YearsCodePro"], q=5)
F = sns.boxplot(x="CodeYearBins", y="ConvertedComp", data=CompPlotdf)
plt.hist(CompPlotdf.ConvertedComp[CompPlotdf.YearsCodePro.isnull()])
CompPlotdf.YearsCodePro[CompPlotdf.ConvertedComp.isnull()].count()
# From the above we can see that the distribution of missing years of experience values is uniform across all compensation levels (there's no correlation), and all empty compensation values fall within the set of empty years of experience. The data seems to be MAR meaning we are safe to drop these null values
CompPlotdf.dropna(inplace=True)
# Calculate mean/percentiles for
CompPlotdf.to_csv("ExperienceVsPay.csv")
CompPlotTS = (
CompPlotdf.ConvertedComp.groupby(CompPlotdf.YearsCodePro).mean().reset_index()
)
CompPlotTS["75th"] = CompPlotdf.groupby("YearsCodePro").ConvertedComp.apply(
lambda x: np.percentile(x, 75)
)
CompPlotTS["25th"] = CompPlotdf.groupby("YearsCodePro").ConvertedComp.apply(
lambda x: np.percentile(x, 25)
)
CompPlotTS.head()
plt.plot(CompPlotTS.YearsCodePro, CompPlotTS.ConvertedComp)
plt.fill_between(
CompPlotTS.YearsCodePro, CompPlotTS["25th"], CompPlotTS["75th"], alpha=0.2
)
plt.xlabel("Years coding experience")
plt.ylabel("Annual compensation ($)")
plt.title("Average salary by experience")
plt.show()
# It could be useful to see what undergraduate degrees the top and bottom earners studied
top_percentile = 20
topcomp = df[["UndergradMajor", "ConvertedComp"]]
topcomp
top_percentile = 90
top_comp = df[["UndergradMajor", "ConvertedComp"]]
top_comp = top_comp.dropna(how="any")
top_comp = top_comp[
top_comp.ConvertedComp >= np.percentile(top_comp.ConvertedComp, top_percentile)
]
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)social science"), "SocialScience"
] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)natural science"), "NaturalScience"
] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)computer science"), "CompSci"
] = True
top_comp.loc[top_comp.UndergradMajor.str.contains("(?i)development"), "CompSci"] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)another engineering"), "OtherEng"
] = True
top_comp.loc[
top_comp.UndergradMajor.str.contains("(?i)never declared"), "NoMajor"
] = True
top_comp = top_comp.melt(
id_vars=["UndergradMajor"],
value_vars=["SocialScience", "NaturalScience", "CompSci", "OtherEng", "NoMajor"],
var_name="Major",
value_name="MajorFlag",
)
top_comp = top_comp.groupby("Major").count().reset_index()
top_comp = top_comp[["Major", "MajorFlag"]]
top_comp.columns = ["Major", "Count"]
top_comp
G = sns.catplot(x="Major", y="Count", data=top_comp, kind="bar")
plt.title("Which majors did the top {}% of earners study?".format(100 - top_percentile))
bot_percentile = 10
bot_comp = df[["UndergradMajor", "ConvertedComp"]]
bot_comp = bot_comp.dropna(how="any")
bot_comp = bot_comp[
bot_comp.ConvertedComp <= np.percentile(bot_comp.ConvertedComp, bot_percentile)
]
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)social science"), "SocialScience"
] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)natural science"), "NaturalScience"
] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)computer science"), "CompSci"
] = True
bot_comp.loc[bot_comp.UndergradMajor.str.contains("(?i)development"), "CompSci"] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)another engineering"), "OtherEng"
] = True
bot_comp.loc[
bot_comp.UndergradMajor.str.contains("(?i)never declared"), "NoMajor"
] = True
bot_comp = bot_comp.melt(
id_vars=["UndergradMajor"],
value_vars=["SocialScience", "NaturalScience", "CompSci", "OtherEng", "NoMajor"],
var_name="Major",
value_name="MajorFlag",
)
bot_comp = bot_comp.groupby("Major").count().reset_index()
bot_comp = bot_comp[["Major", "MajorFlag"]]
bot_comp.columns = ["Major", "Count"]
bot_comp
H = sns.catplot(x="Major", y="Count", data=bot_comp, kind="bar")
plt.title("Which majors did the bottom {}% of earners study?".format(bot_percentile))
| false | 0 | 3,842 | 0 | 3,842 | 3,842 |
||
129274979
|
# # 1. Imports / Installations
MAIN_DIR = "/kaggle/input/icr-identify-age-related-conditions/"
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
class config:
# for training
greeks_path = MAIN_DIR + "greeks.csv"
train_path = MAIN_DIR + "train.csv"
# for testing
test_path = MAIN_DIR + "test.csv"
# for submission
sub_path = MAIN_DIR + "sample_submission.csv"
# # 2. About "Train" File
train_df = pd.read_csv(config.train_path, index_col="Id")
train_df.head(5)
train_df.info()
# The first training dataset **Train** is composed of 617 observations and 56 features associated with anonymized health characteristics:
# - the 56 feature columns (AB to GL) are numerical, except EJ, which is categorical
# - the last column (Class) is a binary target: 1 indicates that the subject has been diagnosed with one of the three conditions, 0 indicates they have not.
train_df["EJ"]
# # 3. About the Greeks file
greeks_df = pd.read_csv(config.greeks_path, index_col="Id")
greeks_df.head(5)
greeks_df.info()
# The second training dataset **Greeks** is composed of 617 observations and 5 columns of supplemental metadata about the same subjects:
# - **Alpha** : The type of age-related condition, if present (A = No age-related condition. Corresponds to class 0. B, D, G = The three age-related conditions. Correspond to class 1)
# - **Beta, Gamma, Delta** : Three experimental characteristics.
# - **Epsilon** : The date the data for this subject was collected. Note that all of the data in the test set was collected after the training set was collected.
# # 4. About the Test file
test_df = pd.read_csv(config.test_path, index_col="Id")
test_df
# It's important to note that the test file shown here is only a placeholder: it will be replaced by the full hidden test set when the notebook is re-run for submission.
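# As a rough sketch (not part of the original notebook), the sample submission file gives the format we will eventually have to fill in; the `class_0`/`class_1` column names and the placeholder probabilities below are assumptions for illustration only.
sub_df = pd.read_csv(config.sub_path)
predicted_probs = np.full(len(sub_df), 0.5)  # placeholder for real model probabilities of Class == 1
sub_df["class_1"] = predicted_probs
sub_df["class_0"] = 1.0 - predicted_probs
sub_df.to_csv("submission.csv", index=False)
sub_df.head()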
# # 5. PyTorch Dataset
class ICRDataset(Dataset):
def __init__(self, dataframe):
        super().__init__()
self.features = dataframe.drop(["Class"], axis=1)
self.targets = dataframe["Class"]
# Transform EJ categorical to numerical for train set:
if "EJ" in list(self.features.columns):
self.features["EJ_numerical"], unique_categories = pd.factorize(
self.features["EJ"]
)
self.features = self.features.drop(["EJ"], axis=1)
def __len__(self):
return len(self.targets)
def __getitem__(self, index):
        features = self.features.iloc[index].values  # numeric feature row as a NumPy array
        features = torch.tensor(features, dtype=torch.float32)
target = self.targets.iloc[index]
target = torch.tensor(target, dtype=torch.int64)
return features, target
train_dataset = ICRDataset(dataframe=train_df)
train_dataset.__getitem__(0)
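# As a quick, hedged sketch of how this dataset would typically be consumed; the batch size and shuffling below are arbitrary choices, not taken from the original notebook.
from torch.utils.data import DataLoader

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
features_batch, targets_batch = next(iter(train_loader))
print(features_batch.shape, targets_batch.shape)  # e.g. torch.Size([32, 56]) and torch.Size([32])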
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/274/129274979.ipynb
| null | null |
[{"Id": 129274979, "ScriptId": 38433598, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5980139, "CreationDate": "05/12/2023 11:18:21", "VersionNumber": 1.0, "Title": "Pytorch + Data Augmentation", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # 1. Imports / Installations
MAIN_DIR = "/kaggle/input/icr-identify-age-related-conditions/"
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
class config:
# for training
greeks_path = MAIN_DIR + "greeks.csv"
train_path = MAIN_DIR + "train.csv"
# for testing
test_path = MAIN_DIR + "test.csv"
# for submission
sub_path = MAIN_DIR + "sample_submission.csv"
# # 2. About "Train" File
train_df = pd.read_csv(config.train_path, index_col="Id")
train_df.head(5)
train_df.info()
# The first training dataset **Train** is composed of 617 observations and 56 features associated with anonymized health characteristics:
# - the 56 feature columns (AB to GL) are numerical, except EJ, which is categorical
# - the last column (Class) is a binary target: 1 indicates that the subject has been diagnosed with one of the three conditions, 0 indicates they have not.
train_df["EJ"]
# # 3. About the Greeks file
greeks_df = pd.read_csv(config.greeks_path, index_col="Id")
greeks_df.head(5)
greeks_df.info()
# The second training dataset **Greeks** is composed of 617 observations and 5 columns of supplemental metadata about the same subjects:
# - **Alpha** : The type of age-related condition, if present (A = No age-related condition. Corresponds to class 0. B, D, G = The three age-related conditions. Correspond to class 1)
# - **Beta, Gamma, Delta** : Three experimental characteristics.
# - **Epsilon** : The date the data for this subject was collected. Note that all of the data in the test set was collected after the training set was collected.
# # 4. About the Test file
test_df = pd.read_csv(config.test_path, index_col="Id")
test_df
# It's important to note that the test file shown here is only a placeholder: it will be replaced by the full hidden test set when the notebook is re-run for submission.
# # 5. PyTorch Dataset
class ICRDataset(Dataset):
def __init__(self, dataframe):
        super().__init__()
self.features = dataframe.drop(["Class"], axis=1)
self.targets = dataframe["Class"]
# Transform EJ categorical to numerical for train set:
if "EJ" in list(self.features.columns):
self.features["EJ_numerical"], unique_categories = pd.factorize(
self.features["EJ"]
)
self.features = self.features.drop(["EJ"], axis=1)
def __len__(self):
return len(self.targets)
def __getitem__(self, index):
        features = self.features.iloc[index].values  # numeric feature row as a NumPy array
        features = torch.tensor(features, dtype=torch.float32)
target = self.targets.iloc[index]
target = torch.tensor(target, dtype=torch.int64)
return features, target
train_dataset = ICRDataset(dataframe=train_df)
train_dataset.__getitem__(0)
| false | 0 | 781 | 0 | 781 | 781 |
||
129379807
|
from sklearn.cluster import KMeans
import rasterio as rs
import os
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from math import hypot
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from PIL import Image
import gzip
import cv2
import zlib
from scipy.spatial.distance import pdist, squareform
from sklearn.model_selection import ParameterGrid
dataset = rs.open("/kaggle/input/diploma-20/12/12_05-18.tif")
ndvi = dataset.read(6)
normalized_ndvi = (ndvi - np.min(ndvi)) / (np.max(ndvi) - np.min(ndvi))
# Pairwise Euclidean distances between the rows of the normalized NDVI band
distance_matrix = pdist(normalized_ndvi, metric="euclidean")
distance_matrix_square = squareform(distance_matrix)
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score
import numpy as np
eps_values = [100, 1000, 5000, 10000, 15000, 20000]
min_samples_values = [10, 500, 1000, 2000, 5000, 10000]
best_score = -1
best_eps = None
best_min_samples = None
for eps in eps_values:
for min_samples in min_samples_values:
        # Use the precomputed distance matrix consistently for both clustering and scoring
        dbscan = DBSCAN(eps=eps, min_samples=min_samples, metric="precomputed")
        labels = dbscan.fit_predict(distance_matrix_square)
        unique_labels = np.unique(labels)
        if len(unique_labels) > 1:
            score = silhouette_score(distance_matrix_square, labels, metric="precomputed")
if score > best_score:
best_score = score
best_eps = eps
best_min_samples = min_samples
print("Best parameters: eps =", best_eps, "min_samples =", best_min_samples)
new_eps = 100
new_min_samples = 1000
new_dbscan = DBSCAN(eps=new_eps, min_samples=new_min_samples, metric="precomputed")
labels = new_dbscan.fit_predict(distance_matrix_square)
print("Cluster Labels:", labels)
unique_labels, label_counts = np.unique(labels, return_counts=True)
num_clusters = len(unique_labels) - (1 if -1 in unique_labels else 0)  # Exclude the noise label (-1) if present
print("Number of Clusters:", num_clusters)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/379/129379807.ipynb
| null | null |
[{"Id": 129379807, "ScriptId": 38452028, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8938823, "CreationDate": "05/13/2023 09:41:28", "VersionNumber": 1.0, "Title": "notebook6deba2c250", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 69.0, "LinesInsertedFromPrevious": 69.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
from sklearn.cluster import KMeans
import rasterio as rs
import os
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from math import hypot
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from PIL import Image
import gzip
import cv2
import zlib
from scipy.spatial.distance import pdist, squareform
from sklearn.model_selection import ParameterGrid
dataset = rs.open("/kaggle/input/diploma-20/12/12_05-18.tif")
ndvi = dataset.read(6)
normalized_ndvi = (ndvi - np.min(ndvi)) / (np.max(ndvi) - np.min(ndvi))
# Pairwise Euclidean distances between the rows of the normalized NDVI band
distance_matrix = pdist(normalized_ndvi, metric="euclidean")
distance_matrix_square = squareform(distance_matrix)
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score
import numpy as np
eps_values = [100, 1000, 5000, 10000, 15000, 20000]
min_samples_values = [10, 500, 1000, 2000, 5000, 10000]
best_score = -1
best_eps = None
best_min_samples = None
for eps in eps_values:
for min_samples in min_samples_values:
        # Use the precomputed distance matrix consistently for both clustering and scoring
        dbscan = DBSCAN(eps=eps, min_samples=min_samples, metric="precomputed")
        labels = dbscan.fit_predict(distance_matrix_square)
        unique_labels = np.unique(labels)
        if len(unique_labels) > 1:
            score = silhouette_score(distance_matrix_square, labels, metric="precomputed")
if score > best_score:
best_score = score
best_eps = eps
best_min_samples = min_samples
print("Best parameters: eps =", best_eps, "min_samples =", best_min_samples)
new_eps = 100
new_min_samples = 1000
new_dbscan = DBSCAN(eps=new_eps, min_samples=new_min_samples, metric="precomputed")
labels = new_dbscan.fit_predict(distance_matrix_square)
print("Cluster Labels:", labels)
unique_labels, label_counts = np.unique(labels, return_counts=True)
num_clusters = len(unique_labels) - (1 if -1 in unique_labels else 0)  # Exclude the noise label (-1) if present
print("Number of Clusters:", num_clusters)
| false | 0 | 622 | 0 | 622 | 622 |
||
129379511
|
<jupyter_start><jupyter_text>Company's Ideal Customers | Marketing Strategy
## Context
### Problem Statement
Customer Personality Analysis is a detailed analysis of a company’s ideal customers. It helps a business to better understand its customers and makes it easier for them to modify products according to the specific needs, behaviors and concerns of different types of customers.
Customer personality analysis helps a business to modify its product based on its target customers from different types of customer segments. For example, instead of spending money to market a new product to every customer in the company’s database, a company can analyze which customer segment is most likely to buy the product and then market the product only on that particular segment.
## Content
### Attributes
#### People
- ID: Customer's unique identifier
- Year_Birth: Customer's birth year
- Education: Customer's education level
- Marital_Status: Customer's marital status
- Income: Customer's yearly household income
- Kidhome: Number of children in customer's household
- Teenhome: Number of teenagers in customer's household
- Dt_Customer: Date of customer's enrollment with the company
- Recency: Number of days since customer's last purchase
- Complain: 1 if the customer complained in the last 2 years, 0 otherwise
#### Products
- MntWines: Amount spent on wine in last 2 years
- MntFruits: Amount spent on fruits in last 2 years
- MntMeatProducts: Amount spent on meat in last 2 years
- MntFishProducts: Amount spent on fish in last 2 years
- MntSweetProducts: Amount spent on sweets in last 2 years
- MntGoldProds: Amount spent on gold in last 2 years
#### Promotion
- NumDealsPurchases: Number of purchases made with a discount
- AcceptedCmp1: 1 if customer accepted the offer in the 1st campaign, 0 otherwise
- AcceptedCmp2: 1 if customer accepted the offer in the 2nd campaign, 0 otherwise
- AcceptedCmp3: 1 if customer accepted the offer in the 3rd campaign, 0 otherwise
- AcceptedCmp4: 1 if customer accepted the offer in the 4th campaign, 0 otherwise
- AcceptedCmp5: 1 if customer accepted the offer in the 5th campaign, 0 otherwise
- Response: 1 if customer accepted the offer in the last campaign, 0 otherwise
#### Place
- NumWebPurchases: Number of purchases made through the company’s website
- NumCatalogPurchases: Number of purchases made using a catalogue
- NumStorePurchases: Number of purchases made directly in stores
- NumWebVisitsMonth: Number of visits to company’s website in the last month
### Target
- Need to perform clustering to summarize customer segments.
Kaggle dataset identifier: customer-personality-analysis
<jupyter_script># ## Importing needed Libraries
# As our first objective, before getting started with the real task, we import all necessary libraries & functions that will be essential to successfully execute this project. By doing so, we also prepare the environment to handle all requests throughout this activity.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVR
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings("ignore")
# ## Data Ingestion
# we now import our dataset upon which we shall base our entire project.
dataset = pd.read_csv(
"../input/customer-personality-analysis/marketing_campaign.csv", sep="\t"
)
dataset
dataset.info()
# From the info above, we get insights into the various attributes, their data types, and the amount of missing data in each individual attribute. As we can see, the attribute "Income" has a lower row count than the other attributes, indicating missing data, so in our next step we shall get rid of such incomplete rows.
dataset = dataset.dropna()
dataset.shape
# ## Data Pre-Processing & Feature Selection
# #### Data Transformation & Aggregation
# The raw data available possesses little business value on its own. It needs to be transformed & aggregated to make it suitable for deriving business insights. Under this section we will be applying transformations to some of the columns and also adding/removing columns to make the dataset more neat & meaningful.
def isLeap(year):
Leap = None
if year % 400 == 0:
Leap = True
elif year % 100 == 0:
Leap = False
elif year % 4 == 0:
Leap = True
else:
Leap = False
return Leap
def get_LastVisit(date):
    # Days elapsed between the enrolment date (parsed as dd-mm-yyyy) and 1 January 2023
    Months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
today = [1, 1, 2023]
d, m, y = list(map(int, date.split("-")))
Months[1] = 29 if isLeap(y) else 28
d1 = Months[m - 1] - d + (today[0] - 1)
d2 = sum(Months[m:]) + sum(Months[: today[0] - 1])
d3 = sum([366 if isLeap(y) else 365 for y in range(y + 1, today[2])])
diff = d1 + d2 + d3
return diff
def getAgeGroup(age):
ageGroup = ""
if age <= 18:
ageGroup = "Teenager"
elif age > 18 and age <= 30:
ageGroup = "Bachelors"
elif age > 30 and age <= 50:
ageGroup = "Adults"
elif age > 50 and age <= 70:
ageGroup = "Seniors"
elif age >= 70 and age < 100:
ageGroup = "Retired"
else:
ageGroup = "Expired"
return ageGroup
def AcceptOffer(responses):
Accepted = 0
if 1 in responses:
Accepted = 1
return Accepted
dataset["Age"] = dataset.apply(lambda row: 2023 - int(row.Year_Birth), axis=1)
dataset["FirstVisit"] = dataset.apply(
lambda row: get_LastVisit(row.Dt_Customer), axis=1
)
dataset["AgeGroup"] = dataset.apply(lambda row: getAgeGroup(row.Age), axis=1)
dataset.rename(columns={"Response": "AcceptedCmpN"}, inplace=True)
dataset["OfferAccepted"] = dataset.apply(
lambda row: AcceptOffer(
[
row.AcceptedCmp1,
row.AcceptedCmp2,
row.AcceptedCmp3,
row.AcceptedCmp4,
row.AcceptedCmp5,
row.AcceptedCmpN,
]
),
axis=1,
)
# #### Feature Selection
# Now that we have our data transformed & aggregated, let's get rid of the columns that are of no use in our project, for example primary keys and other redundant columns.
dataset = dataset[
[
"Age",
"AgeGroup",
"Education",
"Marital_Status",
"Income",
"Kidhome",
"Teenhome",
"FirstVisit",
"Recency",
"MntWines",
"MntFruits",
"MntMeatProducts",
"MntFishProducts",
"MntSweetProducts",
"MntGoldProds",
"NumDealsPurchases",
"NumWebPurchases",
"NumCatalogPurchases",
"NumStorePurchases",
"NumWebVisitsMonth",
"OfferAccepted",
"Complain",
]
]
dataset
dataset.describe()
# ## Exploratory Data Analysis
# #### Count Analysis of Categorical attributes in our dataset
# Now that our dataset is clean, it has real meaning. In business, such a dataset is considered gold data. Let's now start exploring the correlations, distributions and spread of the variables throughout the sample space.
# assign required values
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25, 5))
# illustrate count plots
ax1 = sns.countplot(x=dataset["Marital_Status"], ax=axes[0])
for i in ax1.containers:
ax1.bar_label(i)
ax2 = sns.countplot(x=dataset["Education"], ax=axes[1])
for i in ax2.containers:
ax2.bar_label(i)
ax3 = sns.histplot(x=dataset["AgeGroup"], ax=axes[2])
for i in ax3.containers:
ax3.bar_label(i)
plt.show()
# In the plots above we get the count distribution within our categorical variables: Marital_Status, Education & Age Group.
# Inferences:
# 1. Within our dataset, the count of Married individuals is highest, followed by Together & Singles.
# 2. Within our dataset, the count of Graduated individuals is highest, followed by PhD & Masters.
# 3. Within our dataset, the count of Senior individuals is highest, followed by Adults & Retired.
# #### Distribution Analysis of Customer Income across different Categorical groups
import seaborn as sns
# assign required values
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25, 5))
ax4 = sns.boxplot(data=dataset, x="Income", y="Education", ax=axes[0])
ax5 = sns.boxplot(data=dataset, x="Income", y="Marital_Status", ax=axes[1])
ax6 = sns.boxplot(data=dataset, x="Income", y="AgeGroup", ax=axes[2])
plt.show()
# In the plots above we aim to study the distribution of income across the 3 categorical variables.
# Inferences obtained:
# 1. We have an average income of 10K across all education statuses, Graduated holding the highest outlier with approx 65K.
# 2. We have an average income of 10K across all Marital statuses, Together holding the highest outlier with approx 70K.
# 3. We have an average income of 10K across all age groups, Adults holding the highest outlier with approx 65K.
# #### Aggregated purchase pattern analysis across different categorical groups
aggregrated_data = dataset.groupby(["AgeGroup", "Education", "Marital_Status"]).agg(
{
"Income": "mean",
"MntWines": "mean",
"MntFruits": "mean",
"MntMeatProducts": "mean",
"MntFishProducts": "mean",
"MntSweetProducts": "mean",
"MntGoldProds": "mean",
"NumDealsPurchases": "mean",
"NumWebPurchases": "mean",
"NumCatalogPurchases": "mean",
"NumStorePurchases": "mean",
"NumWebVisitsMonth": "mean",
}
)
aggregrated_data = aggregrated_data.reset_index()
aggregrated_data
# The table above holds aggregated data displaying the average statistics across our categorical variables. It can be utilized to understand the characteristics of the categorical groups in more depth and also to study the relationships existing between them.
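# As a small usage example (the ranking column, MntWines, is an arbitrary choice): the aggregated table can be sorted to find which demographic combinations spend the most on a given product category.
top_wine_spenders = aggregrated_data.sort_values("MntWines", ascending=False).head(5)
top_wine_spenders[["AgeGroup", "Education", "Marital_Status", "Income", "MntWines"]]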
# #### Correlation analysis of Income across Product segments
v2 = [
"MntWines",
"MntFruits",
"MntMeatProducts",
"MntFishProducts",
"MntSweetProducts",
"MntGoldProds",
]
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(25, 8))
c = 0
for i in range(2):
for j in range(len(v2) // 2):
sns.scatterplot(dataset, x="Income", y=v2[c], ax=axes[i, j])
c += 1
plt.show()
# The plots above aim to illustrate the correlation of Income across the different product categories.
# #### Correlation analysis of Income across Market segments
v2 = [
"NumDealsPurchases",
"NumWebPurchases",
"NumCatalogPurchases",
"NumStorePurchases",
"NumWebVisitsMonth",
]
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(25, 5))
for i in range(len(v2)):
axes[i] = sns.scatterplot(dataset, x="Income", y=v2[i], ax=axes[i])
axes[i].tick_params(axis="x", labelrotation=90)
plt.show()
# The plots above aim to illustrate the correlation of Income across the different market categories.
# ## Model development & predictive analysis
# Under this section we shall be focusing on model development. These models are objects that analyse the patterns hidden within our dataset and can be utilised to perform predictive, clustering or regression analysis. As a matter of fact, we will be looking into all 3 types of model development and their usage within this project. But first, some data engineering & pre-processing is essential to make the data suitable for fitting a model.
# #### Encoding Categorical attributes
encoder1 = LabelEncoder()
dataset["Education"] = encoder1.fit_transform(dataset["Education"])
encoder2 = LabelEncoder()
dataset["Marital_Status"] = encoder2.fit_transform(dataset["Marital_Status"])
dataset
# #### Feature Selection & Data cleaning
final_dataset = dataset[
[
"Age",
"Education",
"Marital_Status",
"Income",
"Recency",
"MntWines",
"MntFruits",
"MntMeatProducts",
"MntFishProducts",
"MntSweetProducts",
"MntGoldProds",
"NumDealsPurchases",
"NumWebPurchases",
"NumCatalogPurchases",
"NumStorePurchases",
"NumWebVisitsMonth",
"Complain",
"OfferAccepted",
]
]
final_dataset.info()
# #### Pearson's Correlation Analysis
plt.figure(figsize=(20, 5))
cor = final_dataset.corr()
sns.heatmap(cor, annot=True)
plt.show()
# In the heatmap above, we study the correlation between the different variables using Pearson correlation analysis.
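# To make the heatmap easier to act on, the strongest pairwise relationships can also be listed explicitly; the 0.6 threshold below is an arbitrary illustrative choice.
corr_pairs = cor.where(np.triu(np.ones(cor.shape, dtype=bool), k=1)).stack()
strong_pairs = corr_pairs[corr_pairs.abs() > 0.6].sort_values(ascending=False)
print(strong_pairs)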
# #### Principal Component Analysis of the data attributes
data = final_dataset.iloc[:, :].values
pca = PCA()
pca_data = pca.fit_transform(data)
for variance in pca.explained_variance_ratio_:
print(round(variance, 5), end=" ")
# The list above shows the percentage of variance (information) retained by each principal component after the decomposition. By using this we can detect & remove unwanted noise from our dataset. The heat map that follows illustrates how each component loads across all of the attributes.
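# A common follow-up, sketched here as an illustration rather than part of the original analysis, is to look at the cumulative explained variance and note how many components would retain, say, 95% of it.
cumulative_variance = np.cumsum(pca.explained_variance_ratio_)
n_components_95 = int(np.argmax(cumulative_variance >= 0.95)) + 1
print(f"{n_components_95} components retain ~95% of the total variance")
plt.plot(range(1, len(cumulative_variance) + 1), cumulative_variance, marker="o")
plt.axhline(0.95, color="red", linestyle="--")
plt.xlabel("Number of principal components")
plt.ylabel("Cumulative explained variance")
plt.grid()
plt.show()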
cols = list(final_dataset.columns)
comps = pd.DataFrame(pca.components_, columns=cols)
fig = plt.figure(figsize=(20, 5))
sns.heatmap(comps, annot=True)
plt.show()
# #### Predictive analysis using classification modelling
# Within this section we will be building a predictive model to predict whether a person will accept a marketing campaign offer or not, using the customer's demographics & purchase history.
X = final_dataset.iloc[:, :-1].values
Y = final_dataset.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
scores = []
degrees = [int(i) for i in range(2, 11)]
for i in degrees:
model = SVC(kernel="poly", degree=i, random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
model_accuracy = accuracy_score(y_test, y_pred)
scores.append(model_accuracy * 100)
plt.plot(degrees, scores)
plt.xlabel("degree of exponent used by the polynomial function")
plt.ylabel("Model Accuracy Score(in %)")
plt.title("SVM model Performance")
plt.grid()
plt.show()
# In the illustration above we get a glimpse of model optimization via hyperparameter tuning versus performance. We can see the Support Vector model could give a maximum accuracy of about 74%. Let's now experiment with an ensemble learning model.
scores = []
degrees = [int(5 * i) for i in range(1, 26)]
for i in degrees:
model = RandomForestClassifier(n_estimators=i, random_state=32)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
model_accuracy = accuracy_score(y_test, y_pred)
scores.append(model_accuracy * 100)
plt.plot(degrees, scores)
plt.xlabel("number of estimators")
plt.ylabel("Model Accuracy Score(in %)")
plt.title("Model Performace Optimization: RandomForestClassifier")
plt.grid()
plt.show()
# In the illustration above we can see model optimization via hyperparameter tuning while studying its performance in parallel. We can see our RandomForest model gives an accuracy above 80% using n_estimators = 60, so we finalize the RandomForestClassifier with n_estimators = 60 as the best model for this task.
best_predictive_model = RandomForestClassifier(n_estimators=60, random_state=32)
best_predictive_model.fit(x_train, y_train)
preds = best_predictive_model.predict(x_test)
model_acc = accuracy_score(y_test, preds)
print(f"The model Performs at an accuracy of {model_acc*100} %\n")
model_performance = classification_report(
    y_test, preds, target_names=["Offer Rejected", "Offer Accepted"]
)
print(model_performance)
# The statistics above illustrate the performance report card of our predictive classification model.
# #### Predictive Analysis using Regression modelling
# Under this section we shall be building a regression model to predict the average amount of investment (spend across product categories) that can be expected from a potential customer, using their demographic data and purchase history.
X = final_dataset.iloc[:, [0, 1, 2, 3, 4, 11, 12, 13, 14, 15, 16, 17]].values
targets = final_dataset.iloc[:, [5, 6, 7, 8, 9, 10]].values
Y = []
for row in range(len(targets)):
avg = sum(targets[row]) // len(targets[row])
Y.append(avg)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
regression_model = SVR(kernel="rbf")
regression_model.fit(x_train, y_train)
preds = regression_model.predict(x_test)
score = r2_score(y_test, preds)
score = round(score, 2)
print(f"The model has a R2 score of {score}.")
# In finance, an R-squared equal to or above 0.7 would generally be seen as showing a high level of correlation, indicating a strong ability of the regression model to predict the average investment a given customer will make, given their demographic data.
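# As a usage sketch, the fitted model can score an individual customer; the profile below is entirely hypothetical and the Education / Marital_Status values are label-encoded integers.
# Feature order: Age, Education, Marital_Status, Income, Recency, NumDealsPurchases,
# NumWebPurchases, NumCatalogPurchases, NumStorePurchases, NumWebVisitsMonth, Complain, OfferAccepted
hypothetical_customer = np.array([[45, 2, 3, 60000, 30, 2, 4, 2, 6, 5, 0, 0]])
expected_avg_spend = regression_model.predict(hypothetical_customer)[0]
print(f"Expected average spend per product category: {expected_avg_spend:.2f}")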
# #### Customer Segmentation using KMeans Clustering
# The models we have been building so far can be referred to as supervised learning models. Within this section we shall aim to build an unsupervised learning model, using KMeans clustering to group customers into segments.
X = final_dataset.iloc[:, :].values
scaler = StandardScaler()
x_scaled = scaler.fit_transform(X)
inertia = []
n_clusters = [int(i) for i in range(2, 10)]
for i in n_clusters:
model = KMeans(init="k-means++", n_clusters=i, random_state=32)
model.fit(x_scaled)
inertia.append(model.inertia_)
plt.plot(n_clusters, inertia)
plt.xlabel("Number of Clusters")
plt.ylabel("Model inertia")
plt.title("Use of Elbow Method to determine optimum number of Clusters")
plt.grid()
plt.show()
# In the chart above we make use of the Elbow Method to determine the optimum number of clusters for the job.
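# The elbow read-off can be ambiguous, so as an optional cross-check (not in the original notebook) silhouette scores can be computed for the same range of cluster counts; higher is better.
from sklearn.metrics import silhouette_score

for k in n_clusters:
    km = KMeans(init="k-means++", n_clusters=k, random_state=32)
    labels_k = km.fit_predict(x_scaled)
    print(f"k={k}: silhouette score = {silhouette_score(x_scaled, labels_k):.3f}")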
cluster_model = KMeans(init="k-means++", n_clusters=6, random_state=0)
cluster_model.fit(x_scaled)
final_dataset["Customer_Segment"] = cluster_model.labels_
# #### Exploratory Data Analysis across the customer segments obtained
customers_segment = final_dataset.groupby("Customer_Segment").agg(
{
"Age": "mean",
"Income": "mean",
"MntWines": "mean",
"MntFruits": "mean",
"MntMeatProducts": "mean",
"MntFishProducts": "mean",
"MntSweetProducts": "mean",
"MntGoldProds": "mean",
"NumDealsPurchases": "mean",
"NumWebPurchases": "mean",
"NumCatalogPurchases": "mean",
"NumStorePurchases": "mean",
"NumWebVisitsMonth": "mean",
}
)
customers_segment = customers_segment.reset_index()
customers_segment
final_dataset["Education"] = encoder1.inverse_transform(dataset["Education"])
final_dataset["Marital_Status"] = encoder2.inverse_transform(dataset["Marital_Status"])
# So from our analysis we can conclude that the customers within our dataset can be broadly categorized into 6 clusters, each with their own unique characteristics & behaviour. The behaviour of each cluster can be briefly described as:
# * Cluster 0: Lower age group, lower income, mostly graduates & married, with no complaints & a poor chance of accepting any marketing campaign.
# * Cluster 1: Median age group, higher income with high product purchases, mostly graduates & married, with no complaints and more likely to decline any marketing campaign.
# * Cluster 2: Median age group, median income, mostly married & graduates, with no complaints and a 50-50 likelihood of accepting any marketing campaign.
# * Cluster 3: Median age group, lower-quartile income category, mostly married & graduates, with many complaints & a poor chance of accepting any marketing campaign.
# * Cluster 4: Median age group, higher income category with relatively poor product purchases, mostly married & graduates, with no complaints and a higher likelihood of accepting any marketing campaign.
# * Cluster 5: Upper age group, lower-quartile income category, mostly PhD & married, with no complaints & a poor chance of accepting any marketing campaign.
# To understand these in more detail we can refer to the table above and the charts below this cell, which provide a more detailed insight into the characteristics of each cluster.
fig, axes = plt.subplots(nrows=4, ncols=6, figsize=(30, 20))
vars = ["Education", "Marital_Status", "Complain", "OfferAccepted"]
row = 0
for var in vars:
for i in range(6):
data = final_dataset.where(final_dataset["Customer_Segment"] == i)
axes[row][i] = sns.countplot(data, x=var, ax=axes[row][i])
axes[row][i].set_xlabel(f"Customer Segment {i}")
axes[row][i].set_ylabel(var)
row += 1
plt.show()
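# Finally, a small optional sketch: persist the segment assignments so the marketing team can target each group separately (the file name is arbitrary).
final_dataset.to_csv("CustomerSegments.csv", index=False)
final_dataset["Customer_Segment"].value_counts()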
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/379/129379511.ipynb
|
customer-personality-analysis
|
whenamancodes
|
[{"Id": 129379511, "ScriptId": 37974531, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6798117, "CreationDate": "05/13/2023 09:38:51", "VersionNumber": 4.0, "Title": "Marketting_Campaign_Analysis", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 372.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 358.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185373262, "KernelVersionId": 129379511, "SourceDatasetVersionId": 4423098}]
|
[{"Id": 4423098, "DatasetId": 2590810, "DatasourceVersionId": 4482323, "CreatorUserId": 8676029, "LicenseName": "CC0: Public Domain", "CreationDate": "10/30/2022 14:17:42", "VersionNumber": 1.0, "Title": "Company's Ideal Customers | Marketing Strategy", "Slug": "customer-personality-analysis", "Subtitle": "Analysis of company's ideal customers", "Description": "## Context\n### Problem Statement\nCustomer Personality Analysis is a detailed analysis of a company\u2019s ideal customers. It helps a business to better understand its customers and makes it easier for them to modify products according to the specific needs, behaviors and concerns of different types of customers.\n\nCustomer personality analysis helps a business to modify its product based on its target customers from different types of customer segments. For example, instead of spending money to market a new product to every customer in the company\u2019s database, a company can analyze which customer segment is most likely to buy the product and then market the product only on that particular segment.\n\n## Content\n### Attributes\n\n#### People\n- ID: Customer's unique identifier\n- Year_Birth: Customer's birth year\n- Education: Customer's education level\n- Marital_Status: Customer's marital status\n- Income: Customer's yearly household income\n- Kidhome: Number of children in customer's household\n- Teenhome: Number of teenagers in customer's household\n- Dt_Customer: Date of customer's enrollment with the company\n- Recency: Number of days since customer's last purchase\n- Complain: 1 if the customer complained in the last 2 years, 0 otherwise\n\n#### Products\n- MntWines: Amount spent on wine in last 2 years\n- MntFruits: Amount spent on fruits in last 2 years\n- MntMeatProducts: Amount spent on meat in last 2 years\n- MntFishProducts: Amount spent on fish in last 2 years\n- MntSweetProducts: Amount spent on sweets in last 2 years\n- MntGoldProds: Amount spent on gold in last 2 years\n\n#### Promotion\n- NumDealsPurchases: Number of purchases made with a discount\n- AcceptedCmp1: 1 if customer accepted the offer in the 1st campaign, 0 otherwise\n- AcceptedCmp2: 1 if customer accepted the offer in the 2nd campaign, 0 otherwise\n- AcceptedCmp3: 1 if customer accepted the offer in the 3rd campaign, 0 otherwise\n- AcceptedCmp4: 1 if customer accepted the offer in the 4th campaign, 0 otherwise\n- AcceptedCmp5: 1 if customer accepted the offer in the 5th campaign, 0 otherwise\n- Response: 1 if customer accepted the offer in the last campaign, 0 otherwise\n\n#### Place\n- NumWebPurchases: Number of purchases made through the company\u2019s website\n- NumCatalogPurchases: Number of purchases made using a catalogue\n- NumStorePurchases: Number of purchases made directly in stores\n- NumWebVisitsMonth: Number of visits to company\u2019s website in the last month\n\n### Target\n- Need to perform clustering to summarize customer segments.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2590810, "CreatorUserId": 8676029, "OwnerUserId": 8676029.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4423098.0, "CurrentDatasourceVersionId": 4482323.0, "ForumId": 2620840, "Type": 2, "CreationDate": "10/30/2022 14:17:42", "LastActivityDate": "10/30/2022", "TotalViews": 22638, "TotalDownloads": 3277, "TotalVotes": 65, "TotalKernels": 7}]
|
[{"Id": 8676029, "UserName": "whenamancodes", "DisplayName": "Aman Chauhan", "RegisterDate": "10/22/2021", "PerformanceTier": 2}]
|
# ## Importing needed Libraries
# As our first objective, before getting started with the real task, we import all necessary libraries & functions that will be essential to successfully execute this project. By doing so, we also prepare the environment to handle all requests throughout this activity.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVR
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings("ignore")
# ## Data Ingestion
# we now import our dataset upon which we shall base our entire project.
dataset = pd.read_csv(
"../input/customer-personality-analysis/marketing_campaign.csv", sep="\t"
)
dataset
dataset.info()
# From the info above, we get insights into the various attributes, their data types, and the amount of missing data in each individual attribute. As we can see, the attribute "Income" has a lower row count than the other attributes, indicating missing data, so in our next step we shall get rid of such incomplete rows.
dataset = dataset.dropna()
dataset.shape
# ## Data Pre-Processing & Feature Selection
# #### Data Transformation & Aggregation
# The raw data available possesses little business value on its own. It needs to be transformed & aggregated to make it suitable for deriving business insights. Under this section we will be applying transformations to some of the columns and also adding/removing columns to make the dataset more neat & meaningful.
def isLeap(year):
Leap = None
if year % 400 == 0:
Leap = True
elif year % 100 == 0:
Leap = False
elif year % 4 == 0:
Leap = True
else:
Leap = False
return Leap
def get_LastVisit(date):
    # Days elapsed between the enrolment date (parsed as dd-mm-yyyy) and 1 January 2023
    Months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
today = [1, 1, 2023]
d, m, y = list(map(int, date.split("-")))
Months[1] = 29 if isLeap(y) else 28
d1 = Months[m - 1] - d + (today[0] - 1)
d2 = sum(Months[m:]) + sum(Months[: today[0] - 1])
d3 = sum([366 if isLeap(y) else 365 for y in range(y + 1, today[2])])
diff = d1 + d2 + d3
return diff
def getAgeGroup(age):
ageGroup = ""
if age <= 18:
ageGroup = "Teenager"
elif age > 18 and age <= 30:
ageGroup = "Bachelors"
elif age > 30 and age <= 50:
ageGroup = "Adults"
elif age > 50 and age <= 70:
ageGroup = "Seniors"
elif age >= 70 and age < 100:
ageGroup = "Retired"
else:
ageGroup = "Expired"
return ageGroup
def AcceptOffer(responses):
Accepted = 0
if 1 in responses:
Accepted = 1
return Accepted
dataset["Age"] = dataset.apply(lambda row: 2023 - int(row.Year_Birth), axis=1)
dataset["FirstVisit"] = dataset.apply(
lambda row: get_LastVisit(row.Dt_Customer), axis=1
)
dataset["AgeGroup"] = dataset.apply(lambda row: getAgeGroup(row.Age), axis=1)
dataset.rename(columns={"Response": "AcceptedCmpN"}, inplace=True)
dataset["OfferAccepted"] = dataset.apply(
lambda row: AcceptOffer(
[
row.AcceptedCmp1,
row.AcceptedCmp2,
row.AcceptedCmp3,
row.AcceptedCmp4,
row.AcceptedCmp5,
row.AcceptedCmpN,
]
),
axis=1,
)
# #### Feature Selection
# Now that we have our data transformed & aggregated, let's get rid of the columns that are of no use in our project, for example primary keys and other redundant columns.
dataset = dataset[
[
"Age",
"AgeGroup",
"Education",
"Marital_Status",
"Income",
"Kidhome",
"Teenhome",
"FirstVisit",
"Recency",
"MntWines",
"MntFruits",
"MntMeatProducts",
"MntFishProducts",
"MntSweetProducts",
"MntGoldProds",
"NumDealsPurchases",
"NumWebPurchases",
"NumCatalogPurchases",
"NumStorePurchases",
"NumWebVisitsMonth",
"OfferAccepted",
"Complain",
]
]
dataset
dataset.describe()
# ## Exploratory Data Analysis
# #### Count Analysis of Categorical attributes in our dataset
# Now that our dataset is clean, it has real meaning; in business, such a dataset is often treated as gold data. Let's now start exploring the correlations, distributions and spread of the variables throughout the sample space.
# assign required values
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25, 5))
# illustrate count plots
ax1 = sns.countplot(x=dataset["Marital_Status"], ax=axes[0])
for i in ax1.containers:
ax1.bar_label(i)
ax2 = sns.countplot(x=dataset["Education"], ax=axes[1])
for i in ax2.containers:
ax2.bar_label(i)
ax3 = sns.histplot(x=dataset["AgeGroup"], ax=axes[2])
for i in ax3.containers:
ax3.bar_label(i)
plt.show()
# In the plots above we get the count distribution within our categorical variables: Marital_Status, Education & Age Group.
# Inferences:
# 1. within our dataset, the count of Married individuals is highest, followed by Together & Singles.
# 2. within our dataset, the count of Graduated individuals is highest, followed by PHD & Masters.
# 3. within our dataset, the count of Senior individuals is highest, followed by Adults & Retired.
# #### Distribution Analysis of Customer Income across different Categorical groups
import seaborn as sns
# assign required values
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25, 5))
ax4 = sns.boxplot(data=dataset, x="Income", y="Education", ax=axes[0])
ax5 = sns.boxplot(data=dataset, x="Income", y="Marital_Status", ax=axes[1])
ax6 = sns.boxplot(data=dataset, x="Income", y="AgeGroup", ax=axes[2])
plt.show()
# In the plot we aim to study the distribution of income, across 3 categorical variables.
# Inferences obtained:
# 1. We have an average income of 10K across all education statuses, Graduated holding the highest outlier at approx 65K.
# 2. We have an average income of 10K across all Marital statuses, Together holding the highest outlier at approx 70K.
# 3. We have an average income of 10K across all age groups, Adults holding the highest outlier at approx 65K.
# #### Aggregated Purchase pattern analysis across different categorical groups
aggregrated_data = dataset.groupby(["AgeGroup", "Education", "Marital_Status"]).agg(
{
"Income": "mean",
"MntWines": "mean",
"MntFruits": "mean",
"MntMeatProducts": "mean",
"MntFishProducts": "mean",
"MntSweetProducts": "mean",
"MntGoldProds": "mean",
"NumDealsPurchases": "mean",
"NumWebPurchases": "mean",
"NumCatalogPurchases": "mean",
"NumStorePurchases": "mean",
"NumWebVisitsMonth": "mean",
}
)
aggregrated_data = aggregrated_data.reset_index()
aggregrated_data
# The table above shows aggregated data with the average statistics across our categorical variables. It can be used to understand the characteristics of the categorical groups in more depth and to study the relationships that exist between them.
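# As an illustration of how this table can be queried (an optional example, not part of the original
# analysis), the profile of a single age group can be pulled out and ranked by average income.
aggregrated_data[aggregrated_data["AgeGroup"] == "Adults"].sort_values(
    "Income", ascending=False
).head()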
# #### Correlation analysis of Income across Product segments
v2 = [
"MntWines",
"MntFruits",
"MntMeatProducts",
"MntFishProducts",
"MntSweetProducts",
"MntGoldProds",
]
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(25, 8))
c = 0
for i in range(2):
for j in range(len(v2) // 2):
sns.scatterplot(dataset, x="Income", y=v2[c], ax=axes[i, j])
c += 1
plt.show()
# The plots above illustrate the correlation of Income across the different product categories.
# #### Correlation analysis of Income across Market segments
v2 = [
"NumDealsPurchases",
"NumWebPurchases",
"NumCatalogPurchases",
"NumStorePurchases",
"NumWebVisitsMonth",
]
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(25, 5))
for i in range(len(v2)):
axes[i] = sns.scatterplot(dataset, x="Income", y=v2[i], ax=axes[i])
axes[i].tick_params(axis="x", labelrotation=90)
plt.show()
# The plots above illustrate the correlation of Income across the different market categories.
# ## Model development & predictive analysis
# In this section we focus on model development. These models are objects that analyse patterns hidden within our dataset and can be utilised to perform predictive, clustering or regression analysis. As a matter of fact, we will look into all 3 types of model development and their usage within this project. But first, some data engineering & pre-processing is essential to make the data suitable for fitting a model.
# #### Encoding Categorical attributes
encoder1 = LabelEncoder()
dataset["Education"] = encoder1.fit_transform(dataset["Education"])
encoder2 = LabelEncoder()
dataset["Marital_Status"] = encoder2.fit_transform(dataset["Marital_Status"])
dataset
# #### Feature Selection & Data cleaning
final_dataset = dataset[
[
"Age",
"Education",
"Marital_Status",
"Income",
"Recency",
"MntWines",
"MntFruits",
"MntMeatProducts",
"MntFishProducts",
"MntSweetProducts",
"MntGoldProds",
"NumDealsPurchases",
"NumWebPurchases",
"NumCatalogPurchases",
"NumStorePurchases",
"NumWebVisitsMonth",
"Complain",
"OfferAccepted",
]
]
final_dataset.info()
# #### Pearson's Correlation Analysis
plt.figure(figsize=(20, 5))
cor = final_dataset.corr()
sns.heatmap(cor, annot=True)
plt.show()
# In the heatmap above, we study the correlation between the different variables using Pearson correlation analysis.
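# As a complementary (optional) view, the strongest pairwise relationships can also be listed
# numerically instead of being read off the heatmap.
corr_pairs = cor.abs().unstack().sort_values(ascending=False)
corr_pairs = corr_pairs[corr_pairs < 1].drop_duplicates()
print(corr_pairs.head(10))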
# #### Principal Component Analysis of the data attributes
data = final_dataset.iloc[:, :].values
pca = PCA()
pca_data = pca.fit_transform(data)
for variance in pca.explained_variance_ratio_:
print(round(variance, 5), end=" ")
# The list above shows the proportion of variance retained by each principal component after the decomposition. Using this we can detect & remove unwanted noise from our dataset. The heat map that follows illustrates how much each attribute contributes to each component.
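# A common (optional) follow-up is to look at the cumulative explained variance, e.g. to see how
# many components would be needed to retain roughly 95% of the variance.
cumulative = np.cumsum(pca.explained_variance_ratio_)
n_components_95 = int(np.argmax(cumulative >= 0.95)) + 1
print(f"{n_components_95} components retain ~95% of the variance")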
cols = list(final_dataset.columns)
comps = pd.DataFrame(pca.components_, columns=cols)
fig = plt.figure(figsize=(20, 5))
sns.heatmap(comps, annot=True)
plt.show()
# #### Predictive analysis using classification modelling
# In this section we build a predictive model to predict whether a person will accept a marketing campaign offer or not, using the customer's demographics & purchase history.
X = final_dataset.iloc[:, :-1].values
Y = final_dataset.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
scores = []
degrees = [int(i) for i in range(2, 11)]
for i in degrees:
model = SVC(kernel="poly", degree=i, random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
model_accuracy = accuracy_score(y_test, y_pred)
scores.append(model_accuracy * 100)
plt.plot(degrees, scores)
plt.xlabel("degree of exponent used by the polynomial function")
plt.ylabel("Model Accuracy Score(in %)")
plt.title("SVM model Performance")
plt.grid()
plt.show()
# In the illustration above we get a glimpse of model optimization via hyperparameter tuning versus performance. We can see the Support Vector model gives a maximum of about 74% accuracy. Let's now experiment with an ensemble learning model.
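# The manual loop above tunes a single hyperparameter; as an alternative sketch (not used further in
# this notebook), the same search could be expressed with scikit-learn's GridSearchCV, which also
# cross-validates each setting.
from sklearn.model_selection import GridSearchCV
param_grid = {"kernel": ["poly"], "degree": list(range(2, 11))}
grid = GridSearchCV(SVC(random_state=0), param_grid, cv=3, scoring="accuracy")
grid.fit(x_train, y_train)
print(grid.best_params_, round(grid.best_score_ * 100, 2))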
scores = []
degrees = [int(5 * i) for i in range(1, 26)]
for i in degrees:
model = RandomForestClassifier(n_estimators=i, random_state=32)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
model_accuracy = accuracy_score(y_test, y_pred)
scores.append(model_accuracy * 100)
plt.plot(degrees, scores)
plt.xlabel("number of estimators")
plt.ylabel("Model Accuracy Score(in %)")
plt.title("Model Performace Optimization: RandomForestClassifier")
plt.grid()
plt.show()
# In the illustration above we see model optimization via hyperparameter tuning while studying its performance in parallel. Our RandomForest model gives an accuracy above 80% with n_estimators = 60, so we finalize a RandomForestClassifier with n_estimators = 60 as the best model for this task.
best_predictive_model = RandomForestClassifier(n_estimators=60, random_state=32)
best_predictive_model.fit(x_train, y_train)
preds = best_predictive_model.predict(x_test)
model_acc = accuracy_score(y_test, preds)
print(f"The model Performs at an accuracy of {model_acc*100} %\n")
model_performance = classification_report(
y_test, preds, target_names=["Offer Rejected", "Offer Purchased"]
)
print(model_performance)
# The statistics above illustrate the performance report card of our predictive analysis model.
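# An additional (optional) diagnostic: the confusion matrix makes the class-wise errors of the
# classifier explicit.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, preds))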
# #### Predictive Analysis using Regression modelling
# In this section we build a regression model to predict the average amount that a potential customer can be expected to spend, using their demographic data and purchase history.
X = final_dataset.iloc[:, [0, 1, 2, 3, 4, 11, 12, 13, 14, 15, 16, 17]].values
targets = final_dataset.iloc[:, [5, 6, 7, 8, 9, 10]].values
Y = []
for row in range(len(targets)):
avg = sum(targets[row]) // len(targets[row])
Y.append(avg)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
regression_model = SVR(kernel="rbf")
regression_model.fit(x_train, y_train)
preds = regression_model.predict(x_test)
score = r2_score(y_test, preds)
score = round(score, 2)
print(f"The model has a R2 score of {score}.")
# In finance, an R-squared equal to or above 0.7 would generally be seen as showing a high level of correlation, indicating a strong ability of the regression model to predict the average amount a given customer will spend, given their demographic data.
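# To complement R-squared (an optional addition, not in the original analysis), an error measure in
# the target's own units can be reported as well.
from sklearn.metrics import mean_squared_error
rmse = mean_squared_error(y_test, preds, squared=False)
print(f"RMSE of the regression model: {round(rmse, 2)}")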
# #### Customer Segmentation using KMeans Clustering
# The models we have built so far can be referred to as supervised learning models. In this section we aim to build an unsupervised learning model, using KMeans clustering to group customers into clusters.
X = final_dataset.iloc[:, :].values
scaler = StandardScaler()
x_scaled = scaler.fit_transform(X)
inertia = []
n_clusters = [int(i) for i in range(2, 10)]
for i in n_clusters:
model = KMeans(init="k-means++", n_clusters=i, random_state=32)
model.fit(x_scaled)
inertia.append(model.inertia_)
plt.plot(n_clusters, inertia)
plt.xlabel("Number of Clusters")
plt.ylabel("Model inertia")
plt.title("Use of Elbow Method to determine optimum number of Clusters")
plt.grid()
plt.show()
# In the chart above we use the Elbow Method to determine the optimum number of clusters for the job.
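# The elbow curve can be ambiguous, so as an optional cross-check the silhouette score is often
# computed for the candidate cluster counts as well (higher is better).
from sklearn.metrics import silhouette_score
for k in [4, 5, 6, 7]:
    cluster_labels = KMeans(init="k-means++", n_clusters=k, random_state=32).fit_predict(x_scaled)
    print(k, round(silhouette_score(x_scaled, cluster_labels), 3))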
cluster_model = KMeans(init="k-means++", n_clusters=6, random_state=0)
cluster_model.fit(x_scaled)
final_dataset["Customer_Segment"] = cluster_model.labels_
# #### Exploratory Data analysis across the customer segments obtained
customers_segment = final_dataset.groupby("Customer_Segment").agg(
{
"Age": "mean",
"Income": "mean",
"MntWines": "mean",
"MntFruits": "mean",
"MntMeatProducts": "mean",
"MntFishProducts": "mean",
"MntSweetProducts": "mean",
"MntGoldProds": "mean",
"NumDealsPurchases": "mean",
"NumWebPurchases": "mean",
"NumCatalogPurchases": "mean",
"NumStorePurchases": "mean",
"NumWebVisitsMonth": "mean",
}
)
customers_segment = customers_segment.reset_index()
customers_segment
final_dataset["Education"] = encoder1.inverse_transform(dataset["Education"])
final_dataset["Marital_Status"] = encoder2.inverse_transform(dataset["Marital_Status"])
# So from our analysis we can conclude that the customers within our dataset can be broadly categorized into 6 clusters, each with their own unique characteristics & behaviour. The behaviour of each cluster can be briefly described as:
# * Cluster 0: Lower age group, lower income, mostly graduates & married, with no complaints & poor chances of accepting any marketing campaign.
# * Cluster 1: Median age group, higher income with high product purchases, mostly graduates & married, with no complaints and more likely to decline any marketing campaign.
# * Cluster 2: Median age group, median income, mostly married & graduates, with no complaints and a 50-50 likelihood of accepting any marketing campaign.
# * Cluster 3: Median age group, lower-quartile income category, mostly married & graduates, with many complaints & poor chances of accepting any marketing campaign.
# * Cluster 4: Median age group, higher income category with relatively low product purchases, mostly married & graduates, with no complaints and a higher likelihood of accepting any marketing campaign.
# * Cluster 5: Upper age group, lower-quartile income category, mostly PhD & married, with no complaints & poor chances of accepting any marketing campaign.
# To understand these in more detail we can refer to the table above and the charts below this cell, which provide a more detailed insight into the characteristics of each cluster.
fig, axes = plt.subplots(nrows=4, ncols=6, figsize=(30, 20))
vars = ["Education", "Marital_Status", "Complain", "OfferAccepted"]
row = 0
for var in vars:
for i in range(6):
data = final_dataset.where(final_dataset["Customer_Segment"] == i)
axes[row][i] = sns.countplot(data, x=var, ax=axes[row][i])
axes[row][i].set_xlabel(f"Customer Segment {i}")
axes[row][i].set_ylabel(var)
row += 1
plt.show()
| false | 1 | 5,194 | 0 | 5,850 | 5,194 |
||
129379891
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import LabelEncoder
# from sklearn.pandas import CategoricalImputer
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data_dir = "/kaggle/input/loan-status-binary-classification/"
train = pd.read_csv(data_dir + "train.csv")
test = pd.read_csv(data_dir + "test.csv")
print(train.shape)
print(train.head())
print("==============")
print(test.shape)
print(test.head())
train["Gender"].value_counts(dropna=False)
test["Gender"].value_counts(dropna=False)
train["Gender"].fillna("Third sex", inplace=True)
test["Gender"].fillna("Third sex", inplace=True)
train["Gender"].value_counts(dropna=False)
test["Gender"].value_counts(dropna=False)
train1 = pd.DataFrame(train)
dummies = pd.get_dummies(train1["Gender"], prefix="Gender")
train1 = pd.concat([train1, dummies], axis=1)
train1 = train1.drop(["Gender"], axis=1)
print(train1)
test1 = pd.DataFrame(test)
dummies = pd.get_dummies(test1["Gender"], prefix="Gender")
test1 = pd.concat([test1, dummies], axis=1)
test1 = test1.drop(["Gender"], axis=1)
print(test1)
train1["Married"].value_counts(dropna=False)
test1["Married"].value_counts(dropna=False)
train1["Married"].fillna("Yes", inplace=True)
test1["Married"].fillna("Yes", inplace=True)
train1["Married"].value_counts(dropna=False)
train1["Married"].value_counts(dropna=False)
train2["Married"] = train1["Married"].map({"Yes": 1, "No": 0})
test2["Married"] = test1["Married"].map({"Yes": 1, "No": 0})
train2["Dependents"].value_counts(dropna=False)
test2["Dependents"].value_counts(dropna=False)
train2["Dependents"].fillna("0", inplace=True)
test2["Dependents"].fillna("0", inplace=True)
train2["Dependents"].value_counts(dropna=False)
test2["Dependents"].value_counts(dropna=False)
encoder = LabelEncoder()
train2["Dependents"] = encoder.fit_transform(train2["Dependents"])
print(train2)
encoder = LabelEncoder()
test2["Dependents"] = encoder.fit_transform(test2["Dependents"])
print(test2)
train2["Education"].value_counts(dropna=False)
train3["Education"] = train2["Education"].map({"Graduate": 1, "Not Graduate": 0})
test3["Education"] = test2["Education"].map({"Graduate": 1, "Not Graduate": 0})
test3["Self_Employed"].value_counts(dropna=False)
train3["Self_Employed"].fillna("No", inplace=True)
test3["Self_Employed"].fillna("No", inplace=True)
train4["Self_Employed"] = train3["Self_Employed"].map({"Yes": 1, "No": 0})
test4["Self_Employed"] = test3["Self_Employed"].map({"Yes": 1, "No": 0})
train4["Self_Employed"].value_counts(dropna=False)
train4["Applicant_Income"].value_counts(dropna=False)
train4["Coapplicant_Income"].value_counts(dropna=False)
train4["Loan_Amount"].value_counts(dropna=False)
train4["Term"].value_counts(dropna=False)
mean_term = train4["Term"].mean()
train4["Term"].fillna(mean_term, inplace=True)
test4["Term"].fillna(mean_term, inplace=True)
train4["Term"].value_counts(dropna=False)
train4["Credit_History"].value_counts(dropna=False)
train4["Credit_History"].fillna(1.0, inplace=True)
test4["Credit_History"].fillna(1.0, inplace=True)
train5 = pd.DataFrame(train4)
dummies = pd.get_dummies(train5["Credit_History"], prefix="Credit_History")
train5 = pd.concat([train5, dummies], axis=1)
train5 = train5.drop(["Credit_History"], axis=1)
print(train5)
test5 = pd.DataFrame(test4)
dummies = pd.get_dummies(test5["Credit_History"], prefix="Credit_History")
test5 = pd.concat([test5, dummies], axis=1)
test5 = test5.drop(["Credit_History"], axis=1)
print(test5)
test5["Area"].value_counts(dropna=False)
train6 = pd.DataFrame(train5)
dummies = pd.get_dummies(train6["Area"], prefix="Area")
train6 = pd.concat([train6, dummies], axis=1)
train6 = train6.drop(["Area"], axis=1)
print(train6)
test6 = pd.DataFrame(test5)
dummies = pd.get_dummies(test6["Area"], prefix="Area")
test6 = pd.concat([test6, dummies], axis=1)
test6 = test6.drop(["Area"], axis=1)
print(test6)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/379/129379891.ipynb
| null | null |
[{"Id": 129379891, "ScriptId": 38435961, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15037956, "CreationDate": "05/13/2023 09:42:24", "VersionNumber": 2.0, "Title": "loan-baseline2222", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 50.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 102.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,512 | 0 | 1,512 | 1,512 |
||
129379941
|
# For the first task we use data from the [Jester Online Joke Recommender System](https://goldberg.berkeley.edu/jester-data/)
# **Data description**
# The file `train_joke_df.csv` contains:
# - UID - user ids
# - JID - ids of the jokes that were rated
# - Rating - the rating the user gave to the joke
# Ratings take values from -10.00 to 10.00. Values of 99.00 may occur, but they denote Null (no rating from the user).
# Evaluation metric: [RMSE](https://www.codecamp.ru/blog/how-to-interpret-rmse/)
# RMSE: `3.99981`
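# For reference, the standard definition of the metric (textbook formula, not specific to this
# competition's implementation): RMSE = sqrt(mean((y_true - y_pred)^2)), i.e. in numpy:
# np.sqrt(np.mean((y_true - y_pred) ** 2))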
# # Import
import numpy as np
import pandas as pd
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
import seaborn as sns
import os
import joblib
np.random.seed(42)
# ### Loading and processing the data
df = pd.read_csv("/kaggle/input/recsys-in-practice/train_joke_df.csv")
df.head(5)
# Transform the dataframe into a more convenient form: each row now holds the 100 ratings of a single user
df1 = pd.pivot_table(data=df, values="Rating", index="UID", columns="JID")
df1
df1["UID"] = df1.index
df1.info()
# Our future models:
# 100 `Ridge`
# 100 `CatBoostRegressor`
# # Ridge
# We add features such as:
# `mean_user_ratin` - the user's mean rating
# `median_user_ratin` - the median of the user's ratings
# `min_user_ratin` - the minimum rating the user has given
# `max_user_ratin` - the maximum rating the user has given
# `count_user_ratin` - the number of jokes the user has rated
# We also replace NaNs with the median rating of the given joke (to avoid the influence of outliers).
# On these features (105 in total) we train a `Ridge` regression.
# Note that we have 100 `Ridge` models, since each model is trained to predict the rating of one particular joke (this allows for more precise tuning).
ridge_RMSE = []
linear_models = {}
for i in range(1, 101):
df2 = df1.dropna(subset=[i])
X = df2.drop(columns=[i, "UID"], axis=1)
count = X.isnull().sum(axis=1)
y = df2[i]
X = X.fillna(X.median())
X["mean_user_ratin"] = X.mean(axis=1)
X["median_user_ratin"] = X.median(axis=1)
X["min_user_ratin"] = X.min(axis=1)
X["max_user_ratin"] = X.max(axis=1)
X["count_user_ratin"] = 100 - count
X_train, X_test, y_train, y_test = train_test_split(
np.array(X), np.array(y), test_size=0.1, random_state=42
)
ridge = Ridge(alpha=0.1)
ridge.fit(X_train, y_train)
linear_models[f"model_{i}"] = ridge
ridge_pred = ridge.predict(X_test)
ridge_rms = mean_squared_error(y_test, ridge_pred, squared=False)
ridge_RMSE.append(ridge_rms)
if i % 10 == 0:
print(i, "%")
scores_RMSE = np.array(ridge_RMSE).mean()
scores_RMSE
# Let's look at the average quality across jokes
width = 1.0
plt.figure(figsize=(80, 20))
plt.axhline(y=4, color="r", linestyle="-", linewidth=3)
plt.axhline(y=scores_RMSE, color="black", linestyle="-.", linewidth=3)
plt.bar(np.arange(1, 101, 1), ridge_RMSE, width, color="g", edgecolor="black")
os.mkdir("linear_models")
dir = "linear_models"
def save_ridge_models(linear_models, dir):
for i in range(1, 101):
model = linear_models[f"model_{i}"]
path = dir + "/" + f"model_{i}.pkl"
joblib.dump(model, path)
print("Done!")
def load_ridge_models(dir):
linear_models = {}
for i in range(1, 101):
path = dir + "/" + f"model_{i}.pkl"
model = joblib.load(path)
linear_models[f"model_{i}"] = model
print("Done!")
return linear_models
save_ridge_models(linear_models, dir)
linear_models = load_ridge_models(dir)
# # Catboost
df1
# ### Now we train 100 CatBoostRegressor models on the 99 ratings and the 6 features we engineered:
# `mean_user_ratin` - the user's mean rating
# `median_user_ratin` - the median of the user's ratings
# `min_user_ratin` - the minimum rating the user has given
# `max_user_ratin` - the maximum rating the user has given
# `count_user_ratin` - the number of jokes the user has rated
# `ridge_model_pred` - the joke rating predicted by `Ridge`
######
RMSE = []
models = {}
for i in range(1, 101):
df2 = df1.dropna(subset=[i])
X = df2.drop(columns=[i, "UID"], axis=1)
count = X.isnull().sum(axis=1)
y = df2[i]
X = X.fillna(X.median())
X["mean_user_ratin"] = X.mean(axis=1)
X["median_user_ratin"] = X.median(axis=1)
X["min_user_ratin"] = X.min(axis=1)
X["max_user_ratin"] = X.max(axis=1)
X["count_user_ratin"] = 100 - count
predictions = linear_models[f"model_{i}"].predict(np.array(X))
X["ridge_model_pred"] = predictions
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
model = CatBoostRegressor(
loss_function="RMSE",
eval_metric="RMSE",
iterations=300,
learning_rate=0.1,
task_type="GPU",
use_best_model=True,
)
model.fit(X_train, y_train, eval_set=(X_test, y_test), verbose=100)
models[f"model_{i}"] = model
pred = model.predict(np.array(X_test))
rms = mean_squared_error(y_test, pred, squared=False)
RMSE.append(rms)
if i % 10 == 0:
print(i, "%")
reg = models["model_71"]
feature_importance = np.array(reg.feature_importances_)
feature_names = np.array(X_train.columns)
data = {"feature_names": feature_names, "feature_importance": feature_importance}
fi_df = pd.DataFrame(data)
fi_df.sort_values(by=["feature_importance"], ascending=False, inplace=True)
plt.figure(figsize=(15, 10))
sns.barplot(x=fi_df["feature_importance"], y=fi_df["feature_names"])
plt.title("Tree " + "feature importance")
plt.xlabel("feature importance")
plt.ylabel("feature names")
# We can see that our features are not so useless after all
np.array(RMSE).mean()
width = 1.0
plt.figure(figsize=(80, 20))
plt.axhline(y=4, color="r", linestyle="-", linewidth=3)
plt.axhline(y=np.array(RMSE).mean(), color="r", linestyle="-.", linewidth=3)
plt.bar(np.arange(1, 101, 1), RMSE, width, color="g", edgecolor="black")
os.mkdir("models")
dir = "models"
def save_catboost_models(models, dir):
for i in range(1, 101):
model = models[f"model_{i}"]
path = dir + "/" + f"model_{i}.cbm"
model.save_model(path)
print("Done!")
def load_catboost_models(dir):
models = {}
for i in range(1, 101):
path = dir + "/" + f"model_{i}.cbm"
# model = joblib.load(path)
model = CatBoostRegressor() # parameters not required.
model.load_model(path)
models[f"model_{i}"] = model
print("Done!")
return models
save_catboost_models(models, dir)
models = load_catboost_models(dir)
df_f = df1.fillna(df1.median())
count = df1.isnull().sum(axis=1)
df_f
df_f["mean_user_ratin"] = df_f.drop(columns="UID").mean(axis=1)
df_f["median_user_ratin"] = df_f.median(axis=1)
df_f["min_user_ratin"] = df_f.drop(columns="UID").min(axis=1)
df_f["max_user_ratin"] = df_f.drop(columns="UID").max(axis=1)
df_f["count_user_ratin"] = 100 - count
df_f
# # For the test submission
test = pd.read_csv(
"/kaggle/input/recsys-in-practice/test_joke_df_nofactrating.csv", index_col=0
)
test.head(5)
# The actual prediction on the test set (`!!!` SLOW `!!!`)
ANS = []
for i, row in test.iterrows():
X_teat_f = df_f[df_f["UID"] == row["UID"]].drop(columns=[row["JID"], "UID"])
X_teat_f["ridge_model_pred"] = linear_models[f'model_{row["JID"]}'].predict(
np.array(X_teat_f).reshape(1, -1)
)
ans = models[f'model_{row["JID"]}'].predict(X_teat_f)
if len(ANS) % 1000 == 0:
print(len(ANS) / len(test))
ANS.append(ans[0])
test["Rating"] = ANS
test = test.drop(["UID", "JID"], axis=1)
test.to_csv("submission.csv")
# # For the final prediction
# Data for the models
df = pd.read_csv(f"/kaggle/input/recsys-in-practice/train_joke_df.csv")
test = pd.DataFrame({"UID": [7]}) # pd.read_csv('input.csv')
arr = test["UID"]
# Prediction
BEST = []
TOP_10 = []
for uid in arr:
rating = {}
for jid in range(1, 101):
if np.sum((df["UID"] == uid) & (df["JID"] == jid)):
rating_jid = np.array(
df[(df["UID"] == uid) & (df["JID"] == jid)]["Rating"]
)[0]
else:
X_test_f = df_f[df_f["UID"] == jid].drop(columns=[jid, "UID"])
X_test_f["ridge_model_pred"] = linear_models[f"model_{jid}"].predict(
np.array(X_test_f)
)
rating_jid = models[f"model_{jid}"].predict(X_test_f)[0]
rating[f"{jid}"] = rating_jid
sorted_rating = sorted(rating.items(), key=lambda kv: -kv[1])
best = {sorted_rating[0][0]: sorted_rating[0][1]}
top_10 = [i[0] for i in sorted_rating[1:10]]
BEST.append(best)
TOP_10.append(top_10)
ans = pd.DataFrame({"best": BEST, "top_10": TOP_10})
ans
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/379/129379941.ipynb
| null | null |
[{"Id": 129379941, "ScriptId": 38077008, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11986931, "CreationDate": "05/13/2023 09:42:58", "VersionNumber": 6.0, "Title": "CatBoost+Ridge(3.99981)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 307.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 306.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 3,525 | 1 | 3,525 | 3,525 |
||
129379607
|
<jupyter_start><jupyter_text>IMDB Dataset of 50K Movie Reviews
IMDB dataset having 50K movie reviews for natural language processing or Text analytics.
This is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training and 25,000 for testing. So, predict the number of positive and negative reviews using either classification or deep learning algorithms.
For more dataset information, please go through the following link,
http://ai.stanford.edu/~amaas/data/sentiment/
Kaggle dataset identifier: imdb-dataset-of-50k-movie-reviews
<jupyter_code>import pandas as pd
df = pd.read_csv('imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 50000 entries, 0 to 49999
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 review 50000 non-null object
1 sentiment 50000 non-null object
dtypes: object(2)
memory usage: 781.4+ KB
<jupyter_text>Examples:
{
"review": "One of the other reviewers has mentioned that after watching just 1 Oz episode you'll be hooked. They are right, as this is exactly what happened with me.<br /><br />The first thing that struck me about Oz was its brutality and unflinching scenes of violence, which set in right from t...(truncated)",
"sentiment": "positive"
}
{
"review": "A wonderful little production. <br /><br />The filming technique is very unassuming- very old-time-BBC fashion and gives a comforting, and sometimes discomforting, sense of realism to the entire piece. <br /><br />The actors are extremely well chosen- Michael Sheen not only \"has got ...(truncated)",
"sentiment": "positive"
}
{
"review": "I thought this was a wonderful way to spend time on a too hot summer weekend, sitting in the air conditioned theater and watching a light-hearted comedy. The plot is simplistic, but the dialogue is witty and the characters are likable (even the well bread suspected serial killer). Whi...(truncated)",
"sentiment": "positive"
}
{
"review": "Basically there's a family where a little boy (Jake) thinks there's a zombie in his closet & his parents are fighting all the time.<br /><br />This movie is slower than a soap opera... and suddenly, Jake decides to become Rambo and kill the zombie.<br /><br />OK, first of all when you...(truncated)",
"sentiment": "negative"
}
<jupyter_script># basic LIB
import numpy as np
import pandas as pd
import os
import warnings
import sys
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Preprocessing
import nltk
import gensim
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Model Building
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Embedding,
Bidirectional,
GRU,
Conv1D,
MaxPool1D,
GlobalMaxPooling1D,
Dense,
Dropout,
LSTM,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import legacy
# # Reading Data
imdb = pd.read_csv("/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv")
# The CSV already contains plain-text reviews and string labels, so we only need to shape it into a
# text/label DataFrame (positive=1, negative=0); no word-index decoding is required here.
df = pd.DataFrame(
    {
        "text": imdb["review"],
        "label": imdb["sentiment"].map({"positive": 1, "negative": 0}),
    }
)
df["text"][0]
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/379/129379607.ipynb
|
imdb-dataset-of-50k-movie-reviews
|
lakshmi25npathi
|
[{"Id": 129379607, "ScriptId": 38455594, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3651513, "CreationDate": "05/13/2023 09:39:41", "VersionNumber": 1.0, "Title": "Sentiment Analysis Using IMDB Dataset", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 55.0, "LinesInsertedFromPrevious": 55.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185373490, "KernelVersionId": 129379607, "SourceDatasetVersionId": 320111}]
|
[{"Id": 320111, "DatasetId": 134715, "DatasourceVersionId": 333307, "CreatorUserId": 2483565, "LicenseName": "Other (specified in description)", "CreationDate": "03/09/2019 06:32:21", "VersionNumber": 1.0, "Title": "IMDB Dataset of 50K Movie Reviews", "Slug": "imdb-dataset-of-50k-movie-reviews", "Subtitle": "Large Movie Review Dataset", "Description": "IMDB dataset having 50K movie reviews for natural language processing or Text analytics.\nThis is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training and 25,000 for testing. So, predict the number of positive and negative reviews using either classification or deep learning algorithms.\nFor more dataset information, please go through the following link,\nhttp://ai.stanford.edu/~amaas/data/sentiment/", "VersionNotes": "Initial release", "TotalCompressedBytes": 66212309.0, "TotalUncompressedBytes": 26558952.0}]
|
[{"Id": 134715, "CreatorUserId": 2483565, "OwnerUserId": 2483565.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 320111.0, "CurrentDatasourceVersionId": 333307.0, "ForumId": 144904, "Type": 2, "CreationDate": "03/09/2019 06:32:21", "LastActivityDate": "03/09/2019", "TotalViews": 739266, "TotalDownloads": 131721, "TotalVotes": 959, "TotalKernels": 746}]
|
[{"Id": 2483565, "UserName": "lakshmi25npathi", "DisplayName": "Lakshmipathi N", "RegisterDate": "11/12/2018", "PerformanceTier": 2}]
|
|
[{"imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv": {"column_names": "[\"review\", \"sentiment\"]", "column_data_types": "{\"review\": \"object\", \"sentiment\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 50000 entries, 0 to 49999\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 review 50000 non-null object\n 1 sentiment 50000 non-null object\ndtypes: object(2)\nmemory usage: 781.4+ KB\n", "summary": "{\"review\": {\"count\": 50000, \"unique\": 49582, \"top\": \"Loved today's show!!! It was a variety and not solely cooking (which would have been great too). Very stimulating and captivating, always keeping the viewer peeking around the corner to see what was coming up next. She is as down to earth and as personable as you get, like one of us which made the show all the more enjoyable. Special guests, who are friends as well made for a nice surprise too. Loved the 'first' theme and that the audience was invited to play along too. I must admit I was shocked to see her come in under her time limits on a few things, but she did it and by golly I'll be writing those recipes down. Saving time in the kitchen means more time with family. Those who haven't tuned in yet, find out what channel and the time, I assure you that you won't be disappointed.\", \"freq\": 5}, \"sentiment\": {\"count\": 50000, \"unique\": 2, \"top\": \"positive\", \"freq\": 25000}}", "examples": "{\"review\":{\"0\":\"One of the other reviewers has mentioned that after watching just 1 Oz episode you'll be hooked. They are right, as this is exactly what happened with me.<br \\/><br \\/>The first thing that struck me about Oz was its brutality and unflinching scenes of violence, which set in right from the word GO. Trust me, this is not a show for the faint hearted or timid. This show pulls no punches with regards to drugs, sex or violence. Its is hardcore, in the classic use of the word.<br \\/><br \\/>It is called OZ as that is the nickname given to the Oswald Maximum Security State Penitentary. It focuses mainly on Emerald City, an experimental section of the prison where all the cells have glass fronts and face inwards, so privacy is not high on the agenda. Em City is home to many..Aryans, Muslims, gangstas, Latinos, Christians, Italians, Irish and more....so scuffles, death stares, dodgy dealings and shady agreements are never far away.<br \\/><br \\/>I would say the main appeal of the show is due to the fact that it goes where other shows wouldn't dare. Forget pretty pictures painted for mainstream audiences, forget charm, forget romance...OZ doesn't mess around. The first episode I ever saw struck me as so nasty it was surreal, I couldn't say I was ready for it, but as I watched more, I developed a taste for Oz, and got accustomed to the high levels of graphic violence. Not just violence, but injustice (crooked guards who'll be sold out for a nickel, inmates who'll kill on order and get away with it, well mannered, middle class inmates being turned into prison bitches due to their lack of street skills or prison experience) Watching Oz, you may become comfortable with what is uncomfortable viewing....thats if you can get in touch with your darker side.\",\"1\":\"A wonderful little production. <br \\/><br \\/>The filming technique is very unassuming- very old-time-BBC fashion and gives a comforting, and sometimes discomforting, sense of realism to the entire piece. 
<br \\/><br \\/>The actors are extremely well chosen- Michael Sheen not only \\\"has got all the polari\\\" but he has all the voices down pat too! You can truly see the seamless editing guided by the references to Williams' diary entries, not only is it well worth the watching but it is a terrificly written and performed piece. A masterful production about one of the great master's of comedy and his life. <br \\/><br \\/>The realism really comes home with the little things: the fantasy of the guard which, rather than use the traditional 'dream' techniques remains solid then disappears. It plays on our knowledge and our senses, particularly with the scenes concerning Orton and Halliwell and the sets (particularly of their flat with Halliwell's murals decorating every surface) are terribly well done.\",\"2\":\"I thought this was a wonderful way to spend time on a too hot summer weekend, sitting in the air conditioned theater and watching a light-hearted comedy. The plot is simplistic, but the dialogue is witty and the characters are likable (even the well bread suspected serial killer). While some may be disappointed when they realize this is not Match Point 2: Risk Addiction, I thought it was proof that Woody Allen is still fully in control of the style many of us have grown to love.<br \\/><br \\/>This was the most I'd laughed at one of Woody's comedies in years (dare I say a decade?). While I've never been impressed with Scarlet Johanson, in this she managed to tone down her \\\"sexy\\\" image and jumped right into a average, but spirited young woman.<br \\/><br \\/>This may not be the crown jewel of his career, but it was wittier than \\\"Devil Wears Prada\\\" and more interesting than \\\"Superman\\\" a great comedy to go see with friends.\",\"3\":\"Basically there's a family where a little boy (Jake) thinks there's a zombie in his closet & his parents are fighting all the time.<br \\/><br \\/>This movie is slower than a soap opera... and suddenly, Jake decides to become Rambo and kill the zombie.<br \\/><br \\/>OK, first of all when you're going to make a film you must Decide if its a thriller or a drama! As a drama the movie is watchable. Parents are divorcing & arguing like in real life. And then we have Jake with his closet which totally ruins all the film! I expected to see a BOOGEYMAN similar movie, and instead i watched a drama with some meaningless thriller spots.<br \\/><br \\/>3 out of 10 just for the well playing parents & descent dialogs. As for the shots with Jake: just ignore them.\"},\"sentiment\":{\"0\":\"positive\",\"1\":\"positive\",\"2\":\"positive\",\"3\":\"negative\"}}"}}]
| true | 1 |
<start_data_description><data_path>imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv:
<column_names>
['review', 'sentiment']
<column_types>
{'review': 'object', 'sentiment': 'object'}
<dataframe_Summary>
{'review': {'count': 50000, 'unique': 49582, 'top': "Loved today's show!!! It was a variety and not solely cooking (which would have been great too). Very stimulating and captivating, always keeping the viewer peeking around the corner to see what was coming up next. She is as down to earth and as personable as you get, like one of us which made the show all the more enjoyable. Special guests, who are friends as well made for a nice surprise too. Loved the 'first' theme and that the audience was invited to play along too. I must admit I was shocked to see her come in under her time limits on a few things, but she did it and by golly I'll be writing those recipes down. Saving time in the kitchen means more time with family. Those who haven't tuned in yet, find out what channel and the time, I assure you that you won't be disappointed.", 'freq': 5}, 'sentiment': {'count': 50000, 'unique': 2, 'top': 'positive', 'freq': 25000}}
<dataframe_info>
RangeIndex: 50000 entries, 0 to 49999
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 review 50000 non-null object
1 sentiment 50000 non-null object
dtypes: object(2)
memory usage: 781.4+ KB
<some_examples>
{'review': {'0': "One of the other reviewers has mentioned that after watching just 1 Oz episode you'll be hooked. They are right, as this is exactly what happened with me.<br /><br />The first thing that struck me about Oz was its brutality and unflinching scenes of violence, which set in right from the word GO. Trust me, this is not a show for the faint hearted or timid. This show pulls no punches with regards to drugs, sex or violence. Its is hardcore, in the classic use of the word.<br /><br />It is called OZ as that is the nickname given to the Oswald Maximum Security State Penitentary. It focuses mainly on Emerald City, an experimental section of the prison where all the cells have glass fronts and face inwards, so privacy is not high on the agenda. Em City is home to many..Aryans, Muslims, gangstas, Latinos, Christians, Italians, Irish and more....so scuffles, death stares, dodgy dealings and shady agreements are never far away.<br /><br />I would say the main appeal of the show is due to the fact that it goes where other shows wouldn't dare. Forget pretty pictures painted for mainstream audiences, forget charm, forget romance...OZ doesn't mess around. The first episode I ever saw struck me as so nasty it was surreal, I couldn't say I was ready for it, but as I watched more, I developed a taste for Oz, and got accustomed to the high levels of graphic violence. Not just violence, but injustice (crooked guards who'll be sold out for a nickel, inmates who'll kill on order and get away with it, well mannered, middle class inmates being turned into prison bitches due to their lack of street skills or prison experience) Watching Oz, you may become comfortable with what is uncomfortable viewing....thats if you can get in touch with your darker side.", '1': 'A wonderful little production. <br /><br />The filming technique is very unassuming- very old-time-BBC fashion and gives a comforting, and sometimes discomforting, sense of realism to the entire piece. <br /><br />The actors are extremely well chosen- Michael Sheen not only "has got all the polari" but he has all the voices down pat too! You can truly see the seamless editing guided by the references to Williams\' diary entries, not only is it well worth the watching but it is a terrificly written and performed piece. A masterful production about one of the great master\'s of comedy and his life. <br /><br />The realism really comes home with the little things: the fantasy of the guard which, rather than use the traditional \'dream\' techniques remains solid then disappears. It plays on our knowledge and our senses, particularly with the scenes concerning Orton and Halliwell and the sets (particularly of their flat with Halliwell\'s murals decorating every surface) are terribly well done.', '2': 'I thought this was a wonderful way to spend time on a too hot summer weekend, sitting in the air conditioned theater and watching a light-hearted comedy. The plot is simplistic, but the dialogue is witty and the characters are likable (even the well bread suspected serial killer). While some may be disappointed when they realize this is not Match Point 2: Risk Addiction, I thought it was proof that Woody Allen is still fully in control of the style many of us have grown to love.<br /><br />This was the most I\'d laughed at one of Woody\'s comedies in years (dare I say a decade?). 
While I\'ve never been impressed with Scarlet Johanson, in this she managed to tone down her "sexy" image and jumped right into a average, but spirited young woman.<br /><br />This may not be the crown jewel of his career, but it was wittier than "Devil Wears Prada" and more interesting than "Superman" a great comedy to go see with friends.', '3': "Basically there's a family where a little boy (Jake) thinks there's a zombie in his closet & his parents are fighting all the time.<br /><br />This movie is slower than a soap opera... and suddenly, Jake decides to become Rambo and kill the zombie.<br /><br />OK, first of all when you're going to make a film you must Decide if its a thriller or a drama! As a drama the movie is watchable. Parents are divorcing & arguing like in real life. And then we have Jake with his closet which totally ruins all the film! I expected to see a BOOGEYMAN similar movie, and instead i watched a drama with some meaningless thriller spots.<br /><br />3 out of 10 just for the well playing parents & descent dialogs. As for the shots with Jake: just ignore them."}, 'sentiment': {'0': 'positive', '1': 'positive', '2': 'positive', '3': 'negative'}}
<end_description>
| 477 | 0 | 1,162 | 477 |
129132016
|
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script># # Data Analysis with Pandas
# ## Dataset : The Video Game Sales
# ## Name : Rami Ghanem
# ## Date : 05/10/2023
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # read data
df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
df.head()
# # the most common video game publisher
print(
"the most common video game publisher : ", df["Publisher"].value_counts().idxmax()
)
# # the most common platform
print("the most common platform : ", df["Platform"].value_counts().idxmax())
# # the most common genre
print("the most common genre : ", df["Genre"].value_counts().idxmax())
# # The top 20 highest grossing games
Result_df = df.sort_values(by="Global_Sales", ascending=False).head(20)
print("The top 20 highest grossing games is :")
print()
for games in Result_df[["Name"]].values.tolist():
print(games)
print(Result_df[["Name", "Global_Sales"]])
# # The median video game sales for North America and the surrounding games
median_value = df["NA_Sales"].median()
above_median_df = df[df["NA_Sales"] > median_value]
above_median_df = above_median_df.sort_values("NA_Sales", ascending=True).head(5)
below_median_df = df[df["NA_Sales"] < median_value]
below_median_df = below_median_df.sort_values("NA_Sales", ascending=False).head(5)
Result_df = pd.concat([above_median_df, below_median_df])
Result_df = Result_df.sort_values("NA_Sales", ascending=False)
print("the median video game sales For North American is ", median_value)
print()
print("ten games surrounding the median sales is :")
Result_df
# # For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
# top-selling game of all time
top_game = df.loc[df["Global_Sales"].idxmax()]
std_na_sales = df["NA_Sales"].std()
median_na_sales = df["NA_Sales"].median()
z_score = (top_game["NA_Sales"] - median_na_sales) / std_na_sales
print(z_score, " standard deviations above the mean.")
# # How does The Nintendo Wii average number of sales compare with all of the other platforms?
averages = df.groupby("Platform")["Global_Sales"].mean()
wii_average = averages.loc["Wii"]
overall_average = averages.drop("Wii").mean()
print("Wii average sales", wii_average)
print("overall average sales", overall_average)
# if wii_average > overall_average:
# print("The average sales for the Wii is higher than the average for all other platforms.")
# else:
# print("The average sales for the Wii is lower than the average for all other platforms.")
# # Come up with 3 more questions that can be answered with this data set.
# ## The most common platform in each year after 2000
most_common_platforms = df.groupby("Year")["Platform"].apply(lambda x: x.mode())
print("Year and most common used platform:")
for year, platform in most_common_platforms.items():
if int(year[0]) > 2000:
print(int(year[0]), platform)
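# Alternative sketch (my addition): the same per-year mode without the tuple index,
# by taking the first mode value for each year directly.
platform_mode_by_year = df.groupby("Year")["Platform"].agg(lambda s: s.mode().iat[0])
print(platform_mode_by_year[platform_mode_by_year.index > 2000])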
# ## Total sales in each year from 2000
total_sales_per_year = df.groupby("Year")["Global_Sales"].sum()
print("Year and Total Sales:")
print()
for year, total_sales in total_sales_per_year.items():
if int(year) > 2000:
print(int(year), total_sales)
# ## Which platform has the highest average sales per game
average_sales_per_game = df.groupby("Platform")["Global_Sales"].mean()
platform_highest_average_sales = average_sales_per_game.idxmax()
print(
"Platform with the highest average sales per game:", platform_highest_average_sales
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/132/129132016.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 129132016, "ScriptId": 38347167, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10586591, "CreationDate": "05/11/2023 08:30:03", "VersionNumber": 2.0, "Title": "vg-stats", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 130.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 116.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184913969, "KernelVersionId": 129132016, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
# # Data Analysis with Pandas
# ## Dataset : The Video Game Sales
# ## Name : Rami Ghanem
# ## Date : 05/10/2023
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # read data
df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
df.head()
# # the most common video game publisher
print(
"the most common video game publisher : ", df["Publisher"].value_counts().idxmax()
)
# # the most common platform
print("the most common platform : ", df["Platform"].value_counts().idxmax())
# # the most common genre
print("the most common genre : ", df["Genre"].value_counts().idxmax())
# # The top 20 highest grossing games
Result_df = df.sort_values(by="Global_Sales", ascending=False).head(20)
print("The top 20 highest grossing games is :")
print()
for games in Result_df[["Name"]].values.tolist():
print(games)
print(Result_df[["Name", "Global_Sales"]])
# # The median video game sales for North America and the surrounding games
median_value = df["NA_Sales"].median()
above_median_df = df[df["NA_Sales"] > median_value]
above_median_df = above_median_df.sort_values("NA_Sales", ascending=True).head(5)
below_median_df = df[df["NA_Sales"] < median_value]
below_median_df = below_median_df.sort_values("NA_Sales", ascending=False).head(5)
Result_df = pd.concat([above_median_df, below_median_df])
Result_df = Result_df.sort_values("NA_Sales", ascending=False)
print("the median video game sales For North American is ", median_value)
print()
print("ten games surrounding the median sales is :")
Result_df
# # For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
# top-selling game of all time
top_game = df.loc[df["Global_Sales"].idxmax()]
std_na_sales = df["NA_Sales"].std()
median_na_sales = df["NA_Sales"].median()
z_score = (top_game["NA_Sales"] - median_na_sales) / std_na_sales
print(z_score, " standard deviations above the mean.")
# # How does The Nintendo Wii average number of sales compare with all of the other platforms?
averages = df.groupby("Platform")["Global_Sales"].mean()
wii_average = averages.loc["Wii"]
overall_average = averages.drop("Wii").mean()
print("Wii average sales", wii_average)
print("overall average sales", overall_average)
# if wii_average > overall_average:
# print("The average sales for the Wii is higher than the average for all other platforms.")
# else:
# print("The average sales for the Wii is lower than the average for all other platforms.")
# # Come up with 3 more questions that can be answered with this data set.
# ## The most common platform in each year after 2000
most_common_platforms = df.groupby("Year")["Platform"].apply(lambda x: x.mode())
print("Year and most common used platform:")
for year, platform in most_common_platforms.items():
if int(year[0]) > 2000:
print(int(year[0]), platform)
# ## Total sales in each year from 2000
total_sales_per_year = df.groupby("Year")["Global_Sales"].sum()
print("Year and Total Sales:")
print()
for year, total_sales in total_sales_per_year.items():
if int(year) > 2000:
print(int(year), total_sales)
# ## Which platform has the highest average sales per game
average_sales_per_game = df.groupby("Platform")["Global_Sales"].mean()
platform_highest_average_sales = average_sales_per_game.idxmax()
print(
"Platform with the highest average sales per game:", platform_highest_average_sales
)
|
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
| true | 1 |
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
| 1,214 | 0 | 2,327 | 1,214 |
129132225
|
<jupyter_start><jupyter_text>Question Pairs Dataset
# Context
Quora's first public dataset is related to the problem of identifying duplicate questions. At Quora, an important product principle is that there should be a single question page for each logically distinct question. For example, the queries “What is the most populous state in the USA?” and “Which state in the United States has the most people?” should not exist separately on Quora because the intent behind both is identical. Having a canonical page for each logically distinct query makes knowledge-sharing more efficient in many ways: for example, knowledge seekers can access all the answers to a question in a single location, and writers can reach a larger readership than if that audience was divided amongst several pages.
The dataset is based on actual data from Quora and will give anyone the opportunity to train and test models of semantic equivalence.
# Content
There are over 400,000 lines of potential question duplicate pairs. Each line contains IDs for each question in the pair, the full text for each question, and a binary value that indicates whether the line truly contains a duplicate pair.
# Acknowledgements
For more information on this dataset, check out Quora's [first dataset release page](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs).
# License
This data is subject to Quora's [Terms of Service](https://www.quora.com/about/tos), allowing for non-commercial use.
Kaggle dataset identifier: question-pairs-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # XgBoost based Question pairs similarity
# #### Method Overview
# Xgboost is an optimized implementation of the gradient boosting algorithm that has become popular for solving various machine learning problems. In the context of question pairs similarity on Kaggle's Quora competition, Xgboost can be used as a machine learning model to predict whether two questions are semantically similar or not based on various features.
# The following code installs relevant modules and imports them whose imports are mandatory.
# installing xgboost
#!pip install xgboost
# importing modules
import os
import gensim
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import (
cross_val_score,
train_test_split,
GridSearchCV,
StratifiedKFold,
RandomizedSearchCV,
)
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score
import pickle
import json
from fuzzywuzzy import fuzz
from scipy.spatial.distance import (
cosine,
cityblock,
jaccard,
canberra,
euclidean,
minkowski,
braycurtis,
)
from nltk.corpus import stopwords
from nltk import word_tokenize
import nltk
nltk.download("stopwords")
stop_words = stopwords.words("english")
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import numpy as np
import tqdm
from scipy.stats import kurtosis
from scipy.stats import skew
nltk.download("punkt")
# The following code snippet reads the quora question pairs dataset.
train_dataset = pd.read_csv("/kaggle/input/question-pairs-dataset/questions.csv")
# The following code displays the top 5 entries of the loaded dataframe
train_dataset.head()
# ## Step 1.
# > Exploratory Data Analysis and Feature Engineering.
#
# dropping unnecessary columns
train_dataset = train_dataset.drop(["id", "qid1", "qid2"], axis=1)
# checking number of class labels
train_dataset.groupby("is_duplicate").count().plot.bar()
# total number of question pairs for training
print("Total number of Q&A pairs is {}".format(len(train_dataset)))
# checking if any row has null values
null_rows = train_dataset[train_dataset.isnull().any(axis=1)]
print(null_rows)
# filling empty rows with ""
train_dataset = train_dataset.fillna("")
# calculating the null rows again
null_rows = train_dataset[train_dataset.isnull().any(axis=1)]
print(null_rows)
train_dataset.head()
# finding the character length of question 1 and question 2
train_dataset["q1_length"] = train_dataset.question1.apply(lambda x: len(str(x)))
train_dataset["q2_length"] = train_dataset.question2.apply(lambda x: len(str(x)))
train_dataset.head()
# finding absolute difference in length of questions
train_dataset["question_len_diff"] = abs(
train_dataset.q2_length - train_dataset.q1_length
)
train_dataset.head()
# finding length of common words between questions
train_dataset["len_common_words"] = train_dataset.apply(
lambda x: len(
set(str(x["question1"]).lower().split()).intersection(
set(str(x["question2"]).lower().split())
)
),
axis=1,
)
train_dataset.head()
# # Step 2
# > Naive Features calculation and Model fitting
# Creating naive features by dropping questions from the dataframe
naive_features = train_dataset.drop(["question1", "question2"], axis=1)
# displaying the top 5 columns of the naive features
naive_features.head(10)
# Extracting labels and features
label = naive_features.iloc[:, 0]
naive_features = naive_features.iloc[:, 1:5]
naive_features.head(10)
# Splitting the naive_features and labels into train, val split
X_train_naive, X_test_naive, y_train_naive, y_test_naive = train_test_split(
naive_features, label, random_state=42, test_size=0.25
)
# Fetching the keys of XGboost classifer
XGBClassifier().get_params().keys()
# The following code is implementing a hyperparameter tuning approach using Bayesian optimization for an XGBoost model in Python.
# The `parameters` dictionary contains the hyperparameters to be tuned along with their respective search spaces. The `hp.choice` method from the `hyperopt` library is used to define the search space for each hyperparameter. For example, for the `nthread` hyperparameter, the search space is defined as either 4 or 8. Similarly, other hyperparameters such as `objective`, `learning_rate`, `max_depth`, `min_child_weight`, `subsample`, `colsample_bytree`, and `n_estimators` are defined with their respective search spaces.
# The `objective` function defines the XGBoost model with the hyperparameters as inputs. It then calculates the mean cross-validation accuracy of the model using the `cross_val_score` method from the `sklearn` library. The function returns the negative accuracy, as Bayesian optimization is used to minimize a function.
# The `trials` object is used to store the results of each trial and `fmin` is used to minimize the objective function using the Tree-structured Parzen Estimator (TPE) algorithm from the `hyperopt` library. The `max_evals` parameter determines the maximum number of evaluations (trials) to perform. Finally, the `best` variable stores the hyperparameters that resulted in the lowest negative accuracy.
# bayesian optimization
parameters = {
"nthread": hp.choice(
"nthread", [4, 8]
), # when use hyperthread, xgboost may become slower
"objective": hp.choice(
"objective", ["binary:logistic"]
), # for n_classes greater than 2, you can use multi:softmax or multi:softprob
"learning_rate": hp.choice("learning_rate", [0.01]), # so called `eta` value
"max_depth": hp.choice("max_depth", [2, 3, 4, 5, 6, 7, 8, 9]),
"min_child_weight": hp.choice("min_child_weight", [2, 3, 4, 5, 6, 7, 8, 9, 10]),
"silent": hp.choice("silent", [1]),
"subsample": hp.choice("subsample", [0.8]),
"colsample_bytree": hp.choice("colsample_bytree", [0.7]),
"n_estimators": hp.choice(
"n_estimators", [1, 10, 100, 1000, 10000]
), # number of trees, change it to 1000 for better results
"missing": hp.choice("missing", [-1456, -999]),
"seed": hp.choice("seed", [42, 888, 1337]),
}
def objective(parameters):
model = XGBClassifier(
nthread=parameters["nthread"],
objective=parameters["objective"],
learning_rate=parameters["learning_rate"],
max_depth=parameters["max_depth"],
min_child_weight=parameters["min_child_weight"],
silent=parameters["silent"],
subsample=parameters["subsample"],
colsample_bytree=parameters["colsample_bytree"],
n_estimators=parameters["n_estimators"],
missing=parameters["missing"],
seed=parameters["seed"],
)
accuracy = cross_val_score(model, X_train_naive, y_train_naive, cv=4).mean()
# We aim to maximize accuracy, therefore we return it as a negative value
return {"loss": -accuracy, "status": STATUS_OK}
trials = Trials()
best = fmin(
fn=objective, space=parameters, algo=tpe.suggest, max_evals=1, trials=trials
)
best
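# Note (my addition, a sketch): fmin with hp.choice returns index positions into each
# choice list, not the chosen values themselves; hyperopt.space_eval maps them back.
from hyperopt import space_eval

print(space_eval(parameters, best))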
# naive_clf = GridSearchCV(XGBClassifier(), parameters, n_jobs=12,
# cv=StratifiedKFold(shuffle=True),
# scoring='roc_auc',
# verbose=2, refit=True).fit(X_train_naive, y_train_naive).best_estimator_
# Fitting the XGboost classifier on the selected best params in the next code snippet
trained_classifier = XGBClassifier(
nthread=4,
objective="binary:logistic",
learning_rate=0.01,
max_depth=4,
min_child_weight=11,
silent=0,
subsample=0.8,
colsample_bytree=0.7,
n_estimators=1000,
seed=42,
).fit(X_train_naive, y_train_naive)
# Predicting the fitted classifer on test set along with evaluating its performance by printing the classification report and accuracy score/
predictionforest = trained_classifier.predict(X_test_naive)
print(confusion_matrix(y_test_naive, predictionforest))
print(classification_report(y_test_naive, predictionforest))
accuracy = accuracy_score(y_test_naive, predictionforest)
print(accuracy)
# Observation
# The accuracy is not good. Let's improve it by incorporating more features, primarily fuzzy features. Reference [Link](https://github.com/abhishekkrthakur/is_that_a_duplicate_quora_question/blob/master/feature_engineering.py)
# The following code is used to compute various fuzzy features for a dataset of question pairs. The fuzzy features measure the similarity between two strings, in this case, the two questions in a given question pair. The code uses the fuzzywuzzy library, which provides several functions to compute fuzzy features between two strings.
# The code creates several new columns in the train_dataset dataframe to store the computed fuzzy features. Each line of code uses the apply() function to apply a lambda function to each row of the dataframe, where the lambda function computes a specific fuzzy feature between the two questions in that row.
# The fuzzy features computed in this code are:
# 1. fuzz_qratio: the "quick ratio" -- a simple edit-distance-based similarity of the two full strings after basic preprocessing.
# 2. fuzz_WRatio: the "weighted ratio", which combines several of the other ratios and weights them into a single robust score.
# 3. fuzz_partial_ratio: the similarity of the shorter string against its best-matching substring of the longer string.
# 4. fuzz_partial_token_set_ratio: the partial-ratio variant of the token set ratio (token-set comparison applied to best-matching substrings).
# 5. fuzz_partial_token_sort_ratio: the partial-ratio variant of the token sort ratio (tokens sorted alphabetically before the partial match).
# 6. fuzz_token_set_ratio: compares the intersection and remainders of the two token sets, so word order and duplicated words are ignored.
# 7. fuzz_token_sort_ratio: sorts the tokens of each string alphabetically and then compares the sorted strings.
# All of these scores lie on a 0-100 scale; a tiny illustration on one example pair is shown below.
#
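# Tiny illustration (my addition): the scores below are computed on the example pair of
# questions quoted in the dataset description, just to show the 0-100 scale of these features.
example_q1 = "What is the most populous state in the USA?"
example_q2 = "Which state in the United States has the most people?"
print(
    fuzz.QRatio(example_q1, example_q2),
    fuzz.token_sort_ratio(example_q1, example_q2),
    fuzz.token_set_ratio(example_q1, example_q2),
)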
# Basic fuzzy features
train_dataset["fuzz_qratio"] = train_dataset.apply(
lambda x: fuzz.QRatio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_WRatio"] = train_dataset.apply(
lambda x: fuzz.WRatio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_partial_ratio"] = train_dataset.apply(
lambda x: fuzz.partial_ratio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_partial_token_set_ratio"] = train_dataset.apply(
lambda x: fuzz.partial_token_set_ratio(str(x["question1"]), str(x["question2"])),
axis=1,
)
print("Processed")
train_dataset["fuzz_partial_token_sort_ratio"] = train_dataset.apply(
lambda x: fuzz.partial_token_sort_ratio(str(x["question1"]), str(x["question2"])),
axis=1,
)
print("Processed")
train_dataset["fuzz_token_set_ratio"] = train_dataset.apply(
lambda x: fuzz.token_set_ratio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_token_sort_ratio"] = train_dataset.apply(
lambda x: fuzz.token_sort_ratio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
# The following code defines three functions used for computing semantic similarity between pairs of questions.
# The first function `wmd(s1, s2)` calculates the Word Mover's Distance (WMD) between two input sentences `s1` and `s2`. WMD is a measure of the semantic similarity between two texts that takes into account the distance between word embeddings. In the function, the input sentences are first converted to lowercase and tokenized. Stop words are then removed from each sentence, and the WMD is calculated using a pre-trained word embedding model (`model`).
# The second function `norm_wmd(s1, s2)` is similar to the first, but it computes the distance with `norm_model`, a copy of the same pre-trained embeddings whose word vectors are L2-normalized (via `init_sims(replace=True)`), so the WMD is measured on unit-length vectors.
# The third function `sent2vec(s)` converts a sentence `s` to a vector representation using word embeddings. The input sentence is first converted to lowercase and tokenized, and stop words are removed. Only words that are alphabetic are kept, and their embeddings are retrieved from the pre-trained embedding model (`model`). These embeddings are then summed and normalized to obtain a single vector representation for the sentence.
# These functions can be used as helper functions for building a model that predicts the semantic similarity between pairs of questions. The `wmd` and `norm_wmd` functions can be used to compute features based on WMD and norm WMD for a pair of questions, and the `sent2vec` function can be used to obtain a vector representation for each question that can be used as input to a machine learning model.
# helper function for more complex
def wmd(s1, s2):
s1 = str(s1).lower().split()
s2 = str(s2).lower().split()
stop_words = stopwords.words("english")
s1 = [w for w in s1 if w not in stop_words]
s2 = [w for w in s2 if w not in stop_words]
return model.wmdistance(s1, s2)
def norm_wmd(s1, s2):
s1 = str(s1).lower().split()
s2 = str(s2).lower().split()
stop_words = stopwords.words("english")
s1 = [w for w in s1 if w not in stop_words]
s2 = [w for w in s2 if w not in stop_words]
return norm_model.wmdistance(s1, s2)
def sent2vec(s):
words = str(s).lower()
words = word_tokenize(words)
words = [w for w in words if not w in stop_words]
words = [w for w in words if w.isalpha()]
M = []
for w in words:
try:
M.append(model[w])
except:
continue
M = np.array(M)
v = M.sum(axis=0)
return v / np.sqrt((v**2).sum())
train_dataset.head(10)
# saving the dataframe till now as a checkpoint
train_dataset.to_csv("fuzzy_features1.csv", index=False)
# loading csv from last checkpoint
train_dataset = pd.read_csv("fuzzy_features1.csv")
train_dataset.head()
# The following code snippet is related to feature extraction for Quora question pairs similarity.
# The first line of code loads the pre-trained Word2Vec model from a binary file format ('.bin') which contains word embeddings for Google News dataset.
# Next, the code extracts a feature using the Word Mover's Distance (WMD) between two questions in the Quora dataset. The WMD measures the distance between the embedded meanings of two sentences, taking into account the similarity of the words in the two sentences and their order.
# The `train_dataset` is a DataFrame that contains the Quora question pairs dataset. The `apply` method is used to apply the `wmd` function to each row of the dataset, where the function takes the two questions of each row and returns their WMD value. The resulting WMD values are stored as a new column in the `train_dataset` DataFrame called `wmd`.
# After calculating the WMD feature, the code then loads another pre-trained Word2Vec model from the same binary file format and initializes its word vectors with normalized L2 norm. Then, it applies another function called `norm_wmd` using the `apply` method similar to before, and the resulting normalized WMD values are stored as a new column called `norm_wmd` in the `train_dataset` DataFrame.
# The `wmd` and `norm_wmd` features can then be used as input features for a machine learning model to predict whether two questions are similar or not.
# feature extraction reference :: https://github.com/abhishekkrthakur/is_that_a_duplicate_quora_question/blob/master/feature_engineering.py
model = gensim.models.KeyedVectors.load_word2vec_format(
"/kaggle/input/googlenews-vectors-negative300bingz-gz-format/GoogleNews-vectors-negative300.bin",
binary=True,
)
print("Starting up")
train_dataset["wmd"] = train_dataset.apply(
lambda x: wmd(x["question1"], x["question2"]), axis=1
)
print("Processed")
norm_model = gensim.models.KeyedVectors.load_word2vec_format(
    "/kaggle/input/googlenews-vectors-negative300bingz-gz-format/GoogleNews-vectors-negative300.bin",
    binary=True,
)
norm_model.init_sims(replace=True)
train_dataset["norm_wmd"] = train_dataset.apply(
lambda x: norm_wmd(x["question1"], x["question2"]), axis=1
)
print("Processed")
# The following code calculates different distance metrics between pairs of questions in the Quora dataset and adds them as additional features to the dataset. The distance metrics are cosine distance, cityblock distance, Jaccard distance, Canberra distance, Euclidean distance, Minkowski distance, and Braycurtis distance.
# The code first initializes two numpy arrays called question1_vectors and question2_vectors, each of shape (train_dataset.shape[0], 300), where train_dataset is the pandas dataframe containing the Quora dataset and 300 is the length of the word embeddings for each word in the questions.
# The code then loops through each question in the train_dataset, calculates the corresponding sentence embedding using the sent2vec function (defined earlier), and stores it in the question1_vectors or question2_vectors array depending on which question it corresponds to.
# The np.nan_to_num function is used to convert any NaN values in the arrays to 0.
# Next, the code calculates the cosine distance, cityblock distance, Jaccard distance, Canberra distance, Euclidean distance, Minkowski distance, and Braycurtis distance between each pair of questions using the corresponding SciPy functions. These distances are added as additional columns to the train_dataset dataframe.
#
question1_vectors = np.zeros((train_dataset.shape[0], 300))
error_count = 0
for i, q in tqdm.tqdm(enumerate(train_dataset.question1.values)):
question1_vectors[i, :] = sent2vec(q)
question2_vectors = np.zeros((train_dataset.shape[0], 300))
for i, q in tqdm.tqdm(enumerate(train_dataset.question2.values)):
question2_vectors[i, :] = sent2vec(q)
train_dataset["cosine_distance"] = [
cosine(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["cityblock_distance"] = [
cityblock(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["jaccard_distance"] = [
jaccard(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["canberra_distance"] = [
canberra(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["euclidean_distance"] = [
euclidean(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["minkowski_distance"] = [
minkowski(x, y, 3)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["braycurtis_distance"] = [
braycurtis(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
# The following code calculates the skewness and kurtosis values for the question1_vectors and question2_vectors arrays, and appends them as new features in the train_dataset dataframe.
# The `skew()` function from the `scipy.stats` module is used to calculate the skewness of each array. Skewness is a measure of the asymmetry of the probability distribution of a random variable, and indicates whether the distribution is skewed to the left or right. A positive skewness value indicates that the distribution is skewed to the right, while a negative value indicates skewness to the left.
# The `kurtosis()` function from the `scipy.stats` module is used to calculate the kurtosis of each array. Kurtosis is a measure of the peakedness of the probability distribution of a random variable, and indicates how much of the variance is due to outliers. A high kurtosis value indicates that there are more outliers in the distribution than would be expected under normal conditions.
# The `np.nan_to_num()` function is used to convert any NaN values in the question1_vectors and question2_vectors arrays to zero, since the `skew()` and `kurtosis()` functions require arrays with no NaN values.
# Overall, adding the skewness and kurtosis features to the train_dataset dataframe can potentially improve the performance of a machine learning model that uses these features as input.
train_dataset["skew_q1vec"] = [skew(x) for x in np.nan_to_num(question1_vectors)]
print("Processed")
train_dataset["skew_q2vec"] = [skew(x) for x in np.nan_to_num(question2_vectors)]
print("Processed")
train_dataset["kur_q1vec"] = [kurtosis(x) for x in np.nan_to_num(question1_vectors)]
print("Processed")
train_dataset["kur_q2vec"] = [kurtosis(x) for x in np.nan_to_num(question2_vectors)]
print("Processed")
# The question vectors are dumped into pickle files, and the feature dataframe is saved to a CSV file for later use.
pickle.dump(question1_vectors, open("q1_w2v.pkl", "wb"), -1)
pickle.dump(question2_vectors, open("q2_w2v.pkl", "wb"), -1)
train_dataset.to_csv("final_features.csv", index=False)
# The first few rows of it are checked in the following code snippet.
train_dataset.head(10)
# Text based questions are being dropped from the fuzzy features dataframe
fuzzy_features = train_dataset.drop(["question1", "question2"], axis=1)
fuzzy_features.head()
fuzzy_label = fuzzy_features.iloc[:, 0]
fuzzy_features = fuzzy_features.iloc[:, 1:24]
# Top 5 entries of the fuzzy features are checked
fuzzy_features.head()
# The fuzzy features and labels are split into train and test sets
X_train_fuzzy, X_test_fuzzy, y_train_fuzzy, y_test_fuzzy = train_test_split(
fuzzy_features, fuzzy_label, random_state=42, test_size=0.25
)
# The module for Spark-distributed hyperparameter tuning is imported in the next code snippet.
# Note: DistGridSearchCV is imported here, but the tuning below actually uses scikit-learn's RandomizedSearchCV.
from skdist.distribute.search import DistGridSearchCV
# Model fitting is being done after a brief hyperparameter tuning.
xgb_classifier = XGBClassifier()
fuzzy_param_grid = {
"silent": [False],
"max_depth": [6, 10, 20],
"learning_rate": [0.001, 0.01, 0.1],
"subsample": [0.5, 0.7, 1.0],
"colsample_bytree": [0.4, 0.7, 1.0],
"colsample_bylevel": [0.4, 0.7, 0.9],
"min_child_weight": [0.5, 3.0, 5.0, 10.0],
"gamma": [0, 0.5, 1.0],
"reg_lambda": [0.1, 5.0, 50.0, 100.0],
"n_estimators": [10, 100, 1000],
}
randomsearch_classifier = RandomizedSearchCV(
xgb_classifier,
fuzzy_param_grid,
n_iter=20,
n_jobs=24,
verbose=1,
cv=2,
scoring="roc_auc",
refit=True,
random_state=42,
)
randomsearch_classifier.fit(X_train_fuzzy, y_train_fuzzy)
best_score = randomsearch_classifier.best_score_
best_params = randomsearch_classifier.best_params_
print("Best score: {}".format(best_score))
# print("Best params: ")
# for param_name in sorted(best_params.keys()):
# print('%s: %r' % (param_name, best_params[param_name]))
# Predictions are generated on test set along with calculating the accuracy and confusion matrix.
prediction_fuzzy = randomsearch_classifier.predict(X_test_fuzzy)
print(confusion_matrix(y_test_fuzzy, prediction_fuzzy))
print(classification_report(y_test_fuzzy, prediction_fuzzy))
accuracy = accuracy_score(y_test_fuzzy, prediction_fuzzy)
print(accuracy)
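# Optional (my addition, a sketch): persist the tuned estimator the same way the question
# vectors were pickled above, so it can be reloaded later without re-running the search.
# The file name is arbitrary.
pickle.dump(randomsearch_classifier.best_estimator_, open("xgb_fuzzy_model.pkl", "wb"), -1)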
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/132/129132225.ipynb
|
question-pairs-dataset
| null |
[{"Id": 129132225, "ScriptId": 37799439, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9201105, "CreationDate": "05/11/2023 08:31:50", "VersionNumber": 2.0, "Title": "[XGBoost] Question pairs similarity with Descrip.", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 489.0, "LinesInsertedFromPrevious": 125.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 364.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
|
[{"Id": 184914389, "KernelVersionId": 129132225, "SourceDatasetVersionId": 1423}, {"Id": 184914390, "KernelVersionId": 129132225, "SourceDatasetVersionId": 5533486}]
|
[{"Id": 1423, "DatasetId": 747, "DatasourceVersionId": 1423, "CreatorUserId": 1, "LicenseName": "Other (specified in description)", "CreationDate": "02/02/2017 01:26:29", "VersionNumber": 2.0, "Title": "Question Pairs Dataset", "Slug": "question-pairs-dataset", "Subtitle": "Can you identify duplicate questions?", "Description": "# Context\n\nQuora's first public dataset is related to the problem of identifying duplicate questions. At Quora, an important product principle is that there should be a single question page for each logically distinct question. For example, the queries \u201cWhat is the most populous state in the USA?\u201d and \u201cWhich state in the United States has the most people?\u201d should not exist separately on Quora because the intent behind both is identical. Having a canonical page for each logically distinct query makes knowledge-sharing more efficient in many ways: for example, knowledge seekers can access all the answers to a question in a single location, and writers can reach a larger readership than if that audience was divided amongst several pages.\n\nThe dataset is based on actual data from Quora and will give anyone the opportunity to train and test models of semantic equivalence.\n\n# Content\n\nThere are over 400,000 lines of potential question duplicate pairs. Each line contains IDs for each question in the pair, the full text for each question, and a binary value that indicates whether the line truly contains a duplicate pair.\n\n# Acknowledgements\n\nFor more information on this dataset, check out Quora's [first dataset release page](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs).\n\n# License\n\nThis data is subject to Quora's [Terms of Service](https://www.quora.com/about/tos), allowing for non-commercial use.", "VersionNotes": "Fixing a parsing error introduced when converting from `tsv` to `csv`", "TotalCompressedBytes": 60747409.0, "TotalUncompressedBytes": 60747409.0}]
|
[{"Id": 747, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 407.0, "CurrentDatasetVersionId": 1423.0, "CurrentDatasourceVersionId": 1423.0, "ForumId": 2504, "Type": 2, "CreationDate": "01/30/2017 20:18:29", "LastActivityDate": "02/05/2018", "TotalViews": 90353, "TotalDownloads": 10645, "TotalVotes": 238, "TotalKernels": 117}]
| null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # XgBoost based Question pairs similarity
# #### Method Overview
# Xgboost is an optimized implementation of the gradient boosting algorithm that has become popular for solving various machine learning problems. In the context of question pairs similarity on Kaggle's Quora competition, Xgboost can be used as a machine learning model to predict whether two questions are semantically similar or not based on various features.
# The following code installs relevant modules and imports them whose imports are mandatory.
# installing xgboost
#!pip install xgboost
# importing modules
import os
import gensim
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import (
cross_val_score,
train_test_split,
GridSearchCV,
StratifiedKFold,
RandomizedSearchCV,
)
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score
import pickle
import json
from fuzzywuzzy import fuzz
from scipy.spatial.distance import (
cosine,
cityblock,
jaccard,
canberra,
euclidean,
minkowski,
braycurtis,
)
from nltk.corpus import stopwords
from nltk import word_tokenize
import nltk
nltk.download("stopwords")
stop_words = stopwords.words("english")
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import numpy as np
import tqdm
from scipy.stats import kurtosis
from scipy.stats import skew
nltk.download("punkt")
# The following code snippet reads the quora question pairs dataset.
train_dataset = pd.read_csv("/kaggle/input/question-pairs-dataset/questions.csv")
# The following code displays the top 5 entries of the loaded dataframe
train_dataset.head()
# ## Step 1.
# > Exploratory Data Analysis and Feature Engineering.
#
# dropping unnecessary columns
train_dataset = train_dataset.drop(["id", "qid1", "qid2"], axis=1)
# checking number of class labels
train_dataset.groupby("is_duplicate").count().plot.bar()
# total number of question pairs for training
print("Total number of Q&A pairs is {}".format(len(train_dataset)))
# checking if any row has null values
null_rows = train_dataset[train_dataset.isnull().any(axis=1)]
print(null_rows)
# filling empty rows with ""
train_dataset = train_dataset.fillna("")
# calculating the null rows again
null_rows = train_dataset[train_dataset.isnull().any(axis=1)]
print(null_rows)
train_dataset.head()
# finding the character length of question 1 and question 2
train_dataset["q1_length"] = train_dataset.question1.apply(lambda x: len(str(x)))
train_dataset["q2_length"] = train_dataset.question2.apply(lambda x: len(str(x)))
train_dataset.head()
# finding absolute difference in length of questions
train_dataset["question_len_diff"] = abs(
train_dataset.q2_length - train_dataset.q1_length
)
train_dataset.head()
# finding length of common words between questions
train_dataset["len_common_words"] = train_dataset.apply(
lambda x: len(
set(str(x["question1"]).lower().split()).intersection(
set(str(x["question2"]).lower().split())
)
),
axis=1,
)
train_dataset.head()
# # Step 2
# > Naive Features calculation and Model fitting
# Creating naive features by dropping questions from the dataframe
naive_features = train_dataset.drop(["question1", "question2"], axis=1)
# displaying the top 5 columns of the naive features
naive_features.head(10)
# Extracting labels and features
label = naive_features.iloc[:, 0]
naive_features = naive_features.iloc[:, 1:5]
naive_features.head(10)
# Splitting the naive_features and labels into train, val split
X_train_naive, X_test_naive, y_train_naive, y_test_naive = train_test_split(
naive_features, label, random_state=42, test_size=0.25
)
# Fetching the keys of XGboost classifer
XGBClassifier().get_params().keys()
# The following code is implementing a hyperparameter tuning approach using Bayesian optimization for an XGBoost model in Python.
# The `parameters` dictionary contains the hyperparameters to be tuned along with their respective search spaces. The `hp.choice` method from the `hyperopt` library is used to define the search space for each hyperparameter. For example, for the `nthread` hyperparameter, the search space is defined as either 4 or 8. Similarly, other hyperparameters such as `objective`, `learning_rate`, `max_depth`, `min_child_weight`, `subsample`, `colsample_bytree`, and `n_estimators` are defined with their respective search spaces.
# The `objective` function defines the XGBoost model with the hyperparameters as inputs. It then calculates the mean cross-validation accuracy of the model using the `cross_val_score` method from the `sklearn` library. The function returns the negative accuracy, as Bayesian optimization is used to minimize a function.
# The `trials` object is used to store the results of each trial and `fmin` is used to minimize the objective function using the Tree-structured Parzen Estimator (TPE) algorithm from the `hyperopt` library. The `max_evals` parameter determines the maximum number of evaluations (trials) to perform. Finally, the `best` variable stores the hyperparameters that resulted in the lowest negative accuracy.
# bayesian optimization
parameters = {
"nthread": hp.choice(
"nthread", [4, 8]
), # when use hyperthread, xgboost may become slower
"objective": hp.choice(
"objective", ["binary:logistic"]
), # for n_classes greater than 2, you can use multi:softmax or multi:softprob
"learning_rate": hp.choice("learning_rate", [0.01]), # so called `eta` value
"max_depth": hp.choice("max_depth", [2, 3, 4, 5, 6, 7, 8, 9]),
"min_child_weight": hp.choice("min_child_weight", [2, 3, 4, 5, 6, 7, 8, 9, 10]),
"silent": hp.choice("silent", [1]),
"subsample": hp.choice("subsample", [0.8]),
"colsample_bytree": hp.choice("colsample_bytree", [0.7]),
"n_estimators": hp.choice(
"n_estimators", [1, 10, 100, 1000, 10000]
), # number of trees, change it to 1000 for better results
"missing": hp.choice("missing", [-1456, -999]),
"seed": hp.choice("seed", [42, 888, 1337]),
}
def objective(parameters):
model = XGBClassifier(
nthread=parameters["nthread"],
objective=parameters["objective"],
learning_rate=parameters["learning_rate"],
max_depth=parameters["max_depth"],
min_child_weight=parameters["min_child_weight"],
silent=parameters["silent"],
subsample=parameters["subsample"],
colsample_bytree=parameters["colsample_bytree"],
n_estimators=parameters["n_estimators"],
missing=parameters["missing"],
seed=parameters["seed"],
)
accuracy = cross_val_score(model, X_train_naive, y_train_naive, cv=4).mean()
# We aim to maximize accuracy, therefore we return it as a negative value
return {"loss": -accuracy, "status": STATUS_OK}
trials = Trials()
best = fmin(
fn=objective, space=parameters, algo=tpe.suggest, max_evals=1, trials=trials
)
best
# naive_clf = GridSearchCV(XGBClassifier(), parameters, n_jobs=12,
# cv=StratifiedKFold(shuffle=True),
# scoring='roc_auc',
# verbose=2, refit=True).fit(X_train_naive, y_train_naive).best_estimator_
# Fitting the XGboost classifier on the selected best params in the next code snippet
trained_classifier = XGBClassifier(
nthread=4,
objective="binary:logistic",
learning_rate=0.01,
max_depth=4,
min_child_weight=11,
silent=0,
subsample=0.8,
colsample_bytree=0.7,
n_estimators=1000,
seed=42,
).fit(X_train_naive, y_train_naive)
# Predicting the fitted classifer on test set along with evaluating its performance by printing the classification report and accuracy score/
predictionforest = trained_classifier.predict(X_test_naive)
print(confusion_matrix(y_test_naive, predictionforest))
print(classification_report(y_test_naive, predictionforest))
accuracy = accuracy_score(y_test_naive, predictionforest)
print(accuracy)
# Observation
# The accuracy is not good. Let's increase it via incorporating more features, primarily fuzzy features. Reference [Link](https://github.com/abhishekkrthakur/is_that_a_duplicate_quora_question/blob/master/feature_engineering.py)
# The following code is used to compute various fuzzy features for a dataset of question pairs. The fuzzy features measure the similarity between two strings, in this case, the two questions in a given question pair. The code uses the fuzzywuzzy library, which provides several functions to compute fuzzy features between two strings.
# The code creates several new columns in the train_dataset dataframe to store the computed fuzzy features. Each line of code uses the apply() function to apply a lambda function to each row of the dataframe, where the lambda function computes a specific fuzzy feature between the two questions in that row.
# The fuzzy features computed in this code are:
# 1. fuzz_qratio: This is the simple ratio of the lengths of the two strings, which measures the similarity of the two strings based on the number of common characters.
# 2. fuzz_WRatio: This measures the similarity of the two strings based on the number of common words.
# 3. fuzz_partial_ratio: This measures the similarity of the two strings based on the length of the longest common substring.
# 4. fuzz_partial_token_set_ratio: This measures the similarity of the two strings based on the number of common tokens, where tokens are defined as non-whitespace substrings.
# 5. fuzz_partial_token_sort_ratio: This measures the similarity of the two strings based on the number of common sorted tokens.
# 6. fuzz_token_set_ratio: This measures the similarity of the two strings based on the number of common tokens, regardless of order.
# 7. fuzz_token_sort_ratio: This measures the similarity of the two strings based on the number of common sorted tokens, where sorted tokens are defined as tokens sorted alphabetically.
#
# Basic fuzzy features
train_dataset["fuzz_qratio"] = train_dataset.apply(
lambda x: fuzz.QRatio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_WRatio"] = train_dataset.apply(
lambda x: fuzz.WRatio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_partial_ratio"] = train_dataset.apply(
lambda x: fuzz.partial_ratio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_partial_token_set_ratio"] = train_dataset.apply(
lambda x: fuzz.partial_token_set_ratio(str(x["question1"]), str(x["question2"])),
axis=1,
)
print("Processed")
train_dataset["fuzz_partial_token_sort_ratio"] = train_dataset.apply(
lambda x: fuzz.partial_token_sort_ratio(str(x["question1"]), str(x["question2"])),
axis=1,
)
print("Processed")
train_dataset["fuzz_token_set_ratio"] = train_dataset.apply(
lambda x: fuzz.token_set_ratio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
train_dataset["fuzz_token_sort_ratio"] = train_dataset.apply(
lambda x: fuzz.token_sort_ratio(str(x["question1"]), str(x["question2"])), axis=1
)
print("Processed")
# The following code defines three functions used for computing semantic similarity between pairs of questions.
# The first function `wmd(s1, s2)` calculates the Word Mover's Distance (WMD) between two input sentences `s1` and `s2`. WMD is a measure of the semantic similarity between two texts that takes into account the distance between word embeddings. In the function, the input sentences are first converted to lowercase and tokenized. Stop words are then removed from each sentence, and the WMD is calculated using a pre-trained word embedding model (`model`).
# The second function `norm_wmd(s1, s2)` is similar to the first, but it uses a normalized WMD (norm WMD) score instead. This score is calculated by dividing the WMD by the sum of the minimum distance between each word in one sentence and any word in the other sentence. The function also uses a different pre-trained embedding model (`norm_model`) for computing the norm WMD.
# The third function `sent2vec(s)` converts a sentence `s` to a vector representation using word embeddings. The input sentence is first converted to lowercase and tokenized, and stop words are removed. Only words that are alphabetic are kept, and their embeddings are retrieved from the pre-trained embedding model (`model`). These embeddings are then summed and normalized to obtain a single vector representation for the sentence.
# These functions can be used as helper functions for building a model that predicts the semantic similarity between pairs of questions. The `wmd` and `norm_wmd` functions can be used to compute features based on WMD and norm WMD for a pair of questions, and the `sent2vec` function can be used to obtain a vector representation for each question that can be used as input to a machine learning model.
# helper function for more complex
def wmd(s1, s2):
s1 = str(s1).lower().split()
s2 = str(s2).lower().split()
stop_words = stopwords.words("english")
s1 = [w for w in s1 if w not in stop_words]
s2 = [w for w in s2 if w not in stop_words]
return model.wmdistance(s1, s2)
def norm_wmd(s1, s2):
s1 = str(s1).lower().split()
s2 = str(s2).lower().split()
stop_words = stopwords.words("english")
s1 = [w for w in s1 if w not in stop_words]
s2 = [w for w in s2 if w not in stop_words]
return norm_model.wmdistance(s1, s2)
def sent2vec(s):
words = str(s).lower()
words = word_tokenize(words)
words = [w for w in words if not w in stop_words]
words = [w for w in words if w.isalpha()]
M = []
for w in words:
try:
M.append(model[w])
except:
continue
M = np.array(M)
v = M.sum(axis=0)
return v / np.sqrt((v**2).sum())
train_dataset.head(10)
# saving the dataframe till now as a checkpoint
train_dataset.to_csv("fuzzy_features1.csv", index=False)
# loading csv from last checkpoint
train_dataset = pd.read_csv("fuzzy_features1.csv")
train_dataset.head()
# The following code snippet is related to feature extraction for Quora question pairs similarity.
# The first line of code loads the pre-trained Word2Vec model from a binary file format ('.bin') which contains word embeddings for Google News dataset.
# Next, the code extracts a feature using the Word Mover's Distance (WMD) between two questions in the Quora dataset. The WMD measures the distance between the embedded meanings of two sentences, taking into account the similarity of the words in the two sentences and their order.
# The `train_dataset` is a DataFrame that contains the Quora question pairs dataset. The `apply` method is used to apply the `wmd` function to each row of the dataset, where the function takes the two questions of each row and returns their WMD value. The resulting WMD values are stored as a new column in the `train_dataset` DataFrame called `wmd`.
# After calculating the WMD feature, the code then loads another pre-trained Word2Vec model from the same binary file format and initializes its word vectors with normalized L2 norm. Then, it applies another function called `norm_wmd` using the `apply` method similar to before, and the resulting normalized WMD values are stored as a new column called `norm_wmd` in the `train_dataset` DataFrame.
# The `wmd` and `norm_wmd` features can then be used as input features for a machine learning model to predict whether two questions are similar or not.
# feature extraction reference :: https://github.com/abhishekkrthakur/is_that_a_duplicate_quora_question/blob/master/feature_engineering.py
model = gensim.models.KeyedVectors.load_word2vec_format(
"/kaggle/input/googlenews-vectors-negative300bingz-gz-format/GoogleNews-vectors-negative300.bin",
binary=True,
)
print("Starting up")
train_dataset["wmd"] = train_dataset.apply(
lambda x: wmd(x["question1"], x["question2"]), axis=1
)
print("Processed")
norm_model = gensim.models.KeyedVectors.load_word2vec_format(
"GoogleNews-vectors-negative300.bin.gz", binary=True
)
norm_model.init_sims(replace=True)
train_dataset["norm_wmd"] = train_dataset.apply(
lambda x: norm_wmd(x["question1"], x["question2"]), axis=1
)
print("Processed")
# The following code calculates different distance metrics between pairs of questions in the Quora dataset and adds them as additional features to the dataset. The distance metrics are cosine distance, cityblock distance, Jaccard distance, Canberra distance, Euclidean distance, Minkowski distance, and Braycurtis distance.
# The code first initializes two numpy arrays called question1_vectors and question2_vectors, each of shape (train_dataset.shape[0], 300), where train_dataset is the pandas dataframe containing the Quora dataset and 300 is the length of the word embeddings for each word in the questions.
# The code then loops through each question in the train_dataset, calculates the corresponding sentence embedding using the sent2vec function (not shown in the code), and stores it in the question1_vectors or question2_vectors array depending on which question it corresponds to.
# The np.nan_to_num function is used to convert any NaN values in the arrays to 0.
# Next, the code calculates the cosine distance, cityblock distance, Jaccard distance, Canberra distance, Euclidean distance, Minkowski distance, and Braycurtis distance between each pair of questions using the corresponding SciPy functions. These distances are added as additional columns to the train_dataset dataframe.
#
question1_vectors = np.zeros((train_dataset.shape[0], 300))
error_count = 0
for i, q in tqdm.tqdm(enumerate(train_dataset.question1.values)):
question1_vectors[i, :] = sent2vec(q)
question2_vectors = np.zeros((train_dataset.shape[0], 300))
for i, q in tqdm.tqdm(enumerate(train_dataset.question2.values)):
question2_vectors[i, :] = sent2vec(q)
train_dataset["cosine_distance"] = [
cosine(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["cityblock_distance"] = [
cityblock(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["jaccard_distance"] = [
jaccard(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["canberra_distance"] = [
canberra(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["euclidean_distance"] = [
euclidean(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["minkowski_distance"] = [
minkowski(x, y, 3)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
train_dataset["braycurtis_distance"] = [
braycurtis(x, y)
for (x, y) in zip(
np.nan_to_num(question1_vectors), np.nan_to_num(question2_vectors)
)
]
print("Processed")
# The following code calculates the skewness and kurtosis values for the question1_vectors and question2_vectors arrays, and appends them as new features in the train_dataset dataframe.
# The `skew()` function from the `scipy.stats` module is used to calculate the skewness of each array. Skewness is a measure of the asymmetry of the probability distribution of a random variable, and indicates whether the distribution is skewed to the left or right. A positive skewness value indicates that the distribution is skewed to the right, while a negative value indicates skewness to the left.
# The `kurtosis()` function from the `scipy.stats` module is used to calculate the kurtosis of each array. Kurtosis is a measure of the peakedness of the probability distribution of a random variable, and indicates how much of the variance is due to outliers. A high kurtosis value indicates that there are more outliers in the distribution than would be expected under normal conditions.
# The `np.nan_to_num()` function is used to convert any NaN values in the question1_vectors and question2_vectors arrays to zero, since the `skew()` and `kurtosis()` functions require arrays with no NaN values.
# Overall, adding the skewness and kurtosis features to the train_dataset dataframe can potentially improve the performance of a machine learning model that uses these features as input.
train_dataset["skew_q1vec"] = [skew(x) for x in np.nan_to_num(question1_vectors)]
print("Processed")
train_dataset["skew_q2vec"] = [skew(x) for x in np.nan_to_num(question2_vectors)]
print("Processed")
train_dataset["kur_q1vec"] = [kurtosis(x) for x in np.nan_to_num(question1_vectors)]
print("Processed")
train_dataset["kur_q2vec"] = [kurtosis(x) for x in np.nan_to_num(question2_vectors)]
print("Processed")
# The questions vectors are dumped in pkl files and saved to a csv file for later usage.
pickle.dump(question1_vectors, open("q1_w2v.pkl", "wb"), -1)
pickle.dump(question2_vectors, open("q2_w2v.pkl", "wb"), -1)
train_dataset.to_csv("final_features.csv", index=False)
# First few columns of it are checked in the following code snippet.
train_dataset.head(10)
# Text based questions are being dropped from the fuzzy features dataframe
fuzzy_features = train_dataset.drop(["question1", "question2"], axis=1)
fuzzy_features.head()
fuzzy_label = fuzzy_features.iloc[:, 0]
fuzzy_features = fuzzy_features.iloc[:, 1:24]
# Top 5 entries of the fuzzy features are checked
fuzzy_features.head()
# The entire dataset is being split into train,test split
X_train_fuzzy, X_test_fuzzy, y_train_fuzzy, y_test_fuzzy = train_test_split(
fuzzy_features, fuzzy_label, random_state=42, test_size=0.25
)
# Relevant modules for doing hyperparameter tuning using spark is being installed and imported in next code snippet.
# let's do hyperparam tuning using spark
from skdist.distribute.search import DistGridSearchCV
# Model fitting is being done after a brief hyperparameter tuning.
xgb_classifier = XGBClassifier()
fuzzy_param_grid = {
"silent": [False],
"max_depth": [6, 10, 20],
"learning_rate": [0.001, 0.01, 0.1],
"subsample": [0.5, 0.7, 1.0],
"colsample_bytree": [0.4, 0.7, 1.0],
"colsample_bylevel": [0.4, 0.7, 0.9],
"min_child_weight": [0.5, 3.0, 5.0, 10.0],
"gamma": [0, 0.5, 1.0],
"reg_lambda": [0.1, 5.0, 50.0, 100.0],
"n_estimators": [10, 100, 1000],
}
randomsearch_classifier = RandomizedSearchCV(
xgb_classifier,
fuzzy_param_grid,
n_iter=20,
n_jobs=24,
verbose=1,
cv=2,
scoring="roc_auc",
refit=True,
random_state=42,
)
randomsearch_classifier.fit(X_train_fuzzy, y_train_fuzzy)
best_score = randomsearch_classifier.best_score_
best_params = randomsearch_classifier.best_params_
print("Best score: {}".format(best_score))
# print("Best params: ")
# for param_name in sorted(best_params.keys()):
# print('%s: %r' % (param_name, best_params[param_name]))
# Predictions are generated on test set along with calculating the accuracy and confusion matrix.
prediction_fuzzy = randomsearch_classifier.predict(X_test_fuzzy)
print(confusion_matrix(y_test_fuzzy, prediction_fuzzy))
print(classification_report(y_test_fuzzy, prediction_fuzzy))
accuracy = accuracy_score(y_test_fuzzy, prediction_fuzzy)
print(accuracy)
| false | 0 | 6,942 | 4 | 7,289 | 6,942 |
||
129132559
|
# Created by Andreas Chandra
# 24 April 2023
# ### 🚀SPACESHIP TITANIC: Benchmark Models
# # Overview
# Welcome to the year 2912, where your data science skills are needed to solve a cosmic mystery. We've received a transmission from four lightyears away and things aren't looking good.
# The Spaceship Titanic was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
# While rounding Alpha Centauri en route to its first destination—the torrid 55 Cancri E—the unwary Spaceship Titanic collided with a spacetime anomaly hidden within a dust cloud. Sadly, it met a similar fate as its namesake from 1000 years before. Though the ship stayed intact, almost half of the passengers were transported to an alternate dimension!
# # Libraries
# #### Installing LazyPredict
from IPython.display import clear_output
clear_output()
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
StandardScaler,
MinMaxScaler,
OneHotEncoder,
LabelEncoder,
)
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import accuracy_score
from lightgbm import LGBMClassifier
import lazypredict
from lazypredict.Supervised import LazyClassifier
import time
import warnings
warnings.filterwarnings("ignore")
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
# # Data Loading and Preparation
DATA_DIR = "../input/spaceship-titanic"
train = pd.read_csv(os.path.join(DATA_DIR, "train.csv"))
test = pd.read_csv(os.path.join(DATA_DIR, "test.csv"))
submission = pd.read_csv(os.path.join(DATA_DIR, "sample_submission.csv"))
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = "median"
# ### Column Descriptions:
# - `PassengerId` - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always. (A short parsing sketch follows this list.)
# - `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence.
# - `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
# - `Cabin` - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard.
# - `Destination` - The planet the passenger will be debarking to.
# - `Age` - The age of the passenger.
# - `VIP` - Whether the passenger has paid for special VIP service during the voyage.
# - `RoomService`, `FoodCourt`, `ShoppingMall`, `Spa`, `VRDeck` - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities.
# - `Name` - The first and last names of the passenger.
# - `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
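# To make the gggg_pp and deck/num/side formats above concrete, here is a minimal
# parsing sketch (the feature-engineering section below does the same thing on the
# real data; the sample values here are made up purely for illustration).
sample = pd.DataFrame(
    {"PassengerId": ["0003_01", "0003_02"], "Cabin": ["A/0/S", "F/123/P"]}
)
sample["Group"] = sample["PassengerId"].str.split("_").str[0].astype(int)
sample[["CabinDeck", "CabinNumber", "CabinSide"]] = sample["Cabin"].str.split(
    "/", expand=True
)
sample["CabinNumber"] = sample["CabinNumber"].astype(int)
print(sample)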
# ### Exploring Train Data:
# 📌 Observations in Train Data:
# * 14 columns and 8693 rows.
# * Train data has 121702 cells in total, 2324 of which are missing.
# * All 12 feature columns contain missing values, with CryoSleep having the most (217).
# * The target variable is "Transported"
# #### Quick view of train data:
# Below are the first 5 rows of train dataset
#
train.head()
print(f"\033[94mNumber of rows in train data \t\t: {len(train)}")
print(f"\033[94mNumber of columns in train data \t: {len(train.columns)}")
print(f"\033[94mNumber of values in train data \t\t: {train.count().sum()}")
print(f"\033[94mNumber of missing values in train data : {sum(train.isna().sum())}")
# #### Column wise missing values:
train.isna().sum().sort_values(ascending=False)
# #### Basic statistics of training data:
# Below are basic statistics for each numerical variable: `count`, `mean`, `standard deviation`, `minimum`, `Q1`, `median`, `Q3`, and `maximum`
train.describe().T
# ### Exploring test data
# 📌 Observations in Test Data:
# * There are a total of 13 columns and 4277 rows in the test data.
# * Test data contains 54484 non-missing values and 1117 missing values.
# * All 12 feature columns contain missing values, with FoodCourt having the most (106).
test.head()
test.isna().sum()
test.describe().T
# ### Submission File
submission.head()
# # Exploratory Data Analysis
TARGET = "Transported"
FEATURES = list(set(train.columns) - set([TARGET]))
RANDOM_STATE = 42
# #### Null Value Distribution
# 📌 Observations on Null Value Distribution:
# * The maximum number of missing values in a single row is 3; many rows have none.
# * Interestingly, the row-wise missing value distribution is very similar between the train and test datasets.
# * Around 76% of the rows have no missing values.
# * The remaining 24% of rows have 1 to 3 missing values (the short check below reproduces this split).
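# A quick check (an addition, not part of the original flow) of the row-wise split
# quoted above, using only the train/test frames already loaded.
row_nulls_train = train.isna().sum(axis=1).value_counts(normalize=True).sort_index()
row_nulls_test = test.isna().sum(axis=1).value_counts(normalize=True).sort_index()
print("Fraction of train rows by number of missing values:")
print(row_nulls_train.round(3))
print("Fraction of test rows by number of missing values:")
print(row_nulls_test.round(3))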
# #### Column wise null value distribution
train_mv = (
train.isna()
.sum()
.reset_index()
.rename({"index": "feature", 0: "total_nulls"}, axis=1)
)
train_mv["null_ratio"] = train_mv.total_nulls / len(train) * 100
train_mv
test_mv = (
test.isna()
.sum()
.reset_index()
.rename({"index": "feature", 0: "total_nulls"}, axis=1)
)
test_mv["null_ratio"] = test_mv.total_nulls / len(test) * 100
test_mv
# #### Row wise null value distribution
train_na_cols = train.columns[train.isna().any()]
test_na_cols = test.columns[test.isna().any()]
fig = px.imshow(
train[train_na_cols].isna().T, aspect="auto", color_continuous_scale="viridis"
)
fig.show()
# Result:
# The missing values appear to be fairly random, so we can proceed with imputation.
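# A rough supporting check (an addition): if missingness were strongly structured,
# the missingness indicators of different columns would correlate; values near zero
# support the "fairly random" reading above.
null_corr = train[train_na_cols].isna().astype(int).corr()
print(null_corr.round(2))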
# #### Continuous and Categorical Data Distribution
# 📌 Observations on Feature Types:
# * Out of 12 features, 6 are continuous, 2 are text, and 4 are categorical.
# * HomePlanet and Destination each have 3 distinct values.
# * CryoSleep and VIP are boolean features.
df = pd.concat([train[FEATURES], test[FEATURES]], axis=0)
text_features = ["Cabin", "Name"]
cat_features = [
col for col in FEATURES if df[col].nunique() < 20 and col not in text_features
]
cont_features = [
col for col in FEATURES if df[col].nunique() >= 20 and col not in text_features
]
del df
print(f"\033[94mTotal number of features\t: {len(FEATURES)}")
print(f"\033[94mTotal of categorical features\t: {len(cat_features)}")
print(f"\033[94mTotal of continuous features\t: {len(cont_features)}")
print(f"\033[94mTotal of text features\t\t: {len(text_features)}")
labels = ["Categorical", "Continuous", "Text"]
values = [len(cat_features), len(cont_features), len(text_features)]
colors = ["#DE3163", "#58D68D"]
fig = go.Figure(
data=[
go.Pie(
labels=labels,
values=values,
pull=[0.01, 0.01, 0.01],
marker=dict(colors=colors),
)
]
)
fig.show()
# ### Feature Distribution of Continuous Features
# #### Distribution of Age
train_age = train.copy()
test_age = test.copy()
train_age["type"] = "Train"
test_age["type"] = "Test"
ageDf = pd.concat([train_age, test_age])
fig = px.histogram(
data_frame=ageDf,
x="Age",
color="type",
color_discrete_sequence=["#58D68D", "#DE3163"],
marginal="box",
nbins=100,
template="plotly_white",
)
fig.update_layout(title="Distribution of Age", title_x=0.5)
fig.show()
# #### Feature Distribution of Categorical Features
if len(cat_features) == 0:
print("No categorical features")
else:
ncols = 2
nrows = 2
fig, axes = plt.subplots(nrows, ncols, figsize=(18, 10))
for r in range(nrows):
for c in range(ncols):
col = cat_features[r * ncols + c]
sns.countplot(
x=train[col], ax=axes[r, c], palette="viridis", label="Train data"
)
sns.countplot(
x=test[col], ax=axes[r, c], palette="magma", label="Test data"
)
axes[r, c].legend()
axes[r, c].set_ylabel("")
axes[r, c].set_xlabel(col, fontsize=20),
axes[r, c].tick_params(labelsize=10, width=0.5)
axes[r, c].xaxis.offsetText.set_fontsize(4)
axes[r, c].yaxis.offsetText.set_fontsize(4)
plt.show()
# #### Target Distribution
# 📌 Observations on Target Distribution:
# * There are two target values - False and True (i.e. 0 and 1).
# * Both the target values are almost equally distributed.
target_df = pd.DataFrame(train[TARGET].value_counts()).reset_index()
target_df.columns = [TARGET, "count"]
fig = px.bar(data_frame=target_df, x=TARGET, y="count")
fig.update_traces(
marker_color=["#58D68D", "#DE3163"],
marker_line_color="rgb(0,0,0)",
marker_line_width=2,
)
fig.update_layout(title="Target Distribution", template="plotly_white", title_x=0.5)
transported_perc = train[TARGET].sum() * 100 / train.shape[0]
not_transported_perc = 100 - transported_perc
print(f"\033[94mPercentage of Transported = True: {transported_perc:.2f}%")
print(f"\033[94mPercentage of Transported = False: {not_transported_perc:.2f}%")
fig.show()
# #### Correlation Matrix
fig = px.imshow(
    train.corr(numeric_only=True),
    text_auto=True,
    aspect="auto",
    color_continuous_scale="viridis",
)
fig.show()
# ### Modified
train
fig = px.histogram(data_frame=train, x="Age", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="RoomService", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="FoodCourt", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="Spa", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="ShoppingMall", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="VRDeck", color="Transported")
fig.show()
# # Feature Engineering
# Follow: https://www.kaggle.com/code/samuelcortinhas/spaceship-titanic-a-complete-guide#Feature-engineering
# #### Age
train["AgeGroup"] = None
train.loc[train.Age.lt(13), "AgeGroup"] = "Age_0-12"
train.loc[train.Age.between(13, 17), "AgeGroup"] = "Age_13-17"
train.loc[train.Age.between(18, 25), "AgeGroup"] = "Age_18-25"
train.loc[train.Age.between(26, 30), "AgeGroup"] = "Age_26-30"
train.loc[train.Age.between(31, 50), "AgeGroup"] = "Age_31-50"
train.loc[train.Age.ge(51), "AgeGroup"] = "Age_51+"
test["AgeGroup"] = None
test.loc[test.Age.lt(13), "AgeGroup"] = "Age_0-12"
test.loc[test.Age.between(13, 17), "AgeGroup"] = "Age_13-17"
test.loc[test.Age.between(18, 25), "AgeGroup"] = "Age_18-25"
test.loc[test.Age.between(26, 30), "AgeGroup"] = "Age_26-30"
test.loc[test.Age.between(31, 50), "AgeGroup"] = "Age_31-50"
test.loc[test.Age.ge(51), "AgeGroup"] = "Age_51+"
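# Alternative sketch (not used below): the same bins can be built more compactly with
# pd.cut, assuming integer-valued ages; the check should print 1.0 if the two
# approaches agree on the train set.
age_bins = [-1, 12, 17, 25, 30, 50, np.inf]
age_labels = ["Age_0-12", "Age_13-17", "Age_18-25", "Age_26-30", "Age_31-50", "Age_51+"]
age_group_check = pd.cut(train.Age, bins=age_bins, labels=age_labels).astype(object)
print((age_group_check.fillna("NA") == train.AgeGroup.fillna("NA")).mean())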
# Plot distribution of new features
plt.figure(figsize=(10, 4))
g = sns.countplot(
data=train,
x="AgeGroup",
hue="Transported",
order=["Age_0-12", "Age_13-17", "Age_18-25", "Age_26-30", "Age_31-50", "Age_51+"],
)
plt.title("Age group distribution")
# #### Expenditure
# Expenditure features
exp_feats = ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
train["Expenditure"] = train[exp_feats].sum(axis=1)
train["NoSpending"] = (train["Expenditure"] == 0).astype(int)
test["Expenditure"] = test[exp_feats].sum(axis=1)
test["NoSpending"] = (test["Expenditure"] == 0).astype(int)
fig = plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
sns.histplot(data=train, x="Expenditure", hue="Transported", bins=200)
plt.title("Total expenditure (truncated)")
plt.ylim([0, 200])
plt.xlim([0, 20000])
plt.subplot(1, 2, 2)
sns.countplot(data=train, x="NoSpending", hue="Transported")
plt.title("No spending")
fig.tight_layout()
# #### Passenger Group
# New feature - Group
train["Group"] = train["PassengerId"].apply(lambda x: x.split("_")[0]).astype(int)
test["Group"] = test["PassengerId"].apply(lambda x: x.split("_")[0]).astype(int)
# New feature - Group size (computed once over train + test so that groups split
# across the two files are counted correctly)
group_counts = pd.concat([train.Group, test.Group]).value_counts()
train["GroupSize"] = train["Group"].map(group_counts)
test["GroupSize"] = test["Group"].map(group_counts)
# Plot distribution
plt.figure(figsize=(20, 4))
plt.subplot(1, 2, 1)
sns.histplot(data=train, x="Group", hue="Transported", binwidth=1)
plt.title("Group")
plt.subplot(1, 2, 2)
sns.countplot(data=train, x="GroupSize", hue="Transported")
plt.title("Group size")
fig.tight_layout()
train["Solo"] = (train["GroupSize"] == 1).astype(int)
test["Solo"] = (test["GroupSize"] == 1).astype(int)
# New feature distribution
plt.figure(figsize=(10, 4))
sns.countplot(data=train, x="Solo", hue="Transported")
plt.title("Passanger travelling solo or not")
plt.ylim([0, 3000])
plt.show()
# #### Cabin Location
# Extract deck, number, and side from cabin feature
train["Cabin"].fillna("Z/9999/Z", inplace=True)
test["Cabin"].fillna("Z/9999/Z", inplace=True)
train["CabinDeck"] = train.Cabin.apply(lambda x: x.split("/")[0])
train["CabinNumber"] = train.Cabin.apply(lambda x: x.split("/")[1]).astype(int)
train["CabinSide"] = train.Cabin.apply(lambda x: x.split("/")[2])
test["CabinDeck"] = test.Cabin.apply(lambda x: x.split("/")[0])
test["CabinNumber"] = test.Cabin.apply(lambda x: x.split("/")[1]).astype(int)
test["CabinSide"] = test.Cabin.apply(lambda x: x.split("/")[2])
train.loc[train.CabinDeck == "Z", "CabinDeck"] = np.nan
train.loc[train.CabinNumber == 9999, "CabinNumber"] = np.nan
train.loc[train.CabinSide == "Z", "CabinSide"] = np.nan
test.loc[test.CabinDeck == "Z", "CabinDeck"] = np.nan
test.loc[test.CabinNumber == 9999, "CabinNumber"] = np.nan
test.loc[test.CabinSide == "Z", "CabinSide"] = np.nan
train.drop("Cabin", axis=1, inplace=True)
test.drop("Cabin", axis=1, inplace=True)
fig = plt.figure(figsize=(10, 12))
plt.subplot(3, 1, 1)
sns.countplot(
data=train,
x="CabinDeck",
hue="Transported",
order=["A", "B", "C", "D", "E", "F", "G", "T"],
)
plt.title("Cabin Deck")
plt.subplot(3, 1, 2)
sns.histplot(data=train, x="CabinNumber", hue="Transported", binwidth=20)
plt.title("CabinNumber")
plt.xlim([0, 2000])
plt.subplot(3, 1, 3)
sns.countplot(data=train, x="CabinSide", hue="Transported")
plt.title("CabinSide")
fig.tight_layout()
train["CabinRegion1"] = train.CabinNumber.lt(300).astype(int)
train["CabinRegion2"] = train.CabinNumber.between(300, 599).astype(int)
train["CabinRegion3"] = train.CabinNumber.between(600, 899).astype(int)
train["CabinRegion4"] = train.CabinNumber.between(900, 1199).astype(int)
train["CabinRegion5"] = train.CabinNumber.between(1200, 1499).astype(int)
train["CabinRegion6"] = train.CabinNumber.between(1500, 1799).astype(int)
train["CabinRegion7"] = train.CabinNumber.ge(1800).astype(int)
test["CabinRegion1"] = test.CabinNumber.lt(300).astype(int)
test["CabinRegion2"] = test.CabinNumber.between(300, 599).astype(int)
test["CabinRegion3"] = test.CabinNumber.between(600, 899).astype(int)
test["CabinRegion4"] = test.CabinNumber.between(900, 1199).astype(int)
test["CabinRegion5"] = test.CabinNumber.between(1200, 1499).astype(int)
test["CabinRegion6"] = test.CabinNumber.between(1500, 1799).astype(int)
test["CabinRegion7"] = test.CabinNumber.ge(1800).astype(int)
plt.figure(figsize=(10, 4))
train["CabinRegionPlot"] = (
train["CabinRegion1"]
+ 2 * train["CabinRegion2"]
+ 3 * train["CabinRegion3"]
+ 4 * train["CabinRegion4"]
+ 5 * train["CabinRegion5"]
+ 6 * train["CabinRegion6"]
+ 7 * train["CabinRegion7"]
).astype(int)
sns.countplot(data=train, x="CabinRegionPlot", hue="Transported")
plt.title("Cabin Regions")
train.drop("CabinRegionPlot", axis=1, inplace=True)
# #### Last Name
# Replace missing names with a placeholder so the surname split below still works
train["Name"].fillna("Unknown Unknown", inplace=True)
test["Name"].fillna("Unknown Unknown", inplace=True)
# Surname
train["Surname"] = train["Name"].str.split().str[-1]
test["Surname"] = test["Name"].str.split().str[-1]
# Family size (surname frequency counted once over train + test)
surname_counts = pd.concat([train.Surname, test.Surname]).value_counts()
train["FamilySize"] = train["Surname"].map(surname_counts)
test["FamilySize"] = test["Surname"].map(surname_counts)
train.loc[train.Surname == "Unknown", "Surname"] = np.nan
train.loc[train.FamilySize > 100, "FamilySize"] = np.nan
test.loc[test.Surname == "Unknown", "Surname"] = np.nan
test.loc[test.FamilySize > 100, "FamilySize"] = np.nan
# Drop name
train.drop("Name", axis=1, inplace=True)
test.drop("Name", axis=1, inplace=True)
# Plot family size distribution
plt.figure(figsize=(12, 4))
sns.countplot(data=train, x="FamilySize", hue="Transported")
plt.title("Family size")
plt.show()
# # Imputing Missing Values
# Combine train and test
y = train["Transported"].copy().astype(int)
X = train.drop("Transported", axis=1).copy()
data = pd.concat([X, test], axis=0).reset_index(drop=True)
# #### Count missing values
na_cols = data.columns[data.isna().any()].tolist()
mv = pd.DataFrame(data[na_cols].isna().sum(), columns=["NumberMissing"])
mv["PercentageMissing"] = np.round(100 * mv.NumberMissing / len(data), 2)
mv
plt.figure(figsize=(12, 6))
sns.heatmap(train[na_cols].isna().T, cmap="summer")
plt.title("Heatmap of missing values")
plt.show()
# Countplot
train["NACount"] = train.isna().sum(axis=1)
plt.figure(figsize=(10, 4))
sns.countplot(data=train, x="NACount", hue="Transported")
plt.title("Number of missing values by passanger")
plt.show()
train.drop("NACount", axis=1, inplace=True)
# #### Strategy
# Rather than imputing every column with one global statistic, the cells below fill
# missing values from joint distributions with related features: HomePlanet from Group
# and CabinDeck, Surname and cabin features from Group, Age from passenger attributes,
# and CryoSleep/expenditure from spending behaviour.
# #### HomePlanet
# ##### HomePlanet and Group
GHP_gb = data.groupby(["Group", "HomePlanet"])["HomePlanet"].size().unstack().fillna(0)
GHP_gb.head()
(GHP_gb > 0).sum(axis=1).value_counts()
HP_bef = data["HomePlanet"].isna().sum()
GHP_index = data[data.HomePlanet.isna()][
(data[data.HomePlanet.isna()]["Group"]).isin(GHP_gb.index)
].index
data.loc[GHP_index, "HomePlanet"] = data.iloc[GHP_index, :]["Group"].map(
lambda x: GHP_gb.idxmax(axis=1)[x]
)
print("# of HomePlanet missing before: ", HP_bef)
print("# of HomePlanet missing after: ", data.HomePlanet.isna().sum())
# ##### HomePlanet and CabinDeck
CDHP_gb = (
data.groupby(["CabinDeck", "HomePlanet"])["HomePlanet"].size().unstack().fillna(0)
)
plt.figure(figsize=(10, 4))
sns.heatmap(CDHP_gb.T, annot=True, fmt="g", cmap="coolwarm")
plt.show()
HP_bef = data.HomePlanet.isna().sum()
data.loc[(data.HomePlanet.isna()) & ~(data.CabinDeck == "D"), "HomePlanet"] = "Earth"
data.loc[(data.HomePlanet.isna()) & (data.CabinDeck == "D"), "HomePlanet"] = "Mars"
print("# of HomePlanet missing before:", HP_bef)
print("# of HomePlanet missing after:", data.HomePlanet.isna().sum())
# #### Destination
D_bef = data.Destination.isna().sum()
data.loc[data.Destination.isna(), "Destination"] = "TRAPPIST-1e"
print("# of Destination missing before:", D_bef)
print("# of Destination missing after:", data.Destination.isna().sum())
# #### Surname and Group
GSN_gb = (
data[data.GroupSize > 1]
.groupby(["Group", "Surname"])["Surname"]
.size()
.unstack()
.fillna(0)
)
# Countplot
plt.figure(figsize=(10, 4))
sns.countplot(data=(GSN_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("Number of unique surnames by group")
SN_bef = data.Surname.isna().sum()
GSN_index = data[data.Surname.isna()][
(data[data.Surname.isna()]["Group"]).isin(GSN_gb.index)
].index
data.loc[GSN_index, "Surname"] = data.iloc[GSN_index, :]["Group"].map(
lambda x: GSN_gb.idxmax(axis=1)[x]
)
print("# of Surname missing before: ", SN_bef)
print("# of Surname missing after", data.Surname.isna().sum())
data.Surname.fillna("Unknown", inplace=True)
data["FamilySize"] = data["Surname"].map(lambda x: data["Surname"].value_counts()[x])
data.loc[data["Surname"] == "Unknown", "Surname"] = np.nan
data.loc[data["FamilySize"] > 100, "FamilySize"] = 0
# #### CabinSide and Group
# Joint distribution of Group and Cabin features
GCD_gb = (
data[data["GroupSize"] > 1]
.groupby(["Group", "CabinDeck"])["CabinDeck"]
.size()
.unstack()
.fillna(0)
)
GCN_gb = (
data[data["GroupSize"] > 1]
.groupby(["Group", "CabinNumber"])["CabinNumber"]
.size()
.unstack()
.fillna(0)
)
GCS_gb = (
data[data["GroupSize"] > 1]
.groupby(["Group", "CabinSide"])["CabinSide"]
.size()
.unstack()
.fillna(0)
)
# Countplots
fig = plt.figure(figsize=(16, 4))
plt.subplot(1, 3, 1)
sns.countplot(data=(GCD_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("# of Unique cabin decks per group")
plt.subplot(1, 3, 2)
sns.countplot(data=(GCN_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("# of Unique cabin numbers per group")
plt.subplot(1, 3, 3)
sns.countplot(data=(GCS_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("# of Unique cabin sides per group")
fig.tight_layout()
# Missing values before
CS_bef = data.CabinSide.isna().sum()
# Passengers with missing Cabin side and in a group with known Cabin side
GCS_index = data[data.CabinSide.isna()][
(data[data.CabinSide.isna()]["Group"]).isin(GCS_gb.index)
].index
# Fill corresponding missing values
data.loc[GCS_index, "CabinSide"] = data.iloc[GCS_index, :]["Group"].map(
lambda x: GCS_gb.idxmax(axis=1)[x]
)
# Print number of missing values left
print("# of Cabin_side missing values before:", CS_bef)
print("# of Cabin_side missing values after:", data.CabinSide.isna().sum())
# #### CabinSide and Surname
SCS_gb = (
data[data["GroupSize"] > 1]
.groupby(["Surname", "CabinSide"])["CabinSide"]
.size()
.unstack()
.fillna(0)
)
SCS_gb["Ratio"] = SCS_gb["P"] / (SCS_gb["P"] + SCS_gb["S"])
plt.figure(figsize=(10, 4))
sns.histplot(SCS_gb["Ratio"], kde=True, binwidth=0.05)
plt.title("Ratio of cabin side by surname")
print(
"Percentage of families all on the same cabid side:",
100 * np.round((SCS_gb["Ratio"].isin([0, 1])).sum() / len(SCS_gb), 3),
"%",
)
SCS_gb.head()
# Missing values before
CS_bef = data["CabinSide"].isna().sum()
SCS_gb.drop("Ratio", axis=1, inplace=True)
SCS_index = data[data["CabinSide"].isna()][
(data[data["CabinSide"].isna()]["Surname"]).isin(SCS_gb.index)
].index
data.loc[SCS_index, "CabinSide"] = data.iloc[SCS_index, :]["Surname"].map(
lambda x: SCS_gb.idxmax(axis=1)[x]
)
data.drop("Surname", axis=1, inplace=True)
print("# of CabinSide missing before: ", CS_bef)
print("# of CabinSide missing after:", data.CabinSide.isna().sum())
data.CabinSide.value_counts()
CS_bef = data.CabinSide.isna().sum()
data.loc[data["CabinSide"].isna(), "CabinSide"] = "Z"
print("# of CabinSide missing before: ", CS_bef)
print("# of CabinSide missing after:", data.CabinSide.isna().sum())
# #### CabinDeck and Group
CD_bef = data["CabinDeck"].isna().sum()
GCD_index = data[data["CabinDeck"].isna()][
(data[data.CabinDeck.isna()]["Group"]).isin(GCD_gb.index)
].index
data.loc[GCD_index, "CabinDeck"] = data.iloc[GCD_index, :]["Group"].map(
lambda x: GCD_gb.idxmax(axis=1)[x]
)
# Print number of missing values left
print("# of Cabin_deck missing before:", CD_bef)
print("# of Cabin_deck missing after:", data["CabinDeck"].isna().sum())
# #### CabinDeck and HomePlanet
# Joint distribution
data.groupby(["HomePlanet", "Destination", "Solo", "CabinDeck"])[
"CabinDeck"
].size().unstack().fillna(0)
plt.figure(figsize=(12, 8))
sns.heatmap(
data.groupby(["HomePlanet", "Destination", "Solo", "CabinDeck"])["CabinDeck"]
.size()
.unstack()
.fillna(0),
annot=True,
fmt="g",
)
# Missing values before
CD_bef = data["CabinDeck"].isna().sum()
# Fill missing values using the mode
na_rows_CD = data.loc[data["CabinDeck"].isna(), "CabinDeck"].index
data.loc[data["CabinDeck"].isna(), "CabinDeck"] = data.groupby(
["HomePlanet", "Destination", "Solo"]
)["CabinDeck"].transform(lambda x: x.fillna(pd.Series.mode(x)[0]))[na_rows_CD]
# Print number of missing values left
print("# of CabinDeck missing values before:", CD_bef)
print("# of CabinDeck missing values after:", data["CabinDeck"].isna().sum())
# #### CabinNumber and CabinDeck
# Scatterplot (restricted to rows with a known cabin number so that the colour
# array has the same length as x and y)
known_cn = ~data["CabinNumber"].isna()
plt.figure(figsize=(10, 4))
sns.scatterplot(
    x=data.loc[known_cn, "CabinNumber"],
    y=data.loc[known_cn, "Group"],
    c=LabelEncoder().fit_transform(data.loc[known_cn, "CabinDeck"]),
    cmap="tab10",
)
plt.title("Cabin number vs group, coloured by cabin deck")
# Missing values before
CN_bef = data["CabinNumber"].isna().sum()
# Extrapolate linear relationship on a deck by deck basis
for deck in ["A", "B", "C", "D", "E", "F", "G"]:
# Features and labels
X_CN = data.loc[
~(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "Group"
]
y_CN = data.loc[
~(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "CabinNumber"
]
X_test_CN = data.loc[
(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "Group"
]
# Linear regression
model_CN = LinearRegression()
model_CN.fit(X_CN.values.reshape(-1, 1), y_CN)
preds_CN = model_CN.predict(X_test_CN.values.reshape(-1, 1))
# Fill missing values with predictions
data.loc[
(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "CabinNumber"
] = preds_CN.astype(int)
# Print number of missing values left
print("#Cabin_number missing values before:", CN_bef)
print("#Cabin_number missing values after:", data["CabinNumber"].isna().sum())
# One-hot encode cabin regions
data["CabinRegion1"] = (data["CabinNumber"] < 300).astype(int)
data["CabinRegion2"] = (
(data["CabinNumber"] >= 300) & (data["CabinNumber"] < 600)
).astype(int)
data["CabinRegion3"] = (
(data["CabinNumber"] >= 600) & (data["CabinNumber"] < 900)
).astype(int)
data["CabinRegion4"] = (
(data["CabinNumber"] >= 900) & (data["CabinNumber"] < 1200)
).astype(int)
data["CabinRegion5"] = (
(data["CabinNumber"] >= 1200) & (data["CabinNumber"] < 1500)
).astype(int)
data["CabinRegion6"] = (
(data["CabinNumber"] >= 1500) & (data["CabinNumber"] < 1800)
).astype(int)
data["CabinRegion7"] = (data["CabinNumber"] >= 1800).astype(int)
# #### VIP
data.VIP.value_counts()
# Missing values before
V_bef = data["VIP"].isna().sum()
# Fill missing values with mode
data.loc[data["VIP"].isna(), "VIP"] = False
# Print number of missing values left
print("#VIP missing values before:", V_bef)
print("#VIP missing values after:", data["VIP"].isna().sum())
# #### Age
data.groupby(["HomePlanet", "NoSpending", "Solo", "CabinDeck"])[
"Age"
].median().unstack().fillna(0)
# Missing values before
A_bef = data["Age"].isna().sum()
# Fill missing values using the median
na_rows_A = data.loc[data["Age"].isna(), "Age"].index
data.loc[data["Age"].isna(), "Age"] = data.groupby(
["HomePlanet", "NoSpending", "Solo", "CabinDeck"]
)["Age"].transform(lambda x: x.fillna(x.median()))[na_rows_A]
# Print number of missing values left
print("# of Age missing before:", A_bef)
print("# of Age missing after:", data.Age.isna().sum())
# Update age group feature
data.loc[data["Age"] <= 12, "AgeGroup"] = "Age_0-12"
data.loc[(data["Age"] > 12) & (data["Age"] < 18), "AgeGroup"] = "Age_13-17"
data.loc[(data["Age"] >= 18) & (data["Age"] <= 25), "AgeGroup"] = "Age_18-25"
data.loc[(data["Age"] > 25) & (data["Age"] <= 30), "AgeGroup"] = "Age_26-30"
data.loc[(data["Age"] > 30) & (data["Age"] <= 50), "AgeGroup"] = "Age_31-50"
data.loc[data["Age"] > 50, "AgeGroup"] = "Age_51+"
# #### CryoSleep
# Joint distribution
data.groupby(["NoSpending", "CryoSleep"])["CryoSleep"].size().unstack().fillna(0)
# Missing values before
CSL_bef = data["CryoSleep"].isna().sum()
# Fill missing values using the mode
na_rows_CSL = data.loc[data["CryoSleep"].isna(), "CryoSleep"].index
data.loc[data["CryoSleep"].isna(), "CryoSleep"] = data.groupby(["NoSpending"])[
"CryoSleep"
].transform(lambda x: x.fillna(pd.Series.mode(x)[0]))[na_rows_CSL]
# Print number of missing values left
print("#CryoSleep missing values before:", CSL_bef)
print("#CryoSleep missing values after:", data["CryoSleep"].isna().sum())
# #### Expenditure and CryoSleep
print(
"Maximum expenditure of passengers in CryoSleep:",
data.loc[data["CryoSleep"] == True, exp_feats].sum(axis=1).max(),
)
# Missing values before
E_bef = data[exp_feats].isna().sum().sum()
# CryoSleep has no expenditure
for col in exp_feats:
data.loc[(data[col].isna()) & (data["CryoSleep"] == True), col] = 0
# Print number of missing values left
print("# of Expenditure missing values before:", E_bef)
print("# of Expenditure missing values after:", data[exp_feats].isna().sum().sum())
# #### Expenditure and Others
# Joint distribution
data.groupby(["HomePlanet", "Solo", "AgeGroup"])["Expenditure"].mean().unstack().fillna(
0
)
# Missing values before
E_bef = data[exp_feats].isna().sum().sum()
# Fill remaining missing values using the group-wise mean
for col in exp_feats:
na_rows = data.loc[data[col].isna(), col].index
data.loc[data[col].isna(), col] = data.groupby(["HomePlanet", "Solo", "AgeGroup"])[
col
].transform(lambda x: x.fillna(x.mean()))[na_rows]
# Print number of missing values left
print("# of Expenditure missing values before:", E_bef)
print("# of Expenditure missing values after:", data[exp_feats].isna().sum().sum())
# Update expenditure and no_spending
data["Expenditure"] = data[exp_feats].sum(axis=1)
data["NoSpending"] = (data["Expenditure"] == 0).astype(int)
data.isna().sum()
# # Preprocessing
# Split data back into train and test sets
X = data[data.PassengerId.isin(train.PassengerId.values)].copy()
X_test = data[data.PassengerId.isin(test.PassengerId.values)].copy()
# Drop unwanted features
X.drop(
["PassengerId", "Group", "GroupSize", "AgeGroup", "CabinNumber"],
axis=1,
inplace=True,
)
X_test.drop(
["PassengerId", "Group", "GroupSize", "AgeGroup", "CabinNumber"],
axis=1,
inplace=True,
)
# Log Transform
fig = plt.figure(figsize=(12, 20))
for i, col in enumerate(
["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck", "Expenditure"]
):
plt.subplot(6, 2, 2 * i + 1)
sns.histplot(X[col], binwidth=100)
plt.ylim([0, 200])
plt.title(f"{col} (original)")
plt.subplot(6, 2, 2 * i + 2)
sns.histplot(np.log(1 + X[col]), color="C1")
plt.ylim([0, 200])
plt.title(f"{col} (log-transform)")
fig.tight_layout()
plt.show()
for col in ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck", "Expenditure"]:
X[col] = np.log(1 + X[col])
X_test[col] = np.log(1 + X_test[col])
# Encoding and Scaling
# Indentify numerical and categorical columns
numerical_cols = [
cname for cname in X.columns if X[cname].dtype in ["int64", "float64"]
]
categorical_cols = [cname for cname in X.columns if X[cname].dtype == "object"]
# Scale numerical data to have mean=0 and variance=1
numerical_transformer = Pipeline(steps=[("scaler", StandardScaler())])
# One-hot encode categorical data
categorical_transformer = Pipeline(
steps=[
(
"onehot",
OneHotEncoder(drop="if_binary", handle_unknown="ignore", sparse=False),
)
]
)
# Combine preprocessing
ct = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
],
remainder="passthrough",
)
# Apply preprocessing
X = ct.fit_transform(X)
X_test = ct.transform(X_test)
# Print new shape
print("Training set shape:", X.shape)
# PCA
pca = PCA(n_components=3)
components = pca.fit_transform(X)
total_var = pca.explained_variance_ratio_.sum() * 100
fig = px.scatter_3d(
components,
x=0,
y=1,
z=2,
color=y,
size=0.1 * np.ones(len(X)),
opacity=1,
title=f"Total Explained Variance: {total_var:.2f}%",
labels={"0": "PC 1", "1": "PC 2", "2": "PC 3"},
width=800,
height=500,
)
fig.show()
# Explained variance (how important each additional principal component is)
pca = PCA().fit(X)
fig, ax = plt.subplots(figsize=(10, 4))
xi = np.arange(1, 1 + X.shape[1], step=1)
yi = np.cumsum(pca.explained_variance_ratio_)
plt.plot(xi, yi, marker="o", linestyle="--", color="b")
# Aesthetics
plt.ylim(0.0, 1.1)
plt.xlabel("Number of Components")
plt.xticks(np.arange(1, 1 + X.shape[1], step=2))
plt.ylabel("Cumulative variance (%)")
plt.title("Explained variance by each component")
plt.axhline(y=1, color="r", linestyle="-")
plt.text(0.5, 0.85, "100% cut-off threshold", color="red")
ax.grid(axis="x")
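# Small addition: how many components are needed to reach 95% of the variance,
# read off the cumulative curve computed above.
n_components_95 = int(np.argmax(yi >= 0.95)) + 1
print(f"Components needed for 95% of the variance: {n_components_95}")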
# # Modeling
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, stratify=y, test_size=0.2, random_state=42
)
# #### 27 Different Classifiers (LazyPredict)
clf = LazyClassifier(
verbose=0,
ignore_warnings=True,
custom_metric=None,
predictions=False,
random_state=RANDOM_STATE,
classifiers="all",
)
models, predictions = clf.fit(X_train, X_valid, y_train, y_valid)
clear_output()
models
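# Rank the LazyPredict benchmark explicitly by accuracy; these are the same columns
# used in the plots below, so this is just a convenience view, not a new result.
top_models = models.sort_values("Accuracy", ascending=False)
print(top_models[["Accuracy", "ROC AUC", "F1 Score", "Time Taken"]].head(5))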
line = px.line(data_frame=models, y=["Accuracy"], markers=True)
line.update_xaxes(title="Model", rangeslider_visible=False)
line.update_yaxes(title="Accuracy")
line.update_traces(line_color="red")
line.update_layout(
showlegend=True,
title={
"text": "Accuracy vs Model",
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
)
line.show()
line = px.line(data_frame=models, y=["ROC AUC", "F1 Score"], markers=True)
line.update_xaxes(title="Model", rangeslider_visible=False)
line.update_yaxes(title="ROC AUC Score")
line.update_layout(
showlegend=True,
title={
"text": "ROC AUC and F1 Score vs Model",
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
)
line.show()
line = px.line(data_frame=models, y=["Time Taken"], markers=True)
line.update_xaxes(title="Model", rangeslider_visible=False)
line.update_yaxes(title="Time(s)")
line.update_traces(line_color="purple")
line.update_layout(
showlegend=True,
title={
"text": "Time taken vs Model",
"y": 0.94,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
)
line.show()
# #### LGBM Classifier (5 Folds):
lgb_params = {"objective": "binary", "n_estimators": 50, "learning_rate": 0.08}
lgb_predictions = 0
lgb_scores = []
lgb_fimp = []
skf = StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=RANDOM_STATE)
for fold, (train_idx, valid_idx) in enumerate(skf.split(X, y)):
print(f"\033[94m")
print(10 * "=", f"Fold={fold+1}", 10 * "=")
start_time = time.time()
X_train, X_valid = X[train_idx], X[valid_idx]
y_train, y_valid = y[train_idx], y[valid_idx]
model = LGBMClassifier(**lgb_params)
model.fit(X_train, y_train, verbose=0)
preds_valid = model.predict(X_valid)
acc = accuracy_score(y_valid, preds_valid) * 100
lgb_scores.append(acc)
run_time = time.time() - start_time
print(f"Fold={fold+1}, Accuracy score: {acc:.2f}%, Run Time: {run_time:.2f}s")
test_preds = model.predict(X_test)
lgb_predictions += test_preds / FOLDS
print("")
print("Avg Accuracy:", np.mean(lgb_scores))
# # Submission
# Majority vote across folds: predict Transported when at least half the folds agree
submission[TARGET] = (lgb_predictions >= 0.5).astype("bool")
submission.to_csv("submission.csv", index=False)
submission.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/132/129132559.ipynb
| null | null |
[{"Id": 129132559, "ScriptId": 37665387, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 839522, "CreationDate": "05/11/2023 08:34:55", "VersionNumber": 8.0, "Title": "\ud83d\ude80Spaceship Titanic: 20+ Benchmark LazyPredict", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 1018.0, "LinesInsertedFromPrevious": 388.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 630.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
| null | null | null | null |
# Created by Andreas Chandra
# 24 April 2023
# ### 🚀SPACESHIP TITANIC: Benchmark Models
# # Overview
# Welcome to the year 2912, where your data science skills are needed to solve a cosmic mystery. We've received a transmission from four lightyears away and things aren't looking good.
# The Spaceship Titanic was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
# While rounding Alpha Centauri en route to its first destination—the torrid 55 Cancri E—the unwary Spaceship Titanic collided with a spacetime anomaly hidden within a dust cloud. Sadly, it met a similar fate as its namesake from 1000 years before. Though the ship stayed intact, almost half of the passengers were transported to an alternate dimension!
# # Libraries
# #### Installing LazyPredict
from IPython.display import clear_output
clear_output()
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
StandardScaler,
MinMaxScaler,
OneHotEncoder,
LabelEncoder,
)
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import accuracy_score
from lightgbm import LGBMClassifier
import lazypredict
from lazypredict.Supervised import LazyClassifier
import time
import warnings
warnings.filterwarnings("ignore")
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
# # Data Loading and Preparation
DATA_DIR = "../input/spaceship-titanic"
train = pd.read_csv(os.path.join(DATA_DIR, "train.csv"))
test = pd.read_csv(os.path.join(DATA_DIR, "test.csv"))
submission = pd.read_csv(os.path.join(DATA_DIR, "sample_submission.csv"))
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = "median"
# ### Column Descriptions:
# - `PassengerId` - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always.
# - `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence.
# - `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
# - `Cabin` - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard.
# - `Destination` - The planet the passenger will be debarking to.
# - `Age` - The age of the passenger.
# - `VIP` - Whether the passenger has paid for special VIP service during the voyage.
# - `RoomService`, `FoodCourt`, `ShoppingMall`, `Spa`, `VRDeck` - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities.
# - `Name` - The first and last names of the passenger.
# - `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
# ### Exploring Train Data:
# 📌 Observations in Train Data:
# * 14 columns and 8693 rows.
# * Train data contains 121702 values with 2324 of them are missing.
# * All 12 feature columns have missing values in them with CryoSleep having highest missing values (217)
# * The target variable is "Transported"
# #### Quick view of train data:
# Below are the first 5 rows of train dataset
#
train.head()
print(f"\033[94mNumber of rows in train data \t\t: {len(train)}")
print(f"\033[94mNumber of columns in train data \t: {len(train.columns)}")
print(f"\033[94mNumber of values in train data \t\t: {train.count().sum()}")
print(f"\033[94mNumber of missing values in train data : {sum(train.isna().sum())}")
# #### Column wise missing values:
train.isna().sum().sort_values(ascending=False)
# #### Basic statistics of training data:
# Below is the basic statistics for each numerical variables which contain information on `count`, `mean`, `standard deviation`, `minimum`, `Q1`, `median`, `Q3`, and `maximum`
train.describe().T
# ### Exploring test data
# 📌 Observations in Test Data:
# * There are total of 13 columns and 4277 rows in test data.
# * Train data contains 54484 observation with 1117 missing values.
# * All 12 feature columns have missing values in them with FoodCourt having highest missing values (106)
test.head()
test.isna().sum()
test.describe().T
# ### Submission File
submission.head()
# # Exploratory Data Analysis
TARGET = "Transported"
FEATURES = list(set(train.columns) - set([TARGET]))
RANDOM_STATE = 42
# #### Null Value Distribution
# 📌 Observations in Null Value Distribution :
# * The maximum of missing value in an row is 3 and the lowest is no missing value
# * Interestingly, the missing value distribution (row basis) is quite the same between train and test dataset.
# * There are around 76% of the observations (row basis) that has no missing values.
# * Rest 24% of the observations (row basis) that have 1 to 3 missing values
# #### Column wise null value distribution
train_mv = (
train.isna()
.sum()
.reset_index()
.rename({"index": "feature", 0: "total_nulls"}, axis=1)
)
train_mv["null_ratio"] = train_mv.total_nulls / len(train) * 100
train_mv
test_mv = (
test.isna()
.sum()
.reset_index()
.rename({"index": "feature", 0: "total_nulls"}, axis=1)
)
test_mv["null_ratio"] = test_mv.total_nulls / len(test) * 100
test_mv
# #### Row wise null value distribution
train_na_cols = train.columns[train.isna().any()]
test_na_cols = test.columns[test.isna().any()]
fig = px.imshow(
train[train_na_cols].isna().T, aspect="auto", color_continuous_scale="viridis"
)
fig.show()
# Result:
# The missing values is quite random. Therefore, next we can perform imputation.
# #### Continuos and Categorical Data Distribution
# 📌 Observations in Null Value Distribution :
# * Out of 12 features 6 features are continous, 2 features are text data and 4 features are categorical.
# * HomePlanet and Destination have 3 differnt unique values.
# * CryoSleep and VIP are bool features
df = pd.concat([train[FEATURES], test[FEATURES]], axis=0)
text_features = ["Cabin", "Name"]
cat_features = [
col for col in FEATURES if df[col].nunique() < 20 and col not in text_features
]
cont_features = [
col for col in FEATURES if df[col].nunique() >= 20 and col not in text_features
]
del df
print(f"\033[94mTotal number of features\t: {len(FEATURES)}")
print(f"\033[94mTotal of categorical features\t: {len(cat_features)}")
print(f"\033[94mTotal of continuous features\t: {len(cont_features)}")
print(f"\033[94mTotal of text features\t\t: {len(text_features)}")
labels = ["Categorical", "Continuous", "Text"]
values = [len(cat_features), len(cont_features), len(text_features)]
colors = ["#DE3163", "#58D68D"]
fig = go.Figure(
data=[
go.Pie(
labels=labels,
values=values,
pull=[0.01, 0.01, 0.01],
marker=dict(colors=colors),
)
]
)
fig.show()
# ### Feature Distribution of Continuous Features
# #### Distribution of Age
train_age = train.copy()
test_age = test.copy()
train_age["type"] = "Train"
test_age["type"] = "Test"
ageDf = pd.concat([train_age, test_age])
fig = px.histogram(
data_frame=ageDf,
x="Age",
color="type",
color_discrete_sequence=["#58D68D", "#DE3163"],
marginal="box",
nbins=100,
template="plotly_white",
)
fig.update_layout(title="Distribution of Age", title_x=0.5)
fig.show()
# #### Feature Distribution of Categorical Features
if len(cat_features) == 0:
print("No categorical features")
else:
ncols = 2
nrows = 2
fig, axes = plt.subplots(nrows, ncols, figsize=(18, 10))
for r in range(nrows):
for c in range(ncols):
col = cat_features[r * ncols + c]
sns.countplot(
x=train[col], ax=axes[r, c], palette="viridis", label="Train data"
)
sns.countplot(
x=test[col], ax=axes[r, c], palette="magma", label="Test data"
)
axes[r, c].legend()
axes[r, c].set_ylabel("")
axes[r, c].set_xlabel(col, fontsize=20),
axes[r, c].tick_params(labelsize=10, width=0.5)
axes[r, c].xaxis.offsetText.set_fontsize(4)
axes[r, c].yaxis.offsetText.set_fontsize(4)
plt.show()
# #### Target Distribution
# 📌 Observations in Target Distribution:
# * There are two target values - True and False.
# * Both target values are almost equally distributed.
target_df = pd.DataFrame(train[TARGET].value_counts()).reset_index()
target_df.columns = [TARGET, "count"]
fig = px.bar(data_frame=target_df, x=TARGET, y="count")
fig.update_traces(
marker_color=["#58D68D", "#DE3163"],
marker_line_color="rgb(0,0,0)",
marker_line_width=2,
)
fig.update_layout(title="Target Distribution", template="plotly_white", title_x=0.5)
transported_perc = target_df["count"][0] * 100 / train.shape[0]
not_transported_perc = target_df["count"][1] * 100 / train.shape[0]
print(f"\033[94mPercentage of {TARGET} = {target_df[TARGET][0]}: {transported_perc:.2f}%")
print(f"\033[94mPercentage of {TARGET} = {target_df[TARGET][1]}: {not_transported_perc:.2f}%")
fig.show()
# #### Correlation Matrix
fig = px.imshow(
train.corr(), text_auto=True, aspect="auto", color_continuous_scale="viridis"
)
fig.show()
# ### Modified
train
fig = px.histogram(data_frame=train, x="Age", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="RoomService", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="FoodCourt", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="Spa", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="ShoppingMall", color="Transported")
fig.show()
fig = px.histogram(data_frame=train, x="VRDeck", color="Transported")
fig.show()
# # Feature Engineering
# Follow: https://www.kaggle.com/code/samuelcortinhas/spaceship-titanic-a-complete-guide#Feature-engineering
# #### Age
train["AgeGroup"] = None
train.loc[train.Age.lt(13), "AgeGroup"] = "Age_0-12"
train.loc[train.Age.between(13, 17), "AgeGroup"] = "Age_13-17"
train.loc[train.Age.between(18, 25), "AgeGroup"] = "Age_18-25"
train.loc[train.Age.between(26, 30), "AgeGroup"] = "Age_26-30"
train.loc[train.Age.between(31, 50), "AgeGroup"] = "Age_31-50"
train.loc[train.Age.gt(50), "AgeGroup"] = "Age_51+"
test.loc[test.Age.lt(13), "AgeGroup"] = "Age_0-12"
test.loc[test.Age.between(13, 17), "AgeGroup"] = "Age_13-17"
test.loc[test.Age.between(18, 25), "AgeGroup"] = "Age_18-25"
test.loc[test.Age.between(26, 30), "AgeGroup"] = "Age_26-30"
test.loc[test.Age.between(31, 50), "AgeGroup"] = "Age_31-50"
test.loc[test.Age.gt(50), "AgeGroup"] = "Age_51+"
# Plot distribution of new features
plt.figure(figsize=(10, 4))
g = sns.countplot(
data=train,
x="AgeGroup",
hue="Transported",
order=["Age_0-12", "Age_13-17", "Age_18-25", "Age_26-30", "Age_31-50", "Age_51+"],
)
plt.title("Age group distribution")
# #### Expenditure
# Expenditure features
exp_feats = ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
train["Expenditure"] = train[exp_feats].sum(axis=1)
train["NoSpending"] = (train["Expenditure"] == 0).astype(int)
test["Expenditure"] = test[exp_feats].sum(axis=1)
test["NoSpending"] = (test["Expenditure"] == 0).astype(int)
fig = plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
sns.histplot(data=train, x="Expenditure", hue="Transported", bins=200)
plt.title("Total expenditure (truncated)")
plt.ylim([0, 200])
plt.xlim([0, 20000])
plt.subplot(1, 2, 2)
sns.countplot(data=train, x="NoSpending", hue="Transported")
plt.title("No spending")
fig.tight_layout()
# #### Passenger Group
# New feature - Group
train["Group"] = train["PassengerId"].apply(lambda x: x.split("_")[0]).astype(int)
test["Group"] = test["PassengerId"].apply(lambda x: x.split("_")[0]).astype(int)
# New feature - Group size
train["GroupSize"] = train["Group"].map(
lambda x: pd.concat([train.Group, test.Group]).value_counts()[x]
)
test["GroupSize"] = test["Group"].map(
lambda x: pd.concat([train.Group, test.Group]).value_counts()[x]
)
# Plot distribution
plt.figure(figsize=(20, 4))
plt.subplot(1, 2, 1)
sns.histplot(data=train, x="Group", hue="Transported", binwidth=1)
plt.title("Group")
plt.subplot(1, 2, 2)
sns.countplot(data=train, x="GroupSize", hue="Transported")
plt.title("Group size")
fig.tight_layout()
train["Solo"] = (train["GroupSize"] == 1).astype(int)
test["Solo"] = (test["GroupSize"] == 1).astype(int)
# New feature distribution
plt.figure(figsize=(10, 4))
sns.countplot(data=train, x="Solo", hue="Transported")
plt.title("Passanger travelling solo or not")
plt.ylim([0, 3000])
plt.show()
# #### Cabin Location
# Extract deck, number, and side from cabin feature
train["Cabin"].fillna("Z/9999/Z", inplace=True)
test["Cabin"].fillna("Z/9999/Z", inplace=True)
train["CabinDeck"] = train.Cabin.apply(lambda x: x.split("/")[0])
train["CabinNumber"] = train.Cabin.apply(lambda x: x.split("/")[1]).astype(int)
train["CabinSide"] = train.Cabin.apply(lambda x: x.split("/")[2])
test["CabinDeck"] = test.Cabin.apply(lambda x: x.split("/")[0])
test["CabinNumber"] = test.Cabin.apply(lambda x: x.split("/")[1]).astype(int)
test["CabinSide"] = test.Cabin.apply(lambda x: x.split("/")[2])
train.loc[train.CabinDeck == "Z", "CabinDeck"] = np.nan
train.loc[train.CabinNumber == 9999, "CabinNumber"] = np.nan
train.loc[train.CabinSide == "Z", "CabinSide"] = np.nan
test.loc[test.CabinDeck == "Z", "CabinDeck"] = np.nan
test.loc[test.CabinNumber == 9999, "CabinNumber"] = np.nan
test.loc[test.CabinSide == "Z", "CabinSide"] = np.nan
train.drop("Cabin", axis=1, inplace=True)
test.drop("Cabin", axis=1, inplace=True)
fig = plt.figure(figsize=(10, 12))
plt.subplot(3, 1, 1)
sns.countplot(
data=train,
x="CabinDeck",
hue="Transported",
order=["A", "B", "C", "D", "E", "F", "G", "T"],
)
plt.title("Cabin Deck")
plt.subplot(3, 1, 2)
sns.histplot(data=train, x="CabinNumber", hue="Transported", binwidth=20)
plt.title("CabinNumber")
plt.xlim([0, 2000])
plt.subplot(3, 1, 3)
sns.countplot(data=train, x="CabinSide", hue="Transported")
plt.title("CabinSide")
fig.tight_layout()
train["CabinRegion1"] = train.CabinNumber.lt(300).astype(int)
train["CabinRegion2"] = train.CabinNumber.between(300, 599).astype(int)
train["CabinRegion3"] = train.CabinNumber.between(600, 899).astype(int)
train["CabinRegion4"] = train.CabinNumber.between(900, 1199).astype(int)
train["CabinRegion5"] = train.CabinNumber.between(1200, 1499).astype(int)
train["CabinRegion6"] = train.CabinNumber.between(1500, 1799).astype(int)
train["CabinRegion7"] = train.CabinNumber.ge(1800).astype(int)
test["CabinRegion1"] = test.CabinNumber.lt(300).astype(int)
test["CabinRegion2"] = test.CabinNumber.between(300, 599).astype(int)
test["CabinRegion3"] = test.CabinNumber.between(600, 899).astype(int)
test["CabinRegion4"] = test.CabinNumber.between(900, 1199).astype(int)
test["CabinRegion5"] = test.CabinNumber.between(1200, 1499).astype(int)
test["CabinRegion6"] = test.CabinNumber.between(1500, 1799).astype(int)
test["CabinRegion7"] = test.CabinNumber.ge(1800).astype(int)
plt.figure(figsize=(10, 4))
train["CabinRegionPlot"] = (
train["CabinRegion1"]
+ 2 * train["CabinRegion2"]
+ 3 * train["CabinRegion3"]
+ 4 * train["CabinRegion4"]
+ 5 * train["CabinRegion5"]
+ 6 * train["CabinRegion6"]
+ 7 * train["CabinRegion7"]
).astype(int)
sns.countplot(data=train, x="CabinRegionPlot", hue="Transported")
plt.title("Cabin Regions")
train.drop("CabinRegionPlot", axis=1, inplace=True)
# #### Last Name
# replace NaN's with outliers
train["Name"].fillna("Unknown Unknown", inplace=True)
test["Name"].fillna("Unknown, Unknown", inplace=True)
# Surname
train["Surname"] = train["Name"].str.split().str[-1]
test["Surname"] = test["Name"].str.split().str[-1]
# Family size
train["FamilySize"] = train["Surname"].map(
lambda x: pd.concat([train.Surname, test.Surname]).value_counts()[x]
)
test["FamilySize"] = test["Surname"].map(
lambda x: pd.concat([train.Surname, test.Surname]).value_counts()[x]
)
train.loc[train.Surname == "Unknown", "Surname"] = np.nan
train.loc[train.FamilySize > 100, "FamilySize"] = np.nan
test.loc[test.Surname == "Unknown", "Surname"] = np.nan
test.loc[test.FamilySize > 100, "FamilySize"] = np.nan
# Drop name
train.drop("Name", axis=1, inplace=True)
test.drop("Name", axis=1, inplace=True)
# Plot family size distribution
plt.figure(figsize=(12, 4))
sns.countplot(data=train, x="FamilySize", hue="Transported")
plt.title("Family size")
plt.show()
# # Imputing Missing Values
# Combine train and test
y = train["Transported"].copy().astype(int)
X = train.drop("Transported", axis=1).copy()
data = pd.concat([X, test], axis=0).reset_index(drop=True)
# #### Count missing values
na_cols = data.columns[data.isna().any()].tolist()
mv = pd.DataFrame(data[na_cols].isna().sum(), columns=["NumberMissing"])
mv["PercentageMissing"] = np.round(100 * mv.NumberMissing / len(data), 2)
mv
plt.figure(figsize=(12, 6))
sns.heatmap(train[na_cols].isna().T, cmap="summer")
plt.title("Heatmap of missing values")
plt.show()
# Countplot
train["NACount"] = train.isna().sum(axis=1)
plt.figure(figsize=(10, 4))
sns.countplot(data=train, x="NACount", hue="Transported")
plt.title("Number of missing values by passanger")
plt.show()
train.drop("NACount", axis=1, inplace=True)
# #### Strategy
# Rather than filling every column with a global mode/median, missing values are imputed using
# joint distributions with related features: HomePlanet from Group and CabinDeck, Surname and the
# Cabin features from Group, Age from passenger subgroups, and expenditure from CryoSleep status
# (passengers in CryoSleep spend nothing). The sections below work through each column in turn.
# #### HomePlanet
# ##### HomePlanet and Group
GHP_gb = data.groupby(["Group", "HomePlanet"])["HomePlanet"].size().unstack().fillna(0)
GHP_gb.head()
(GHP_gb > 0).sum(axis=1).value_counts()
HP_bef = data["HomePlanet"].isna().sum()
GHP_index = data[data.HomePlanet.isna()][
(data[data.HomePlanet.isna()]["Group"]).isin(GHP_gb.index)
].index
data.loc[GHP_index, "HomePlanet"] = data.iloc[GHP_index, :]["Group"].map(
lambda x: GHP_gb.idxmax(axis=1)[x]
)
print("# of HomePlanet missing before: ", HP_bef)
print("# of HomePlanet missing after: ", data.HomePlanet.isna().sum())
# ##### HomePlanet and CabinDeck
CDHP_gb = (
data.groupby(["CabinDeck", "HomePlanet"])["HomePlanet"].size().unstack().fillna(0)
)
plt.figure(figsize=(10, 4))
sns.heatmap(CDHP_gb.T, annot=True, fmt="g", cmap="coolwarm")
plt.show()
HP_bef = data.HomePlanet.isna().sum()
data.loc[(data.HomePlanet.isna()) & ~(data.CabinDeck == "D"), "HomePlanet"] = "Earth"
data.loc[(data.HomePlanet.isna()) & (data.CabinDeck == "D"), "HomePlanet"] = "Mars"
print("# of HomePlanet missing before:", HP_bef)
print("# of HomePlanet missing after:", data.HomePlanet.isna().sum())
# #### Destination
D_bef = data.Destination.isna().sum()
data.loc[data.Destination.isna(), "Destination"] = "TRAPPIST-1e"
print("# of Destination missing before:", D_bef)
print("# of Destination missing after:", data.Destination.isna().sum())
# #### Surname and Group
GSN_gb = (
data[data.GroupSize > 1]
.groupby(["Group", "Surname"])["Surname"]
.size()
.unstack()
.fillna(0)
)
# Countplot
plt.figure(figsize=(10, 4))
sns.countplot(data=(GSN_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("Number of unique surnames by group")
SN_bef = data.Surname.isna().sum()
GSN_index = data[data.Surname.isna()][
(data[data.Surname.isna()]["Group"]).isin(GSN_gb.index)
].index
data.loc[GSN_index, "Surname"] = data.iloc[GSN_index, :]["Group"].map(
lambda x: GSN_gb.idxmax(axis=1)[x]
)
print("# of Surname missing before: ", SN_bef)
print("# of Surname missing after", data.Surname.isna().sum())
data.Surname.fillna("Unknown", inplace=True)
data["FamilySize"] = data["Surname"].map(lambda x: data["Surname"].value_counts()[x])
data.loc[data["Surname"] == "Unknown", "Surname"] = np.nan
data.loc[data["FamilySize"] > 100, "FamilySize"] = 0
# #### CabinSide and Group
# Joint distribution of Group and Cabin features
GCD_gb = (
data[data["GroupSize"] > 1]
.groupby(["Group", "CabinDeck"])["CabinDeck"]
.size()
.unstack()
.fillna(0)
)
GCN_gb = (
data[data["GroupSize"] > 1]
.groupby(["Group", "CabinNumber"])["CabinNumber"]
.size()
.unstack()
.fillna(0)
)
GCS_gb = (
data[data["GroupSize"] > 1]
.groupby(["Group", "CabinSide"])["CabinSide"]
.size()
.unstack()
.fillna(0)
)
# Countplots
fig = plt.figure(figsize=(16, 4))
plt.subplot(1, 3, 1)
sns.countplot(data=(GCD_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("# of Unique cabin decks per group")
plt.subplot(1, 3, 2)
sns.countplot(data=(GCN_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("# of Unique cabin numbers per group")
plt.subplot(1, 3, 3)
sns.countplot(data=(GCS_gb > 0).sum(axis=1).reset_index(), x=0)
plt.title("# of Unique cabin sides per group")
fig.tight_layout()
# Missing values before
CS_bef = data.CabinSide.isna().sum()
# Passengers with missing Cabin side and in a group with known Cabin side
GCS_index = data[data.CabinSide.isna()][
(data[data.CabinSide.isna()]["Group"]).isin(GCS_gb.index)
].index
# Fill corresponding missing values
data.loc[GCS_index, "CabinSide"] = data.iloc[GCS_index, :]["Group"].map(
lambda x: GCS_gb.idxmax(axis=1)[x]
)
# Print number of missing values left
print("# of Cabin_side missing values before:", CS_bef)
print("# of Cabin_side missing values after:", data.CabinSide.isna().sum())
# #### CabinSide and Surname
SCS_gb = (
data[data["GroupSize"] > 1]
.groupby(["Surname", "CabinSide"])["CabinSide"]
.size()
.unstack()
.fillna(0)
)
SCS_gb["Ratio"] = SCS_gb["P"] / (SCS_gb["P"] + SCS_gb["S"])
plt.figure(figsize=(10, 4))
sns.histplot(SCS_gb["Ratio"], kde=True, binwidth=0.05)
plt.title("Ratio of cabin side by surname")
print(
"Percentage of families all on the same cabid side:",
100 * np.round((SCS_gb["Ratio"].isin([0, 1])).sum() / len(SCS_gb), 3),
"%",
)
SCS_gb.head()
# Missing values before
CS_bef = data["CabinSide"].isna().sum()
SCS_gb.drop("Ratio", axis=1, inplace=True)
SCS_index = data[data["CabinSide"].isna()][
(data[data["CabinSide"].isna()]["Surname"]).isin(SCS_gb.index)
].index
data.loc[SCS_index, "CabinSide"] = data.iloc[SCS_index, :]["Surname"].map(
lambda x: SCS_gb.idxmax(axis=1)[x]
)
data.drop("Surname", axis=1, inplace=True)
print("# of CabinSide missing before: ", CS_bef)
print("# of CabinSide missing after:", data.CabinSide.isna().sum())
data.CabinSide.value_counts()
CS_bef = data.CabinSide.isna().sum()
data.loc[data["CabinSide"].isna(), "CabinSide"] = "Z"
print("# of CabinSide missing before: ", CS_bef)
print("# of CabinSide missing after:", data.CabinSide.isna().sum())
# #### CabinDeck and Group
CD_bef = data["CabinDeck"].isna().sum()
GCD_index = data[data["CabinDeck"].isna()][
(data[data.CabinDeck.isna()]["Group"]).isin(GCD_gb.index)
].index
data.loc[GCD_index, "CabinDeck"] = data.iloc[GCD_index, :]["Group"].map(
lambda x: GCD_gb.idxmax(axis=1)[x]
)
# Print number of missing values left
print("# of Cabin_deck missing before:", CD_bef)
print("# of Cabin_deck missing after:", data["CabinDeck"].isna().sum())
# #### CabinDeck and HomePlanet
# Joint distribution
data.groupby(["HomePlanet", "Destination", "Solo", "CabinDeck"])[
"CabinDeck"
].size().unstack().fillna(0)
plt.figure(figsize=(12, 8))
sns.heatmap(
data.groupby(["HomePlanet", "Destination", "Solo", "CabinDeck"])["CabinDeck"]
.size()
.unstack()
.fillna(0),
annot=True,
fmt="g",
)
# Missing values before
CD_bef = data["CabinDeck"].isna().sum()
# Fill missing values using the mode
na_rows_CD = data.loc[data["CabinDeck"].isna(), "CabinDeck"].index
data.loc[data["CabinDeck"].isna(), "CabinDeck"] = data.groupby(
["HomePlanet", "Destination", "Solo"]
)["CabinDeck"].transform(lambda x: x.fillna(pd.Series.mode(x)[0]))[na_rows_CD]
# Print number of missing values left
print("# of CabinDeck missing values before:", CD_bef)
print("# of CabinDeck missing values after:", data["CabinDeck"].isna().sum())
# #### CabinNumber and CabinDeck
# Scatterplot
plt.figure(figsize=(10, 4))
sns.scatterplot(
x=data["CabinNumber"],
y=data["Group"],
c=LabelEncoder().fit_transform(data.loc[~data["CabinNumber"].isna(), "CabinDeck"]),
cmap="tab10",
)
plt.title("Cabin_number vs group coloured by group")
# Missing values before
CN_bef = data["CabinNumber"].isna().sum()
# Extrapolate linear relationship on a deck by deck basis
for deck in ["A", "B", "C", "D", "E", "F", "G"]:
# Features and labels
X_CN = data.loc[
~(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "Group"
]
y_CN = data.loc[
~(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "CabinNumber"
]
X_test_CN = data.loc[
(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "Group"
]
# Linear regression
model_CN = LinearRegression()
model_CN.fit(X_CN.values.reshape(-1, 1), y_CN)
preds_CN = model_CN.predict(X_test_CN.values.reshape(-1, 1))
# Fill missing values with predictions
data.loc[
(data["CabinNumber"].isna()) & (data["CabinDeck"] == deck), "CabinNumber"
] = preds_CN.astype(int)
# Print number of missing values left
print("#Cabin_number missing values before:", CN_bef)
print("#Cabin_number missing values after:", data["CabinNumber"].isna().sum())
# One-hot encode cabin regions
data["CabinRegion1"] = (data["CabinNumber"] < 300).astype(int)
data["CabinRegion2"] = (
(data["CabinNumber"] >= 300) & (data["CabinNumber"] < 600)
).astype(int)
data["CabinRegion3"] = (
(data["CabinNumber"] >= 600) & (data["CabinNumber"] < 900)
).astype(int)
data["CabinRegion4"] = (
(data["CabinNumber"] >= 900) & (data["CabinNumber"] < 1200)
).astype(int)
data["CabinRegion5"] = (
(data["CabinNumber"] >= 1200) & (data["CabinNumber"] < 1500)
).astype(int)
data["CabinRegion6"] = (
(data["CabinNumber"] >= 1500) & (data["CabinNumber"] < 1800)
).astype(int)
data["CabinRegion7"] = (data["CabinNumber"] >= 1800).astype(int)
# #### VIP
data.VIP.value_counts()
# Missing values before
V_bef = data["VIP"].isna().sum()
# Fill missing values with mode
data.loc[data["VIP"].isna(), "VIP"] = False
# Print number of missing values left
print("#VIP missing values before:", V_bef)
print("#VIP missing values after:", data["VIP"].isna().sum())
# #### Age
data.groupby(["HomePlanet", "NoSpending", "Solo", "CabinDeck"])[
"Age"
].median().unstack().fillna(0)
# Missing values before
A_bef = data["Age"].isna().sum()
# Fill missing values using the median
na_rows_A = data.loc[data["Age"].isna(), "Age"].index
data.loc[data["Age"].isna(), "Age"] = data.groupby(
["HomePlanet", "NoSpending", "Solo", "CabinDeck"]
)["Age"].transform(lambda x: x.fillna(x.median()))[na_rows_A]
# Print number of missing values left
print("# of Age missing before:", A_bef)
print("# of Age missing after:", data.Age.isna().sum())
# Update age group feature
data.loc[data["Age"] <= 12, "AgeGroup"] = "Age_0-12"
data.loc[(data["Age"] > 12) & (data["Age"] < 18), "AgeGroup"] = "Age_13-17"
data.loc[(data["Age"] >= 18) & (data["Age"] <= 25), "AgeGroup"] = "Age_18-25"
data.loc[(data["Age"] > 25) & (data["Age"] <= 30), "AgeGroup"] = "Age_26-30"
data.loc[(data["Age"] > 30) & (data["Age"] <= 50), "AgeGroup"] = "Age_31-50"
data.loc[data["Age"] > 50, "AgeGroup"] = "Age_51+"
# #### CryoSleep
# Joint distribution
data.groupby(["NoSpending", "CryoSleep"])["CryoSleep"].size().unstack().fillna(0)
# Missing values before
CSL_bef = data["CryoSleep"].isna().sum()
# Fill missing values using the mode
na_rows_CSL = data.loc[data["CryoSleep"].isna(), "CryoSleep"].index
data.loc[data["CryoSleep"].isna(), "CryoSleep"] = data.groupby(["NoSpending"])[
"CryoSleep"
].transform(lambda x: x.fillna(pd.Series.mode(x)[0]))[na_rows_CSL]
# Print number of missing values left
print("#CryoSleep missing values before:", CSL_bef)
print("#CryoSleep missing values after:", data["CryoSleep"].isna().sum())
# #### Expenditure and CryoSleep
print(
"Maximum expenditure of passengers in CryoSleep:",
data.loc[data["CryoSleep"] == True, exp_feats].sum(axis=1).max(),
)
# Missing values before
E_bef = data[exp_feats].isna().sum().sum()
# CryoSleep has no expenditure
for col in exp_feats:
data.loc[(data[col].isna()) & (data["CryoSleep"] == True), col] = 0
# Print number of missing values left
print("# of Expenditure missing values before:", E_bef)
print("# of Expenditure missing values after:", data[exp_feats].isna().sum().sum())
# #### Expenditure and Others
# Joint distribution
data.groupby(["HomePlanet", "Solo", "AgeGroup"])["Expenditure"].mean().unstack().fillna(
0
)
# Missing values before
E_bef = data[exp_feats].isna().sum().sum()
# Fill remaining missing values using the group mean
for col in exp_feats:
na_rows = data.loc[data[col].isna(), col].index
data.loc[data[col].isna(), col] = data.groupby(["HomePlanet", "Solo", "AgeGroup"])[
col
].transform(lambda x: x.fillna(x.mean()))[na_rows]
# Print number of missing values left
print("# of Expenditure missing values before:", E_bef)
print("# of Expenditure missing values after:", data[exp_feats].isna().sum().sum())
# Update expenditure and no_spending
data["Expenditure"] = data[exp_feats].sum(axis=1)
data["NoSpending"] = (data["Expenditure"] == 0).astype(int)
data.isna().sum()
# # Preprocessing
# Split data back into train and test sets
X = data[data.PassengerId.isin(train.PassengerId.values)].copy()
X_test = data[data.PassengerId.isin(test.PassengerId.values)].copy()
# Drop unwanted features
X.drop(
["PassengerId", "Group", "GroupSize", "AgeGroup", "CabinNumber"],
axis=1,
inplace=True,
)
X_test.drop(
["PassengerId", "Group", "GroupSize", "AgeGroup", "CabinNumber"],
axis=1,
inplace=True,
)
# Log Transform
fig = plt.figure(figsize=(12, 20))
for i, col in enumerate(
["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck", "Expenditure"]
):
plt.subplot(6, 2, 2 * i + 1)
sns.histplot(X[col], binwidth=100)
plt.ylim([0, 200])
plt.title(f"{col} (original)")
plt.subplot(6, 2, 2 * i + 2)
sns.histplot(np.log(1 + X[col]), color="C1")
plt.ylim([0, 200])
plt.title(f"{col} (log-transform)")
fig.tight_layout()
plt.show()
for col in ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck", "Expenditure"]:
X[col] = np.log(1 + X[col])
X_test[col] = np.log(1 + X_test[col])
# Encoding and Scaling
# Indentify numerical and categorical columns
numerical_cols = [
cname for cname in X.columns if X[cname].dtype in ["int64", "float64"]
]
categorical_cols = [cname for cname in X.columns if X[cname].dtype == "object"]
# Scale numerical data to have mean=0 and variance=1
numerical_transformer = Pipeline(steps=[("scaler", StandardScaler())])
# One-hot encode categorical data
categorical_transformer = Pipeline(
steps=[
(
"onehot",
OneHotEncoder(drop="if_binary", handle_unknown="ignore", sparse=False),
)
]
)
# Combine preprocessing
ct = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
],
remainder="passthrough",
)
# Apply preprocessing
X = ct.fit_transform(X)
X_test = ct.transform(X_test)
# Print new shape
print("Training set shape:", X.shape)
# PCA
pca = PCA(n_components=3)
components = pca.fit_transform(X)
total_var = pca.explained_variance_ratio_.sum() * 100
fig = px.scatter_3d(
components,
x=0,
y=1,
z=2,
color=y,
size=0.1 * np.ones(len(X)),
opacity=1,
title=f"Total Explained Variance: {total_var:.2f}%",
labels={"0": "PC 1", "1": "PC 2", "2": "PC 3"},
width=800,
height=500,
)
fig.show()
# Explained variance (how important each additional principal component is)
pca = PCA().fit(X)
fig, ax = plt.subplots(figsize=(10, 4))
xi = np.arange(1, 1 + X.shape[1], step=1)
yi = np.cumsum(pca.explained_variance_ratio_)
plt.plot(xi, yi, marker="o", linestyle="--", color="b")
# Aesthetics
plt.ylim(0.0, 1.1)
plt.xlabel("Number of Components")
plt.xticks(np.arange(1, 1 + X.shape[1], step=2))
plt.ylabel("Cumulative variance (%)")
plt.title("Explained variance by each component")
plt.axhline(y=1, color="r", linestyle="-")
plt.text(0.5, 0.85, "100% cut-off threshold", color="red")
ax.grid(axis="x")
# # Modeling
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, stratify=y, test_size=0.2, random_state=42
)
# #### 27 Different Classifiers (LazyPredict)
clf = LazyClassifier(
verbose=0,
ignore_warnings=True,
custom_metric=None,
predictions=False,
random_state=RANDOM_STATE,
classifiers="all",
)
models, predictions = clf.fit(X_train, X_valid, y_train, y_valid)
clear_output()
models
line = px.line(data_frame=models, y=["Accuracy"], markers=True)
line.update_xaxes(title="Model", rangeslider_visible=False)
line.update_yaxes(title="Accuracy")
line.update_traces(line_color="red")
line.update_layout(
showlegend=True,
title={
"text": "Accuracy vs Model",
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
)
line.show()
line = px.line(data_frame=models, y=["ROC AUC", "F1 Score"], markers=True)
line.update_xaxes(title="Model", rangeslider_visible=False)
line.update_yaxes(title="ROC AUC Score")
line.update_layout(
showlegend=True,
title={
"text": "ROC AUC and F1 Score vs Model",
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
)
line.show()
line = px.line(data_frame=models, y=["Time Taken"], markers=True)
line.update_xaxes(title="Model", rangeslider_visible=False)
line.update_yaxes(title="Time(s)")
line.update_traces(line_color="purple")
line.update_layout(
showlegend=True,
title={
"text": "Time taken vs Model",
"y": 0.94,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
)
line.show()
# #### LGBM Classifier (5 Folds):
lgb_params = {"objective": "binary", "n_estimators": 50, "learning_rate": 0.08}
lgb_predictions = 0
lgb_scores = []
lgb_fimp = []
skf = StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=RANDOM_STATE)
for fold, (train_idx, valid_idx) in enumerate(skf.split(X, y)):
print(f"\033[94m")
print(10 * "=", f"Fold={fold+1}", 10 * "=")
start_time = time.time()
X_train, X_valid = X[train_idx], X[valid_idx]
y_train, y_valid = y[train_idx], y[valid_idx]
model = LGBMClassifier(**lgb_params)
model.fit(X_train, y_train, verbose=0)
preds_valid = model.predict(X_valid)
acc = accuracy_score(y_valid, preds_valid) * 100
lgb_scores.append(acc)
run_time = time.time() - start_time
print(f"Fold={fold+1}, Accuracy score: {acc:.2f}%, Run Time: {run_time:.2f}s")
test_preds = model.predict(X_test)
lgb_predictions += test_preds / FOLDS
print("")
print("Avg Accuracy:", np.mean(lgb_scores))
# # Submission
submission[TARGET] = lgb_predictions >= 0.5  # majority vote across the folds
submission.to_csv("submission.csv", index=False)
submission.head()
| false | 0 | 12,449 | 4 | 12,449 | 12,449 |
||
129145947
|
import cv2
import numpy as np
import torch
from torch import nn
import onnx
import onnxruntime
class SmallSuperPoint_Interpolate(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
# c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256
c1, c2, c3, c4, c5, d1 = 32, 64, 64, 128, 128, 128
self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)
conv1b_depth = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1, groups=c1)
conv1b_point = nn.Conv2d(c1, c1, kernel_size=1)
self.conv1b = nn.Sequential(conv1b_depth, conv1b_point)
conv2a_depth = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1, groups=c1)
conv2a_point = nn.Conv2d(c1, c2, kernel_size=1)
self.conv2a = nn.Sequential(conv2a_depth, conv2a_point)
conv2b_depth = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1, groups=c2)
conv2b_point = nn.Conv2d(c2, c2, kernel_size=1)
self.conv2b = nn.Sequential(conv2b_depth, conv2b_point)
conv3a_depth = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1, groups=c2)
conv3a_point = nn.Conv2d(c2, c3, kernel_size=1)
self.conv3a = nn.Sequential(conv3a_depth, conv3a_point)
conv3b_depth = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1, groups=c3)
conv3b_point = nn.Conv2d(c3, c3, kernel_size=1)
self.conv3b = nn.Sequential(conv3b_depth, conv3b_point)
conv4a_depth = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1, groups=c3)
conv4a_point = nn.Conv2d(c3, c4, kernel_size=1)
self.conv4a = nn.Sequential(conv4a_depth, conv4a_point)
conv4b_depth = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, groups=c4)
conv4b_point = nn.Conv2d(c4, c4, kernel_size=1)
self.conv4b = nn.Sequential(conv4b_depth, conv4b_point)
convPa_depth = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, groups=c4)
convPb_point = nn.Conv2d(c4, c5, kernel_size=1)
self.convPa = nn.Sequential(convPa_depth, convPb_point)
self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0)
convDa_depth = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, groups=c4)
convDb_point = nn.Conv2d(c4, c5, kernel_size=1)
self.convDa = nn.Sequential(convDa_depth, convDb_point)
self.convDb = nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0)
def forward(self, image):
"""Compute keypoints, scores, descriptors for image"""
# Shared Encoder
x = self.relu(self.conv1a(image))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
# Compute the dense keypoint scores
cPa = self.relu(self.convPa(x))
scores = self.convPb(cPa)
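        # 65 channels per 8x8 cell: 64 cell positions plus a "dustbin" (no-keypoint) channel.
        # Softmax over channels, drop the dustbin, then unfold each cell back to full
        # resolution (a depth-to-space reshape), giving a (B, 1, H, W) score map.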
scores = torch.nn.functional.softmax(scores, 1)[:, :64, :, :]
b, _, h, w = scores.shape
scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8)
scores = scores.permute(0, 1, 3, 2, 4).reshape(b, 1, h * 8, w * 8)
# scores = simple_nms(scores, 4).reshape(h*8, w*8)
# scores = simple_nms(scores, 4).reshape(-1)
# # Extract keypoints
# index_1d = torch.arange(0, h*8*w*8).to(scores)
# keypoints = torch.stack([index_1d // (w*8), index_1d % (w*8)], dim=1)
# Discard keypoints near the image borders
# keypoints, scores = remove_borders(keypoints, scores, 4, h*8, w*8)
# Keep the k keypoints with highest score
# scores, indices = torch.topk(scores, 400)
# keypoints = keypoints[indices]
# # Convert (h, w) to (x, y)
# keypoints = torch.flip(keypoints, [1]).float()
# Compute the dense descriptors
cDa = self.relu(self.convDa(x))
descriptors = self.convDb(cDa)
# descriptors = torch.nn.functional.normalize(descriptors, dim=1, eps=1e-6)
norm = descriptors.norm(dim=1, keepdim=True).expand_as(descriptors)
descriptors = descriptors / norm
# # Extract descriptors
# descriptors = sample_descriptors_interpolate(keypoints, descriptors, 8)
return descriptors, scores
filename = "/kaggle/input/img-dataset/test_data/21000.jpg"
def resize_image_cuda(img, resize=(640, 400)):
h, w, _ = img.shape
scale_w = resize[0] / w
scale_h = resize[1] / h
img = cv2.resize(img, resize)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = torch.from_numpy(img)[None][None] / 255.0
return img.cuda(), (scale_w, scale_h)
def resize_image_numpy(img, resize=(640, 400)):
h, w, _ = img.shape
scale_w = resize[0] / w
scale_h = resize[1] / h
img = cv2.resize(img, resize)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = img[None][None] / 255.0
return img, (scale_w, scale_h)
def export_onnx_new():
device = torch.device("cuda")
superpoint_onnx = SmallSuperPoint_Interpolate()
superpoint_onnx.load_state_dict(
torch.load(
"/kaggle/input/model-weight/small_superPointNet_ws_10000_checkpoint.pth.tar"
)["model_state_dict"]
)
superpoint_onnx = superpoint_onnx.eval().to(device)
img = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
superpoint_input, _ = resize_image_cuda(img)
# export to onnx
torch.onnx.export(
superpoint_onnx, # model being run
superpoint_input, # model input (or a tuple for multiple inputs)
"my_model.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=11,
input_names=["image"], # the model's input names
output_names=["descriptors", "scores"], # the model's output names
)
export_onnx_new()
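# The onnx / onnxruntime imports above are otherwise unused; a minimal sanity check of the
# exported file could look like the sketch below (it assumes "my_model.onnx" was written by
# export_onnx_new() and reuses `filename` and `resize_image_numpy` from above).
onnx_model = onnx.load("my_model.onnx")
onnx.checker.check_model(onnx_model)  # structural validity check
sess = onnxruntime.InferenceSession("my_model.onnx", providers=["CPUExecutionProvider"])
img_check = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
ort_input, _ = resize_image_numpy(img_check)
descriptors_onnx, scores_onnx = sess.run(None, {"image": ort_input.astype(np.float32)})
print("descriptors:", descriptors_onnx.shape, "scores:", scores_onnx.shape)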
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/145/129145947.ipynb
| null | null |
[{"Id": 129145947, "ScriptId": 38390410, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1896085, "CreationDate": "05/11/2023 10:35:44", "VersionNumber": 1.0, "Title": "fixed_SmallSuperPoint_Interpolate", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 160.0, "LinesInsertedFromPrevious": 160.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,226 | 0 | 2,226 | 2,226 |
||
129145863
|
<jupyter_start><jupyter_text>Indian Food Images Dataset
### Context
Indian cuisine consists of a variety of regional and traditional cuisines native to the Indian subcontinent. Given the diversity in soil, climate, culture, ethnic groups, and occupations, these cuisines vary substantially and use locally available spices, herbs, vegetables, and fruits. Indian food is also heavily influenced by religion, in particular Hinduism, cultural choices, and traditions. Centuries of Islamic rule, particularly by the Mughals, also introduced dishes like samosa and pilaf.
Historical events such as invasions, trade relations, and colonialism have played a role in introducing certain foods to this country. The Columbian discovery of the New World brought a number of new vegetables and fruit to India. A number of these such as the potato, tomatoes, chilies, peanuts, and Guava have become staples in many regions of India. Indian cuisine has shaped the history of international relations; the spice trade between India and Europe was the primary catalyst for Europe's Age of Discovery. Spices were bought from India and traded around Europe and Asia. Indian cuisine has influenced other cuisines across the world, especially those from Europe (especially Britain), the Middle East, Southern African, East Africa, Southeast Asia, North America, Mauritius, Fiji, Oceania, and the Caribbean.
### Content
In this Dataset, we have 4000 Indian Food Images in 80 different categories or classes.
### Structure of the Dataset

### Acknowledgment
This Dataset is created from Google Images: https://images.google.com/. If you want to learn more, you can visit the Website.
Cover Photo: https://www.eazydiner.com/food-trends/the-indian-food-trail-best-dishes-from-india
Kaggle dataset identifier: indian-food-images-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Assignment 2
# ### Sai Sylesh Gupta Namburu, CB.EN.P2DSC22007
# ## Data Set - https://www.kaggle.com/datasets/iamsouravbanerjee/indian-food-images-dataset
# # Importing the Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import os.path
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense, GlobalAveragePooling2D
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from keras.models import Model
from sklearn.svm import SVC
from tabulate import tabulate
# ## Data Processing - Segregation into different classes, Training, Testing
Image_Directory = Path(
"/kaggle/input/indian-food-images-dataset/Indian Food Images/Indian Food Images"
)
File_Path = list(Image_Directory.glob(r"**/*.jpg"))
Label = list(map(lambda x: os.path.split(os.path.split(x)[0])[1], File_Path))
File_Path = pd.Series(File_Path, name="Filepath").astype(str)
Label = pd.Series(Label, name="Label")
Images_Df = (
pd.concat([File_Path, Label], axis=1)
.sample(frac=1.0, random_state=1)
.reset_index(drop=True)
)
Images_Df.head()
Images_Df["Label"]
# # Exploratory Data Analysis
print("Total Number of Food Dishes available are:", len(np.unique(Images_Df["Label"])))
print("")
print("The Food Dishes are:", np.unique(Images_Df["Label"]))
# **Data Splitting**
Train_Data, Test_Data = train_test_split(
Images_Df, test_size=0.30, shuffle=True, random_state=43
)
Train_Gen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, validation_split=0.2
)
test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_image = Train_Gen.flow_from_dataframe(
dataframe=Train_Data,
x_col="Filepath",
y_col="Label",
target_size=(224, 224),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
shuffle=True,
seed=42,
subset="training",
)
val_image = Train_Gen.flow_from_dataframe(
dataframe=Train_Data,
x_col="Filepath",
y_col="Label",
target_size=(224, 224),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
shuffle=True,
seed=42,
subset="validation",
)
test_image = test_gen.flow_from_dataframe(
dataframe=Test_Data,
x_col="Filepath",
y_col="Label",
target_size=(224, 224),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
shuffle=False,
)
# ## Transfer Learning and Inception Modelling
InceptionV3_Model = tf.keras.applications.InceptionV3(
weights="imagenet", include_top=False, input_shape=(224, 224, 3)
)
# Note: Keras layers are trainable by default, so this loop is effectively a no-op;
# freezing the backbone except its last 15 layers would require trainable = False here.
for layer in InceptionV3_Model.layers[:-15]:
    layer.trainable = True
model = Sequential()
model.add(InceptionV3_Model)
model.add(GlobalAveragePooling2D())
model.add(Dense(1024, activation="relu"))
model.add(Dense(80, activation="softmax"))
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(train_image, epochs=10, validation_data=val_image)
Prediction = model.predict(test_image)
Prediction = np.argmax(Prediction, axis=1) # convert probabilities to class labels
True_Values = test_image.classes
Inception_Accuracy = accuracy_score(True_Values, Prediction)
Inception_Recall = recall_score(True_Values, Prediction, average="weighted")
Inception_Precision = precision_score(True_Values, Prediction, average="weighted")
Inception_F1 = f1_score(True_Values, Prediction, average="weighted")
print("Accuracy Score:", Inception_Accuracy)
print("Recall Score:", Inception_Recall)
print("Precision Score:", Inception_Precision)
print("F1-Score:", Inception_F1)
# ## Transfer Learning and Xception Modelling
Xception_model = Sequential()
Xception_Base_Model = tf.keras.applications.xception.Xception(
    include_top=False, weights="imagenet", input_shape=(224, 224, 3)
)  # input size matches the 224x224 images produced by the generators above
print(f"Number of layers in Xception : {len(Xception_Base_Model.layers)}")
for layer in Xception_Base_Model.layers[:]:
layer.trainable = False
for layer in Xception_Base_Model.layers[90:]:
layer.trainable = True
Xception_model.add(Xception_Base_Model)
Xception_model.add(GlobalAveragePooling2D())
Xception_model.add(Dense(1024, activation="relu"))
Xception_model.add(Dense(units=80, activation="softmax"))
Xception_model.summary()
Xception_model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
Xception_model.fit(train_image, epochs=10, validation_data=val_image)
Xception_Prediction = Xception_model.predict(test_image)
Xception_Prediction = np.argmax(
Xception_Prediction, axis=1
) # convert probabilities to class labels
True_Values = test_image.classes
Xception_Accuracy = accuracy_score(True_Values, Xception_Prediction)
Xception_Recall = recall_score(True_Values, Xception_Prediction, average="weighted")
Xception_Precision = precision_score(
True_Values, Xception_Prediction, average="weighted"
)
Xception_F1 = f1_score(True_Values, Xception_Prediction, average="weighted")
print("Accuracy Score:", Xception_Accuracy)
print("Recall Score:", Xception_Recall)
print("Precision Score:", Xception_Precision)
print("F1-Score:", Xception_F1)
# ## Inferences with Justifications (Inception v/s Xception)
Table = [
["Accuracy Score:", Inception_Accuracy, Xception_Accuracy],
["Precision Score:", Inception_Precision, Xception_Precision],
["Recall Score:", Inception_Recall, Xception_Recall],
["F1 Score:", Inception_F1, Xception_F1],
]
head = ["Scores", "Inception", "Xception"]
print(tabulate(Table, headers=head, tablefmt="grid"))
# Based on the performance measures, the Xception architecture outperformed the Inception architecture. Below are the inferences:
# - **Accuracy Score**: The Xception architecture achieved an accuracy score of 0.520833, which is almost twice as high as the Inception architecture's score of 0.258333. This indicates that the Xception model was better at correctly classifying the test set images.
# - **Precision Score**: The Xception architecture achieved a precision score of 0.601681, which is higher than the Inception architecture's score of 0.394582. This suggests that the Xception model was better at correctly identifying true positives (i.e., correctly predicting positive cases) while avoiding false positives (i.e., incorrectly predicting positive cases).
# - **Recall Score**: The recall score measures the ability of the model to correctly identify true positives out of all actual positives. The Xception architecture achieved a recall score of 0.520833, which is higher than the Inception architecture's score of 0.258333. This suggests that the Xception model was better at detecting true positives among all actual positive cases.
# - **F1 Score**: The F1 score is a harmonic mean of the precision and recall scores, and thus gives a balanced measure of the model's performance. The Xception architecture achieved an F1 score of 0.521496, which is higher than the Inception architecture's score of 0.254289. This indicates that the Xception model achieved a better balance between precision and recall compared to the Inception model.
# Overall, the higher performance measures achieved by the Xception model suggest that it is a better choice compared to the Inception model for this task. However, it is important to note that these conclusions are specific to the dataset and task used for evaluation, and may not generalize to other datasets or tasks.
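# As a quick illustration of how the weighted scores above are computed (a toy sketch on made-up
# labels, unrelated to the food data): per-class precision/recall/F1 are averaged, weighted by
# each class's support.
from sklearn.metrics import precision_recall_fscore_support

y_true_toy = [0, 0, 1, 1, 2, 2, 2]
y_pred_toy = [0, 1, 1, 1, 2, 0, 2]
toy_prec, toy_rec, toy_f1, _ = precision_recall_fscore_support(
    y_true_toy, y_pred_toy, average="weighted"
)
print("Toy weighted precision/recall/F1:", toy_prec, toy_rec, toy_f1)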
# ## Choosing the Deep Features from the Dense Layer from the Inception Model
Dense_Features = Model(inputs=model.input, outputs=model.get_layer(index=-2).output)
model.get_layer(index=-2).output
Train_Features = np.array(Dense_Features.predict(train_image))
X_Train = np.reshape(Train_Features, (Train_Features.shape[0], Train_Features.shape[1]))
Test_Features = np.array(Dense_Features.predict(test_image))
X_Test = np.reshape(Test_Features, (Test_Features.shape[0], Test_Features.shape[1]))
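# Note: `train_image` was created with shuffle=True, so the row order of
# `Dense_Features.predict(train_image)` is not guaranteed to line up with `train_image.classes`,
# which is in dataframe order. A safer variant (a sketch reusing `Train_Gen` and `Train_Data`
# from above) extracts the deep features from a non-shuffled generator:
train_image_ordered = Train_Gen.flow_from_dataframe(
    dataframe=Train_Data,
    x_col="Filepath",
    y_col="Label",
    target_size=(224, 224),
    batch_size=32,
    color_mode="rgb",
    class_mode="categorical",
    shuffle=False,
    subset="training",
)
Ordered_Features = np.array(Dense_Features.predict(train_image_ordered))
X_Train_Ordered = Ordered_Features.reshape(len(Ordered_Features), -1)
y_Train_Ordered = train_image_ordered.classes  # labels now aligned with X_Train_Ordered
# X_Train_Ordered / y_Train_Ordered could then replace X_Train / train_image.classes below.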
# **Support Vector Machine Model**
Model_SVC = SVC(kernel="rbf")
Model_SVC.fit(X_Train, train_image.classes)
Prediction = Model_SVC.predict(X_Test)
Inception_ML_Accuracy = accuracy_score(Prediction, test_image.classes)
Inception_ML_Recall = recall_score(Prediction, test_image.classes, average="weighted")
Inception_ML_Precision = precision_score(
Prediction, test_image.classes, average="weighted"
)
Inception_ML_F1 = f1_score(Prediction, test_image.classes, average="weighted")
print("Accuracy is:", Inception_ML_Accuracy)
print("Precision is:", Inception_ML_Precision)
print("Recall is:", Inception_ML_Recall)
print("F1 Score is:", Inception_ML_F1)
# **Logistic Regression**
from sklearn.linear_model import LogisticRegression
Model_LR = LogisticRegression()
Model_LR.fit(X_Train, train_image.classes)
Prediction = Model_LR.predict(X_Test)
LR_Inception_ML_Accuracy = accuracy_score(Prediction, test_image.classes)
LR_Inception_ML_Recall = recall_score(
Prediction, test_image.classes, average="weighted"
)
LR_Inception_ML_Precision = precision_score(
Prediction, test_image.classes, average="weighted"
)
LR_Inception_ML_F1 = f1_score(Prediction, test_image.classes, average="weighted")
print("Accuracy is:", LR_Inception_ML_Accuracy)
print("Precision is:", LR_Inception_ML_Precision)
print("Recall is:", LR_Inception_ML_Recall)
print("F1 Score is:", LR_Inception_ML_F1)
# **K-Nearest Neighbours**
from sklearn.neighbors import KNeighborsClassifier
KNN_Model = KNeighborsClassifier(n_neighbors=5)
KNN_Model.fit(X_Train, train_image.classes)
Prediction_KNN = KNN_Model.predict(X_Test)
KNN_Inception_ML_Accuracy = accuracy_score(Prediction_KNN, test_image.classes)
KNN_Inception_ML_Recall = recall_score(
Prediction_KNN, test_image.classes, average="weighted"
)
KNN_Inception_ML_Precision = precision_score(
Prediction_KNN, test_image.classes, average="weighted"
)
KNN_Inception_ML_F1 = f1_score(Prediction_KNN, test_image.classes, average="weighted")
print("Accuracy is:", KNN_Inception_ML_Accuracy)
print("Precision is:", KNN_Inception_ML_Precision)
print("Recall is:", KNN_Inception_ML_Recall)
print("F1 Score is:", KNN_Inception_ML_F1)
# **Naive Bayes**
from sklearn.naive_bayes import GaussianNB
GNB_Model = GaussianNB()
GNB_Model.fit(X_Train, train_image.classes)
Prediction_GNB = GNB_Model.predict(X_Test)
GNB_Inception_ML_Accuracy = accuracy_score(Prediction_GNB, test_image.classes)
GNB_Inception_ML_Recall = recall_score(
Prediction_GNB, test_image.classes, average="weighted"
)
GNB_Inception_ML_Precision = precision_score(
Prediction_GNB, test_image.classes, average="weighted"
)
GNB_Inception_ML_F1 = f1_score(Prediction_GNB, test_image.classes, average="weighted")
print("Accuracy is:", GNB_Inception_ML_Accuracy)
print("Precision is:", GNB_Inception_ML_Precision)
print("Recall is:", GNB_Inception_ML_Recall)
print("F1 Score is:", GNB_Inception_ML_F1)
# # Choosing the Deep Features from the Dense Layer from the Xception Model
Xception_Dense_Features = Model(
inputs=Xception_model.input, outputs=Xception_model.get_layer(index=-2).output
)
Xception_Train_Features = np.array(Xception_Dense_Features.predict(train_image))
Xception_X_Train = np.reshape(
Xception_Train_Features,
(Xception_Train_Features.shape[0], Xception_Train_Features.shape[1]),
)
Xception_Test_Features = np.array(Xception_Dense_Features.predict(test_image))
Xception_X_Test = np.reshape(
Xception_Test_Features,
(Xception_Test_Features.shape[0], Xception_Test_Features.shape[1]),
)
# #### Support Vector Machine
Xception_SVC = SVC(kernel="rbf")
Xception_SVC.fit(Xception_X_Train, train_image.classes)
Xception_SVC_Prediction = Xception_SVC.predict(Xception_X_Test)
Exception_ML_Accuracy = accuracy_score(Xception_SVC_Prediction, test_image.classes)
Exception_ML_Recall = recall_score(
Xception_SVC_Prediction, test_image.classes, average="weighted"
)
Exception_ML_Precision = precision_score(
Xception_SVC_Prediction, test_image.classes, average="weighted"
)
Exception_ML_F1 = f1_score(
Xception_SVC_Prediction, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_ML_Accuracy)
print("Precision is:", Exception_ML_Precision)
print("Recall is:", Exception_ML_Recall)
print("F1 Score is:", Exception_ML_F1)
# #### Naive Bayes
Xception_GNB_Model = GaussianNB()
Xception_GNB_Model.fit(Xception_X_Train, train_image.classes)
Xception_Prediction_GNB = Xception_GNB_Model.predict(Xception_X_Test)
Exception_GNB_Accuracy = accuracy_score(Xception_Prediction_GNB, test_image.classes)
Exception_GNB_Recall = recall_score(
Xception_Prediction_GNB, test_image.classes, average="weighted"
)
Exception_GNB_Precision = precision_score(
Xception_Prediction_GNB, test_image.classes, average="weighted"
)
Exception_GNB_F1 = f1_score(
Xception_Prediction_GNB, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_GNB_Accuracy)
print("Precision is:", Exception_GNB_Precision)
print("Recall is:", Exception_GNB_Recall)
print("F1 Score is:", Exception_GNB_F1)
# #### K-Nearest Neighbours
Xception_KNN_Model = KNeighborsClassifier(n_neighbors=5)
Xception_KNN_Model.fit(Xception_X_Train, train_image.classes)
Xception_Prediction_KNN = Xception_KNN_Model.predict(Xception_X_Test)
Exception_KNN_Accuracy = accuracy_score(Xception_Prediction_KNN, test_image.classes)
Exception_KNN_Recall = recall_score(
Xception_Prediction_KNN, test_image.classes, average="weighted"
)
Exception_KNN_Precision = precision_score(
Xception_Prediction_KNN, test_image.classes, average="weighted"
)
Exception_KNN_F1 = f1_score(
Xception_Prediction_KNN, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_KNN_Accuracy)
print("Precision is:", Exception_KNN_Precision)
print("Recall is:", Exception_KNN_Recall)
print("F1 Score is:", Exception_KNN_F1)
# #### Logistic Regression
Xception_Model_LR = LogisticRegression()
Xception_Model_LR.fit(Xception_X_Train, train_image.classes)
Xception_Prediction_LR = Xception_Model_LR.predict(Xception_X_Test)
Exception_LR_Accuracy = accuracy_score(Xception_Prediction_LR, test_image.classes)
Exception_LR_Recall = recall_score(
Xception_Prediction_LR, test_image.classes, average="weighted"
)
Exception_LR_Precision = precision_score(
Xception_Prediction_LR, test_image.classes, average="weighted"
)
Exception_LR_F1 = f1_score(
Xception_Prediction_LR, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_LR_Accuracy)
print("Precision is:", Exception_LR_Precision)
print("Recall is:", Exception_LR_Recall)
print("F1 Score is:", Exception_LR_F1)
# ## Inferences and Justification on Machine Learning Model Performances
Table = [
[
"Accuracy Score:",
KNN_Inception_ML_Accuracy,
Exception_KNN_Accuracy,
GNB_Inception_ML_Accuracy,
Exception_GNB_Accuracy,
LR_Inception_ML_Accuracy,
Exception_LR_Accuracy,
Inception_ML_Accuracy,
Exception_ML_Accuracy,
],
[
"Precision Score:",
KNN_Inception_ML_Precision,
Exception_KNN_Precision,
GNB_Inception_ML_Precision,
Exception_GNB_Precision,
LR_Inception_ML_Precision,
Exception_LR_Precision,
Inception_ML_Precision,
Exception_ML_Precision,
],
[
"Recall Score:",
KNN_Inception_ML_Recall,
Exception_KNN_Recall,
GNB_Inception_ML_Recall,
Exception_GNB_Recall,
LR_Inception_ML_Recall,
Exception_LR_Recall,
Inception_ML_Recall,
Exception_ML_Recall,
],
[
"F1 Score:",
KNN_Inception_ML_F1,
Exception_KNN_F1,
GNB_Inception_ML_F1,
Exception_GNB_F1,
LR_Inception_ML_F1,
Exception_LR_F1,
Inception_ML_F1,
Exception_ML_F1,
],
]
head = [
"Scores",
"Inception_KNN",
"Xception_KNN",
"Inc_Naive_Bayes",
"Xce_Naive_Bayes",
"Inc_Logistic_Regression",
"Xce_Logistic_Regression",
"Inc_SVM",
"Xce_SVM",
]
print(tabulate(Table, headers=head, tablefmt="grid"))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/145/129145863.ipynb
|
indian-food-images-dataset
|
iamsouravbanerjee
|
[{"Id": 129145863, "ScriptId": 38362788, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5064487, "CreationDate": "05/11/2023 10:34:59", "VersionNumber": 1.0, "Title": "Transfer Learning with Inception and Exception", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 366.0, "LinesInsertedFromPrevious": 366.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184936446, "KernelVersionId": 129145863, "SourceDatasetVersionId": 4077749}]
|
[{"Id": 4077749, "DatasetId": 1243366, "DatasourceVersionId": 4133874, "CreatorUserId": 7024923, "LicenseName": "Other (specified in description)", "CreationDate": "08/16/2022 07:59:35", "VersionNumber": 5.0, "Title": "Indian Food Images Dataset", "Slug": "indian-food-images-dataset", "Subtitle": "This Dataset consist of 4000 Indian Food Images in 80 different Classes", "Description": "### Context\n\nIndian cuisine consists of a variety of regional and traditional cuisines native to the Indian subcontinent. Given the diversity in soil, climate, culture, ethnic groups, and occupations, these cuisines vary substantially and use locally available spices, herbs, vegetables, and fruits. Indian food is also heavily influenced by religion, in particular Hinduism, cultural choices, and traditions. Centuries of Islamic rule, particularly by the Mughals, also introduced dishes like samosa and pilaf.\n\nHistorical events such as invasions, trade relations, and colonialism have played a role in introducing certain foods to this country. The Columbian discovery of the New World brought a number of new vegetables and fruit to India. A number of these such as the potato, tomatoes, chilies, peanuts, and Guava have become staples in many regions of India. Indian cuisine has shaped the history of international relations; the spice trade between India and Europe was the primary catalyst for Europe's Age of Discovery. Spices were bought from India and traded around Europe and Asia. Indian cuisine has influenced other cuisines across the world, especially those from Europe (especially Britain), the Middle East, Southern African, East Africa, Southeast Asia, North America, Mauritius, Fiji, Oceania, and the Caribbean.\n\n### Content\n\nIn this Dataset, we have 4000 Indian Food Images in 80 different categories or classes.\n\n### Structure of the Dataset\n\n\n\n### Acknowledgment\n\nThis Dataset is created from Google Images: https://images.google.com/. If you want to learn more, you can visit the Website.\n\nCover Photo: https://www.eazydiner.com/food-trends/the-indian-food-trail-best-dishes-from-india", "VersionNotes": "New Information Added", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1243366, "CreatorUserId": 7024923, "OwnerUserId": 7024923.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4077749.0, "CurrentDatasourceVersionId": 4133874.0, "ForumId": 1261640, "Type": 2, "CreationDate": "03/31/2021 11:23:57", "LastActivityDate": "03/31/2021", "TotalViews": 24524, "TotalDownloads": 2496, "TotalVotes": 134, "TotalKernels": 8}]
|
[{"Id": 7024923, "UserName": "iamsouravbanerjee", "DisplayName": "Sourav Banerjee", "RegisterDate": "03/25/2021", "PerformanceTier": 4}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Assignment 2
# ### Sai Sylesh Gupta Namburu, CB.EN.P2DSC22007
# ## Data Set - https://www.kaggle.com/datasets/iamsouravbanerjee/indian-food-images-dataset
# # Importing the Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import os.path
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense, GlobalAveragePooling2D
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from keras.models import Model
from sklearn.svm import SVC
from tabulate import tabulate
# ## Data Processing - Segregation into different classes, Training, Testing
Image_Directory = Path(
"/kaggle/input/indian-food-images-dataset/Indian Food Images/Indian Food Images"
)
File_Path = list(Image_Directory.glob(r"**/*.jpg"))
Label = list(map(lambda x: os.path.split(os.path.split(x)[0])[1], File_Path))
File_Path = pd.Series(File_Path, name="Filepath").astype(str)
Label = pd.Series(Label, name="Label")
Images_Df = (
pd.concat([File_Path, Label], axis=1)
.sample(frac=1.0, random_state=1)
.reset_index(drop=True)
)
Images_Df.head()
Images_Df["Label"]
# # Exploratory Data Analysis
print("Total Number of Food Dishes available are:", len(np.unique(Images_Df["Label"])))
print("")
print("The Food Dishes are:", np.unique(Images_Df["Label"]))
# **Data Splitting**
Train_Data, Test_Data = train_test_split(
Images_Df, test_size=0.30, shuffle=True, random_state=43
)
Train_Gen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, validation_split=0.2
)
test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_image = Train_Gen.flow_from_dataframe(
dataframe=Train_Data,
x_col="Filepath",
y_col="Label",
target_size=(224, 224),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
shuffle=True,
seed=42,
subset="training",
)
val_image = Train_Gen.flow_from_dataframe(
dataframe=Train_Data,
x_col="Filepath",
y_col="Label",
target_size=(224, 224),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
shuffle=True,
seed=42,
subset="validation",
)
test_image = test_gen.flow_from_dataframe(
dataframe=Test_Data,
x_col="Filepath",
y_col="Label",
target_size=(224, 224),
batch_size=32,
color_mode="rgb",
class_mode="categorical",
shuffle=False,
)
# ## Transfer Learning and Inception Modelling
InceptionV3_Model = tf.keras.applications.InceptionV3(
weights="imagenet", include_top=False, input_shape=(224, 224, 3)
)
for layer in InceptionV3_Model.layers[:-15]:
layer.trainable = True
model = Sequential()
model.add(InceptionV3_Model)
model.add(GlobalAveragePooling2D())
model.add(Dense(1024, activation="relu"))
model.add(Dense(80, activation="softmax"))
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(train_image, epochs=10, validation_data=val_image)
Prediction = model.predict(test_image)
Prediction = np.argmax(Prediction, axis=1) # convert probabilities to class labels
True_Values = test_image.classes
Inception_Accuracy = accuracy_score(True_Values, Prediction)
Inception_Recall = recall_score(True_Values, Prediction, average="weighted")
Inception_Precision = precision_score(True_Values, Prediction, average="weighted")
Inception_F1 = f1_score(True_Values, Prediction, average="weighted")
print("Accuracy Score:", Inception_Accuracy)
print("Recall Score:", Inception_Recall)
print("Precision Score:", Inception_Precision)
print("F1-Score:", Inception_F1)
# ## Transfer Learning and Exception Modelling
Xception_model = Sequential()
Xception_Base_Model = tf.keras.applications.xception.Xception(
include_top=False, weights="imagenet", input_shape=(229, 229, 3)
)
print(f"Number of layers in Xception : {len(Xception_Base_Model.layers)}")
for layer in Xception_Base_Model.layers[:]:
layer.trainable = False
for layer in Xception_Base_Model.layers[90:]:
layer.trainable = True
Xception_model.add(Xception_Base_Model)
Xception_model.add(GlobalAveragePooling2D())
Xception_model.add(Dense(1024, activation="relu"))
Xception_model.add(Dense(units=80, activation="softmax"))
Xception_model.summary()
Xception_model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
Xception_model.fit(train_image, epochs=10, validation_data=val_image)
Xception_Prediction = Xception_model.predict(test_image)
Xception_Prediction = np.argmax(
Xception_Prediction, axis=1
) # convert probabilities to class labels
True_Values = test_image.classes
Xception_Accuracy = accuracy_score(True_Values, Xception_Prediction)
Xception_Recall = recall_score(True_Values, Xception_Prediction, average="weighted")
Xception_Precision = precision_score(
True_Values, Xception_Prediction, average="weighted"
)
Xception_F1 = f1_score(True_Values, Xception_Prediction, average="weighted")
print("Accuracy Score:", Xception_Accuracy)
print("Recall Score:", Xception_Recall)
print("Precision Score:", Xception_Precision)
print("F1-Score:", Xception_F1)
# ## Inferences with Justifications (Inception v/s Xception)
Table = [
["Accuracy Score:", Inception_Accuracy, Xception_Accuracy],
["Precision Score:", Inception_Precision, Xception_Precision],
["Recall Score:", Inception_Recall, Xception_Recall],
["F1 Score:", Inception_F1, Xception_F1],
]
head = ["Scores", "Inception", "Xception"]
print(tabulate(Table, headers=head, tablefmt="grid"))
# Based on the performance measures, the Xception architecture outperformed the Inception architecture. Below are the inferences:
# - **Accuracy Score**: The Xception architecture achieved an accuracy score of 0.520833, which is almost twice as high as the Inception architecture's score of 0.258333. This indicates that the Xception model was better at correctly classifying the test set images.
# - **Precision Score**: The Xception architecture achieved a precision score of 0.601681, which is higher than the Inception architecture's score of 0.394582. This suggests that the Xception model was better at correctly identifying true positives (i.e., correctly predicting positive cases) while avoiding false positives (i.e., incorrectly predicting positive cases).
# - **Recall Score**: The recall score measures the ability of the model to correctly identify true positives out of all actual positives. The Xception architecture achieved a recall score of 0.520833, which is higher than the Inception architecture's score of 0.258333. This suggests that the Xception model was better at detecting true positives among all actual positive cases.
# - **F1 Score**: The F1 score is a harmonic mean of the precision and recall scores, and thus gives a balanced measure of the model's performance. The Xception architecture achieved an F1 score of 0.521496, which is higher than the Inception architecture's score of 0.254289. This indicates that the Xception model achieved a better balance between precision and recall compared to the Inception model.
# Overall, the higher performance measures achieved by the Xception model suggest that it is a better choice compared to the Inception model for this task. However, it is important to note that these conclusions are specific to the dataset and task used for evaluation, and may not generalize to other datasets or tasks.
# ## Choosing the Deep Features from the Dense Layer from the Inception Model
Dense_Features = Model(inputs=model.input, outputs=model.get_layer(index=-2).output)
model.get_layer(index=-2).output
Train_Features = np.array(Dense_Features.predict(train_image))
X_Train = np.reshape(Train_Features, (Train_Features.shape[0], Train_Features.shape[1]))
Test_Features = np.array(Dense_Features.predict(test_image))
X_Test = np.reshape(Test_Features, (Test_Features.shape[0], Test_Features.shape[1]))
# **Support Vector Machine Model**
Model_SVC = SVC(kernel="rbf")
Model_SVC.fit(X_Train, train_image.classes)
Prediction = Model_SVC.predict(X_Test)
Inception_ML_Accuracy = accuracy_score(Prediction, test_image.classes)
Inception_ML_Recall = recall_score(Prediction, test_image.classes, average="weighted")
Inception_ML_Precision = precision_score(
Prediction, test_image.classes, average="weighted"
)
Inception_ML_F1 = f1_score(Prediction, test_image.classes, average="weighted")
print("Accuracy is:", Inception_ML_Accuracy)
print("Precision is:", Inception_ML_Precision)
print("Recall is:", Inception_ML_Recall)
print("F1 Score is:", Inception_ML_F1)
# **Logistic Regression**
from sklearn.linear_model import LogisticRegression
Model_LR = LogisticRegression()
Model_LR.fit(X_Train, train_image.classes)
Prediction = Model_LR.predict(X_Test)
LR_Inception_ML_Accuracy = accuracy_score(Prediction, test_image.classes)
LR_Inception_ML_Recall = recall_score(
Prediction, test_image.classes, average="weighted"
)
LR_Inception_ML_Precision = precision_score(
Prediction, test_image.classes, average="weighted"
)
LR_Inception_ML_F1 = f1_score(Prediction, test_image.classes, average="weighted")
print("Accuracy is:", LR_Inception_ML_Accuracy)
print("Precision is:", LR_Inception_ML_Precision)
print("Recall is:", LR_Inception_ML_Recall)
print("F1 Score is:", LR_Inception_ML_F1)
# **K-Nearest Neighbours**
from sklearn.neighbors import KNeighborsClassifier
KNN_Model = KNeighborsClassifier(n_neighbors=5)
KNN_Model.fit(X_Train, train_image.classes)
Prediction_KNN = KNN_Model.predict(X_Test)
KNN_Inception_ML_Accuracy = accuracy_score(Prediction_KNN, test_image.classes)
KNN_Inception_ML_Recall = recall_score(
Prediction_KNN, test_image.classes, average="weighted"
)
KNN_Inception_ML_Precision = precision_score(
Prediction_KNN, test_image.classes, average="weighted"
)
KNN_Inception_ML_F1 = f1_score(Prediction_KNN, test_image.classes, average="weighted")
print("Accuracy is:", KNN_Inception_ML_Accuracy)
print("Precision is:", KNN_Inception_ML_Precision)
print("Recall is:", KNN_Inception_ML_Recall)
print("F1 Score is:", KNN_Inception_ML_F1)
# **Naive Bayes**
from sklearn.naive_bayes import GaussianNB
GNB_Model = GaussianNB()
GNB_Model.fit(X_Train, train_image.classes)
Prediction_GNB = GNB_Model.predict(X_Test)
GNB_Inception_ML_Accuracy = accuracy_score(Prediction_GNB, test_image.classes)
GNB_Inception_ML_Recall = recall_score(
Prediction_GNB, test_image.classes, average="weighted"
)
GNB_Inception_ML_Precision = precision_score(
Prediction_GNB, test_image.classes, average="weighted"
)
GNB_Inception_ML_F1 = f1_score(Prediction_GNB, test_image.classes, average="weighted")
print("Accuracy is:", GNB_Inception_ML_Accuracy)
print("Precision is:", GNB_Inception_ML_Precision)
print("Recall is:", GNB_Inception_ML_Recall)
print("F1 Score is:", GNB_Inception_ML_F1)
# # Choosing the Deep Features from the Dense Layer from the Xception Model
Xception_Dense_Features = Model(
inputs=Xception_model.input, outputs=Xception_model.get_layer(index=-2).output
)
Xception_Train_Features = np.array(Xception_Dense_Features.predict(train_image))
Xception_X_Train = np.reshape(
Xception_Train_Features,
(Xception_Train_Features.shape[0], Xception_Train_Features.shape[1]),
)
Xception_Test_Features = np.array(Xception_Dense_Features.predict(test_image))
Xception_X_Test = np.reshape(
Xception_Test_Features,
(Xception_Test_Features.shape[0], Xception_Test_Features.shape[1]),
)
# #### Support Vector Machine
Xception_SVC = SVC(kernel="rbf")
Xception_SVC.fit(Xception_X_Train, train_image.classes)
Xception_SVC_Prediction = Xception_SVC.predict(Xception_X_Test)
Exception_ML_Accuracy = accuracy_score(Xception_SVC_Prediction, test_image.classes)
Exception_ML_Recall = recall_score(
Xception_SVC_Prediction, test_image.classes, average="weighted"
)
Exception_ML_Precision = precision_score(
Xception_SVC_Prediction, test_image.classes, average="weighted"
)
Exception_ML_F1 = f1_score(
Xception_SVC_Prediction, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_ML_Accuracy)
print("Precision is:", Exception_ML_Precision)
print("Recall is:", Exception_ML_Recall)
print("F1 Score is:", Exception_ML_F1)
# #### Naive Bayes
Xception_GNB_Model = GaussianNB()
Xception_GNB_Model.fit(Xception_X_Train, train_image.classes)
Xception_Prediction_GNB = Xception_GNB_Model.predict(Xception_X_Test)
Exception_GNB_Accuracy = accuracy_score(Xception_Prediction_GNB, test_image.classes)
Exception_GNB_Recall = recall_score(
Xception_Prediction_GNB, test_image.classes, average="weighted"
)
Exception_GNB_Precision = precision_score(
Xception_Prediction_GNB, test_image.classes, average="weighted"
)
Exception_GNB_F1 = f1_score(
Xception_Prediction_GNB, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_GNB_Accuracy)
print("Precision is:", Exception_GNB_Precision)
print("Recall is:", Exception_GNB_Recall)
print("F1 Score is:", Exception_GNB_F1)
# #### K-Nearest Neighbours
Xception_KNN_Model = KNeighborsClassifier(n_neighbors=5)
Xception_KNN_Model.fit(Xception_X_Train, train_image.classes)
Xception_Prediction_KNN = Xception_KNN_Model.predict(Xception_X_Test)
Exception_KNN_Accuracy = accuracy_score(Xception_Prediction_KNN, test_image.classes)
Exception_KNN_Recall = recall_score(
Xception_Prediction_KNN, test_image.classes, average="weighted"
)
Exception_KNN_Precision = precision_score(
Xception_Prediction_KNN, test_image.classes, average="weighted"
)
Exception_KNN_F1 = f1_score(
Xception_Prediction_KNN, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_KNN_Accuracy)
print("Precision is:", Exception_KNN_Precision)
print("Recall is:", Exception_KNN_Recall)
print("F1 Score is:", Exception_KNN_F1)
# #### Logistic Regression
Xception_Model_LR = LogisticRegression()
Xception_Model_LR.fit(Xception_X_Train, train_image.classes)
Xception_Prediction_LR = Model_LR.predict(Xception_X_Test)
Exception_LR_Accuracy = accuracy_score(Xception_Prediction_LR, test_image.classes)
Exception_LR_Recall = recall_score(
Xception_Prediction_LR, test_image.classes, average="weighted"
)
Exception_LR_Precision = precision_score(
Xception_Prediction_LR, test_image.classes, average="weighted"
)
Exception_LR_F1 = f1_score(
Xception_Prediction_LR, test_image.classes, average="weighted"
)
print("Accuracy is:", Exception_LR_Accuracy)
print("Precision is:", Exception_LR_Precision)
print("Recall is:", Exception_LR_Recall)
print("F1 Score is:", Exception_LR_F1)
# ## Inferences and Justification on Machine Learning Model Performances
Table = [
[
"Accuracy Score:",
KNN_Inception_ML_Accuracy,
Exception_KNN_Accuracy,
GNB_Inception_ML_Accuracy,
Exception_GNB_Accuracy,
LR_Inception_ML_Accuracy,
Exception_LR_Accuracy,
Inception_ML_Accuracy,
Exception_ML_Accuracy,
],
[
"Precision Score:",
KNN_Inception_ML_Precision,
Exception_KNN_Precision,
GNB_Inception_ML_Precision,
Exception_GNB_Precision,
LR_Inception_ML_Precision,
Exception_LR_Precision,
Inception_ML_Precision,
Exception_ML_Precision,
],
[
"Recall Score:",
KNN_Inception_ML_Recall,
Exception_KNN_Recall,
GNB_Inception_ML_Recall,
Exception_GNB_Recall,
LR_Inception_ML_Recall,
Exception_LR_Recall,
Inception_ML_Recall,
Exception_ML_Recall,
],
[
"F1 Score:",
KNN_Inception_ML_F1,
Exception_KNN_F1,
GNB_Inception_ML_F1,
Exception_GNB_F1,
LR_Inception_ML_F1,
Exception_LR_F1,
Inception_ML_F1,
Exception_ML_F1,
],
]
head = [
"Scores",
"Inception_KNN",
"Xception_KNN",
"Inc_Naive_Bayes",
"Xce_Naive_Bayes",
"Inc_Logistic_Regression",
"Xce_Logistic_Regression",
"Inc_SVM",
"Xce_SVM",
]
print(tabulate(Table, headers=head, tablefmt="grid"))
| false | 0 | 5,152 | 0 | 5,654 | 5,152 |
||
129048691
|
<jupyter_start><jupyter_text>Images
Kaggle dataset identifier: images
<jupyter_script>import pandas as pd
import torch
import os
from transformers import Trainer, TrainingArguments
from transformers import DataCollatorForLanguageModeling
from transformers import BlipProcessor, BlipForConditionalGeneration
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import torchvision.transforms as transforms
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# from transformers import AutoProcessor, AutoModelForSeq2SeqLM
# processor = AutoProcessor.from_pretrained("nathansutton/generate-cxr")
# model = AutoModelForSeq2SeqLM.from_pretrained("nathansutton/generate-cxr")
#
processor_path = "/kaggle/input/processor"
model_path = "/kaggle/input/chest-rep-model"
model = BlipForConditionalGeneration.from_pretrained(model_path)
tokenizer = BlipProcessor.from_pretrained(processor_path).tokenizer
processor = BlipProcessor.from_pretrained(processor_path)
import io
from io import BytesIO
from google.cloud import storage
client = storage.Client.from_service_account_json(
"/kaggle/input/gcpkey/august-strata-385919-1f7f245d5414.json"
)
bucket = client.get_bucket("258_project")
df = pd.read_csv("/kaggle/input/pred-train-tags-full1/pred_train_tags_full1.csv")
class CXRDataset(Dataset):
def __init__(self, df, tokenizer, processor, bucket_name, client):
self.df = df
self.tokenizer = tokenizer
self.processor = processor
self.bucket_name = bucket_name
self.client = client
def __getitem__(self, idx):
row = self.df.iloc[idx]
image_path = row["image_path"]
try:
# Get a handle to the bucket and file
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.blob(image_path)
# Read the image bytes from the blob
image_bytes = blob.download_as_bytes()
image = Image.open(BytesIO(image_bytes)).convert("RGB")
except Exception as e:
print(f"Could not read image at {image_path}: {e}")
return None
text = row["pred_tags"]
image_caption_encoding = self.processor(
images=image, text=text, padding="max_length", return_tensors="pt"
)
image_caption_encoding = {
k: v.squeeze() for k, v in image_caption_encoding.items()
}
return image_caption_encoding
def __len__(self):
return len(self.df)
train_df = df.iloc[1:500]
valid_df = df.iloc[501:550]
train_dataset = CXRDataset(train_df, tokenizer, processor, bucket, client)
valid_dataset = CXRDataset(valid_df, tokenizer, processor, bucket, client)
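# Quick sanity check on the dataset wrapper (a hedged sketch, not part of the original
# notebook): fetch one encoded sample and print its tensor shapes. The exact key names
# (e.g. "pixel_values", "input_ids") depend on the BLIP processor version, so treat
# them as assumptions rather than guaranteed outputs.
sample = train_dataset[0]
if sample is not None:
    for key, value in sample.items():
        print(key, tuple(value.shape))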
training_args = TrainingArguments(
num_train_epochs=10,
evaluation_strategy="epoch",
save_steps=1000,
logging_steps=100,
per_device_eval_batch_size=1,
per_device_train_batch_size=1,
gradient_accumulation_steps=4,
lr_scheduler_type="cosine_with_restarts",
warmup_ratio=0.1,
learning_rate=1e-3,
save_total_limit=1,
output_dir="generate_rep_new",
)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
trainer = Trainer(
args=training_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
data_collator=data_collator,
tokenizer=processor.tokenizer,
model=model,
)
trainer.train()
trainer.save_model("generate_med_rep_final_v2")
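# A minimal generation sketch for the fine-tuned model (assumptions: the model is
# reloaded from the directory saved above, "example_cxr.jpg" is a hypothetical local
# chest X-ray image, and max_length=100 is an arbitrary decoding choice).
finetuned = BlipForConditionalGeneration.from_pretrained("generate_med_rep_final_v2").to(
    device
)
sample_image = Image.open("example_cxr.jpg").convert("RGB")
inputs = processor(images=sample_image, return_tensors="pt").to(device)
generated_ids = finetuned.generate(**inputs, max_length=100)
print(processor.decode(generated_ids[0], skip_special_tokens=True))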
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/048/129048691.ipynb
|
images
|
ananyaajoshi
|
[{"Id": 129048691, "ScriptId": 38325828, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11410316, "CreationDate": "05/10/2023 15:03:15", "VersionNumber": 1.0, "Title": "report_gen_training", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 110.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184761786, "KernelVersionId": 129048691, "SourceDatasetVersionId": 5646618}, {"Id": 184761788, "KernelVersionId": 129048691, "SourceDatasetVersionId": 5649842}, {"Id": 184761789, "KernelVersionId": 129048691, "SourceDatasetVersionId": 5649914}, {"Id": 184761790, "KernelVersionId": 129048691, "SourceDatasetVersionId": 5652652}, {"Id": 184761791, "KernelVersionId": 129048691, "SourceDatasetVersionId": 5652684}]
|
[{"Id": 5646618, "DatasetId": 3245364, "DatasourceVersionId": 5721966, "CreatorUserId": 11410316, "LicenseName": "Unknown", "CreationDate": "05/09/2023 20:14:22", "VersionNumber": 1.0, "Title": "Images", "Slug": "images", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3245364, "CreatorUserId": 11410316, "OwnerUserId": 11410316.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5646618.0, "CurrentDatasourceVersionId": 5721966.0, "ForumId": 3310686, "Type": 2, "CreationDate": "05/09/2023 20:14:22", "LastActivityDate": "05/09/2023", "TotalViews": 30, "TotalDownloads": 1, "TotalVotes": 0, "TotalKernels": 0}]
|
[{"Id": 11410316, "UserName": "ananyaajoshi", "DisplayName": "Ananya A Joshi", "RegisterDate": "08/27/2022", "PerformanceTier": 0}]
|
import pandas as pd
import torch
import os
from transformers import Trainer, TrainingArguments
from transformers import DataCollatorForLanguageModeling
from transformers import BlipProcessor, BlipForConditionalGeneration
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import torchvision.transforms as transforms
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# from transformers import AutoProcessor, AutoModelForSeq2SeqLM
# processor = AutoProcessor.from_pretrained("nathansutton/generate-cxr")
# model = AutoModelForSeq2SeqLM.from_pretrained("nathansutton/generate-cxr")
#
processor_path = "/kaggle/input/processor"
model_path = "/kaggle/input/chest-rep-model"
model = BlipForConditionalGeneration.from_pretrained(model_path)
tokenizer = BlipProcessor.from_pretrained(processor_path).tokenizer
processor = BlipProcessor.from_pretrained(processor_path)
import io
from io import BytesIO
from google.cloud import storage
client = storage.Client.from_service_account_json(
"/kaggle/input/gcpkey/august-strata-385919-1f7f245d5414.json"
)
bucket = client.get_bucket("258_project")
df = pd.read_csv("/kaggle/input/pred-train-tags-full1/pred_train_tags_full1.csv")
class CXRDataset(Dataset):
def __init__(self, df, tokenizer, processor, bucket_name, client):
self.df = df
self.tokenizer = tokenizer
self.processor = processor
self.bucket_name = bucket_name
self.client = client
def __getitem__(self, idx):
row = self.df.iloc[idx]
image_path = row["image_path"]
try:
# Get a handle to the bucket and file
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.blob(image_path)
# Read the image bytes from the blob
image_bytes = blob.download_as_bytes()
image = Image.open(BytesIO(image_bytes)).convert("RGB")
except Exception as e:
print(f"Could not read image at {image_path}: {e}")
return None
text = row["pred_tags"]
image_caption_encoding = self.processor(
images=image, text=text, padding="max_length", return_tensors="pt"
)
image_caption_encoding = {
k: v.squeeze() for k, v in image_caption_encoding.items()
}
return image_caption_encoding
def __len__(self):
return len(self.df)
train_df = df.iloc[1:500]
valid_df = df.iloc[501:550]
train_dataset = CXRDataset(train_df, tokenizer, processor, bucket, client)
valid_dataset = CXRDataset(valid_df, tokenizer, processor, bucket, client)
training_args = TrainingArguments(
num_train_epochs=10,
evaluation_strategy="epoch",
save_steps=1000,
logging_steps=100,
per_device_eval_batch_size=1,
per_device_train_batch_size=1,
gradient_accumulation_steps=4,
lr_scheduler_type="cosine_with_restarts",
warmup_ratio=0.1,
learning_rate=1e-3,
save_total_limit=1,
output_dir="generate_rep_new",
)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
trainer = Trainer(
args=training_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
data_collator=data_collator,
tokenizer=processor.tokenizer,
model=model,
)
trainer.train()
trainer.save_model("generate_med_rep_final_v2")
| false | 1 | 968 | 0 | 984 | 968 |
||
129048379
|
# Store Sales - Time Series forecasting
# # 1 | Competition Overview
# The objective of the competition is to forecast store sales with time-series methods on data from Corporación Favorita, a large Ecuador-based grocery retailer.
# Specifically, the requirement is to build a model that accurately predicts the unit sales for thousands of items sold at different Favorita stores.
# We have several datasets at our disposal:
# - holiday_events: a list of all Ecuadorian holidays and events;
# - oil: a series of oil prices meant to serve as an economic indicator for Ecuador;
# - stores: a dataset with information about the stores, including city, state, type and cluster;
# - transactions: a dataset containing the number of aggregated transactions for each store on each day;
# - test: the test set of 16 days of sales that we need to predict;
# - train: a large training set with about 4 years of data used to predict the test sales.
# # 2 |Exploratory Data Analysis
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns
import gc
import os
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from statsmodels.tsa.deterministic import DeterministicProcess, CalendarFourier
from statsmodels.graphics.tsaplots import plot_pacf
from xgboost import XGBRegressor
# CONFIGURATIONS
# ------------------------------------------------------
pd.set_option("display.max_columns", None)
pd.options.display.float_format = "{:.2f}".format
import warnings

warnings.filterwarnings("ignore")
# Import
train = pd.read_csv("../input/store-sales-time-series-forecasting/train.csv")
test = pd.read_csv("../input/store-sales-time-series-forecasting/test.csv")
stores = pd.read_csv("../input/store-sales-time-series-forecasting/stores.csv")
# sub = pd.read_csv("../input/store-sales-time-series-forecasting/sample_submission.csv")
transactions = pd.read_csv(
"../input/store-sales-time-series-forecasting/transactions.csv"
).sort_values(["store_nbr", "date"])
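# The overview above also mentions holiday_events and oil, which are not used in this
# excerpt. A hedged loading sketch in case you want to extend the analysis (file names
# assumed to follow the competition's standard layout):
holidays = pd.read_csv(
    "../input/store-sales-time-series-forecasting/holidays_events.csv",
    parse_dates=["date"],
)
oil = pd.read_csv(
    "../input/store-sales-time-series-forecasting/oil.csv", parse_dates=["date"]
)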
# Datetime
train["date"] = pd.to_datetime(train.date)
test["date"] = pd.to_datetime(test.date)
transactions["date"] = pd.to_datetime(transactions.date)
# Data types
train.onpromotion = train.onpromotion.astype("float16")
train.sales = train.sales.astype("float32")
stores.cluster = stores.cluster.astype("int8")
# ## 2.1 |Transaction dataset
# Let's start by having a look at the transactions_df. Logically, the feature included in this dataset (the number of transactions) will be highly correlated with sales data.
temp = pd.merge(
train.groupby(["date", "store_nbr"]).sales.sum().reset_index(),
transactions,
how="left",
)
print(
"Spearman Correlation between Total Sales and Transactions: {:,.4f}".format(
temp.corr("spearman").sales.loc["transactions"]
)
)
px.line(
transactions.sort_values(["store_nbr", "date"]),
x="date",
y="transactions",
color="store_nbr",
title="Transactions",
)
# We notice yearly seasonality, with peaks in December. To confirm this, let's look at a boxplot:
a = transactions.copy()
a["year"] = a.date.dt.year
a["month"] = a.date.dt.month
px.box(a, x="year", y="transactions", color="month", title="Transactions")
# Let's look at a scatter plot of sales versus transactions to see whether the relationship we expect is supported by the data:
px.scatter(
temp, x="transactions", y="sales", trendline="ols", trendline_color_override="red"
)
# Besides having a yearly seasonality, we could also have weekly seasonality. Let's see if this is the case:
a = transactions.copy()
a["year"] = a.date.dt.year
a["dayofweek"] = a.date.dt.dayofweek + 1
a = a.groupby(["year", "dayofweek"]).transactions.mean().reset_index()
px.line(a, x="dayofweek", y="transactions", color="year", title="Transactions")
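# The yearly and weekly patterns seen above can also be encoded as deterministic
# features for a regression model, which is what the CalendarFourier and
# DeterministicProcess imports are for. A minimal sketch (4 annual Fourier pairs,
# a linear trend and weekly indicators are arbitrary choices, not tuned values):
daily_sales = train.groupby("date").sales.sum().asfreq("D")
fourier = CalendarFourier(freq="A", order=4)  # annual seasonality
dp = DeterministicProcess(
    index=daily_sales.index,
    constant=True,
    order=1,  # linear trend
    seasonal=True,
    period=7,  # weekly seasonality (day-of-week indicators)
    additional_terms=[fourier],
    drop=True,
)
X_season = dp.in_sample()
print(X_season.head())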
# ## 2.2 |Training dataset
#
print(
"The Training dataset is made of {} rows and {} columns.".format(
len(train), len(train.columns)
)
)
#
# Now that we have imported the dataset, let's look at a few rows to see what the data look like:
pd.options.display.max_columns = train.shape[1]
train.head()
# We can see that not all columns share the same data type.
# The type of each column is listed below:
columns = train.dtypes
for elem in range(len(columns.index)):
print("- {}: type {} \n".format(columns.index[elem], columns.values[elem]))
# Let's have a look if the training dataset contains any missing values:
train.isna().sum()
# The training set doesn't contain any missing values.
# Find out how many stores, products and dates are in our data:
nr_stores = train["store_nbr"].unique().__len__() # 54 stores
nr_products = train["family"].unique().__len__() # 33 products
print(
"The Training dataset includes {} stores and {} product classifications.".format(
nr_stores, nr_products
)
)
a = train[["store_nbr", "sales"]]
a["ind"] = 1
a["ind"] = a.groupby("store_nbr").ind.cumsum().values
a = pd.pivot(a, index="ind", columns="store_nbr", values="sales").corr()
mask = np.triu(np.ones_like(a, dtype=bool))  # mask the upper triangle of the heatmap
plt.figure(figsize=(20, 20))
sns.heatmap(
a,
annot=True,
fmt=".1f",
cmap="coolwarm",
square=True,
mask=mask,
linewidths=1,
cbar=False,
)
plt.title("Correlations among stores", fontsize=20)
plt.show()
# Looks like stores 20, 21, 22, 42 and 52 behave differently. We can try to have a look at the sales grouped by store and product family:
sales_grouped = (
train.groupby(["store_nbr", "family"])
.sales.sum()
.reset_index()
.sort_values(["family", "store_nbr"])
)
sales_grouped_0 = sales_grouped[sales_grouped["sales"] == 0]
# It seems that some stores do not sell certain product families at all, so we should account for this in our forecasting. If a time series contains only zero values, the forecast will also be zero, so we can already prepare this part of the predictions.
zero_prediction = []
for i in range(0, len(sales_grouped_0)):
zero_prediction.append(
pd.DataFrame(
{
"date": pd.date_range("2017-08-16", "2017-08-31").tolist(),
"store_nbr": sales_grouped_0.store_nbr.iloc[i],
"family": sales_grouped_0.family.iloc[i],
"sales": 0,
}
)
)
zero_prediction = pd.concat(zero_prediction)
del sales_grouped_0
gc.collect()
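# A hedged sketch of how these all-zero series could be turned into submission rows:
# join the zero predictions onto the test frame (this assumes test.csv has an "id"
# column plus date/store_nbr/family, which matches the competition layout).
zero_rows = test.merge(
    zero_prediction, on=["date", "store_nbr", "family"], how="inner"
)[["id", "sales"]]
print(f"{len(zero_rows)} test rows can already be filled with zero sales.")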
# Now that we have handled this first set of predictions, let's look at the remaining product families to understand the data better. A bar plot shows the average sales by category:
a = train.groupby("family").sales.mean().sort_values(ascending=False).reset_index()
px.bar(
a,
y="family",
x="sales",
color="family",
    title="Which product family is preferred more?",
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/048/129048379.ipynb
| null | null |
[{"Id": 129048379, "ScriptId": 35842109, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1762850, "CreationDate": "05/10/2023 15:00:56", "VersionNumber": 6.0, "Title": "Store Sales - EDA", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 189.0, "LinesInsertedFromPrevious": 104.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 85.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Store Sales - Time Series forecasting
# # 1 | Competition Overview
# The objective of the competition is to forecast store sales with time-series methods on data from Corporación Favorita, a large Ecuador-based grocery retailer.
# Specifically, the requirement is to build a model that accurately predicts the unit sales for thousands of items sold at different Favorita stores.
# We have several datasets at our disposal:
# - holiday_events: a list of all Ecuadorian holidays and events;
# - oil: a series of oil prices meant to serve as an economic indicator for Ecuador;
# - stores: a dataset with information about the stores, including city, state, type and cluster;
# - transactions: a dataset containing the number of aggregated transactions for each store on each day;
# - test: the test set of 16 days of sales that we need to predict;
# - train: a large training set with about 4 years of data used to predict the test sales.
# # 2 |Exploratory Data Analysis
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns
import gc
import os
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from statsmodels.tsa.deterministic import DeterministicProcess, CalendarFourier
from statsmodels.graphics.tsaplots import plot_pacf
from xgboost import XGBRegressor
# CONFIGURATIONS
# ------------------------------------------------------
pd.set_option("display.max_columns", None)
pd.options.display.float_format = "{:.2f}".format
import warnings

warnings.filterwarnings("ignore")
# Import
train = pd.read_csv("../input/store-sales-time-series-forecasting/train.csv")
test = pd.read_csv("../input/store-sales-time-series-forecasting/test.csv")
stores = pd.read_csv("../input/store-sales-time-series-forecasting/stores.csv")
# sub = pd.read_csv("../input/store-sales-time-series-forecasting/sample_submission.csv")
transactions = pd.read_csv(
"../input/store-sales-time-series-forecasting/transactions.csv"
).sort_values(["store_nbr", "date"])
# Datetime
train["date"] = pd.to_datetime(train.date)
test["date"] = pd.to_datetime(test.date)
transactions["date"] = pd.to_datetime(transactions.date)
# Data types
train.onpromotion = train.onpromotion.astype("float16")
train.sales = train.sales.astype("float32")
stores.cluster = stores.cluster.astype("int8")
# ## 2.1 |Transaction dataset
# Let's start by having a look at the transactions_df. Logically, the feature included in this dataset (the number of transactions) will be highly correlated with sales data.
temp = pd.merge(
train.groupby(["date", "store_nbr"]).sales.sum().reset_index(),
transactions,
how="left",
)
print(
"Spearman Correlation between Total Sales and Transactions: {:,.4f}".format(
temp.corr("spearman").sales.loc["transactions"]
)
)
px.line(
transactions.sort_values(["store_nbr", "date"]),
x="date",
y="transactions",
color="store_nbr",
title="Transactions",
)
# We notice yearly seasonality, with peaks in December. To confirm this, let's look at a boxplot:
a = transactions.copy()
a["year"] = a.date.dt.year
a["month"] = a.date.dt.month
px.box(a, x="year", y="transactions", color="month", title="Transactions")
# Let's look at a scatter plot of sales versus transactions to see whether the relationship we expect is supported by the data:
px.scatter(
temp, x="transactions", y="sales", trendline="ols", trendline_color_override="red"
)
# Besides having a yearly seasonality, we could also have weekly seasonality. Let's see if this is the case:
a = transactions.copy()
a["year"] = a.date.dt.year
a["dayofweek"] = a.date.dt.dayofweek + 1
a = a.groupby(["year", "dayofweek"]).transactions.mean().reset_index()
px.line(a, x="dayofweek", y="transactions", color="year", title="Transactions")
# ## 2.2 |Training dataset
#
print(
"The Training dataset is made of {} rows and {} columns.".format(
len(train), len(train.columns)
)
)
#
# Now that we have imported the dataset, let's look at a few rows to see what the data look like:
pd.options.display.max_columns = train.shape[1]
train.head()
# We can see that not all columns share the same data type.
# The type of each column is listed below:
columns = train.dtypes
for elem in range(len(columns.index)):
print("- {}: type {} \n".format(columns.index[elem], columns.values[elem]))
# Let's have a look if the training dataset contains any missing values:
train.isna().sum()
# The training set doesn't contain any missing values.
# Find out how many stores, products and dates are in our data:
nr_stores = train["store_nbr"].unique().__len__() # 54 stores
nr_products = train["family"].unique().__len__() # 33 products
print(
"The Training dataset includes {} stores and {} product classifications.".format(
nr_stores, nr_products
)
)
a = train[["store_nbr", "sales"]]
a["ind"] = 1
a["ind"] = a.groupby("store_nbr").ind.cumsum().values
a = pd.pivot(a, index="ind", columns="store_nbr", values="sales").corr()
mask = np.triu(np.ones_like(a, dtype=bool))  # mask the upper triangle of the heatmap
plt.figure(figsize=(20, 20))
sns.heatmap(
a,
annot=True,
fmt=".1f",
cmap="coolwarm",
square=True,
mask=mask,
linewidths=1,
cbar=False,
)
plt.title("Correlations among stores", fontsize=20)
plt.show()
# Looks like stores 20, 21, 22, 42 and 52 behave differently. We can try to have a look at the sales grouped by store and product family:
sales_grouped = (
train.groupby(["store_nbr", "family"])
.sales.sum()
.reset_index()
.sort_values(["family", "store_nbr"])
)
sales_grouped_0 = sales_grouped[sales_grouped["sales"] == 0]
# It seems that some stores do not sell certain product families at all, so we should account for this in our forecasting. If a time series contains only zero values, the forecast will also be zero, so we can already prepare this part of the predictions.
zero_prediction = []
for i in range(0, len(sales_grouped_0)):
zero_prediction.append(
pd.DataFrame(
{
"date": pd.date_range("2017-08-16", "2017-08-31").tolist(),
"store_nbr": sales_grouped_0.store_nbr.iloc[i],
"family": sales_grouped_0.family.iloc[i],
"sales": 0,
}
)
)
zero_prediction = pd.concat(zero_prediction)
del sales_grouped_0
gc.collect()
# Now that we have handled this first set of predictions, let's look at the remaining product families to understand the data better. A bar plot shows the average sales by category:
a = train.groupby("family").sales.mean().sort_values(ascending=False).reset_index()
px.bar(
a,
y="family",
x="sales",
color="family",
    title="Which product family is preferred more?",
)
| false | 0 | 2,215 | 0 | 2,215 | 2,215 |
||
129048255
|
<jupyter_start><jupyter_text>Red Wine Quality
### Context
The two datasets are related to red and white variants of the Portuguese "Vinho Verde" wine. For more details, consult the reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.).
These datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are much more normal wines than excellent or poor ones).
---
*This dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. (If I am mistaken and the public license type disallowed me from doing so, I will take this down if requested.)*
### Content
For more information, read [Cortez et al., 2009].<br>
Input variables (based on physicochemical tests):<br>
1 - fixed acidity <br>
2 - volatile acidity <br>
3 - citric acid <br>
4 - residual sugar <br>
5 - chlorides <br>
6 - free sulfur dioxide <br>
7 - total sulfur dioxide <br>
8 - density <br>
9 - pH <br>
10 - sulphates <br>
11 - alcohol <br>
Output variable (based on sensory data): <br>
12 - quality (score between 0 and 10) <br>
### Tips
What might be an interesting thing to do, is aside from using regression modelling, is to set an arbitrary cutoff for your dependent variable (wine quality) at e.g. 7 or higher getting classified as 'good/1' and the remainder as 'not good/0'.
This allows you to practice with hyper parameter tuning on e.g. decision tree algorithms looking at the ROC curve and the AUC value.
Without doing any kind of feature engineering or overfitting you should be able to get an AUC of .88 (without even using random forest algorithm)
**KNIME** is a great tool (GUI) that can be used for this.<br>
1 - File Reader (for csv) to linear correlation node and to interactive histogram for basic EDA.<br>
2- File Reader to 'Rule Engine Node' to turn the 10-point scale into a dichotomous variable (good wine and the rest); the code to put in the rule engine is something like this:<br>
- **$quality$ > 6.5 => "good"**<br>
- **TRUE => "bad"** <br>
3- Rule Engine Node output to input of Column Filter node to filter out your original 10point feature (this prevent leaking)<br>
4- Column Filter Node output to input of Partitioning Node (your standard train/test split, e.g. 75%/25%, choose 'random' or 'stratified')<br>
5- Partitioning Node train data split output to input of Train data split to input Decision Tree Learner node and <br>
6- Partitioning Node test data split output to input Decision Tree predictor Node<br>
7- Decision Tree learner Node output to input Decision Tree Node input<br>
8- Decision Tree output to input ROC Node (here you can evaluate your model based on the AUC value)<br>
### Inspiration
Use machine learning to determine which physiochemical properties make a wine 'good'!
Kaggle dataset identifier: red-wine-quality-cortez-et-al-2009
<jupyter_script># # Importing the Libraries
import numpy as np # to create numpy arrays
import pandas as pd # to create pandas dataframe
import matplotlib.pyplot as plt # for making plots and graphs
import seaborn as sns # for data visualization
from sklearn.model_selection import (
train_test_split,
) # to split data into training data and testing data
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score # to evaluate the model
# # Data Collection
# loading the dataset to a pandas dataframe
wine_dataset = pd.read_csv(
"/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv"
)
# checking the first 5 rows of the dataset
wine_dataset.head()
# checking the number of rows and columns in the dataset
wine_dataset.shape
# getting some information about the dataset
wine_dataset.info()
# checking for missing values in each column
wine_dataset.isnull().sum()
# We don't have any missing values in our dataset
# # Data Analysis and Visualization
# getting statistical measures of the dataset
wine_dataset.describe()
# finding the number of values for each quality
sns.catplot(x="quality", data=wine_dataset, kind="count")
# volatile acidity vs quality
plot = plt.figure(figsize=(5, 5))
sns.barplot(x="quality", y="volatile acidity", data=wine_dataset)
# 'volatile acidity' and 'quality' are inversely related
# citric acid vs quality
plot = plt.figure(figsize=(5, 5))
sns.barplot(x="quality", y="citric acid", data=wine_dataset)
# higher 'citric acid' content is associated with higher wine 'quality'
# checking the distribution of the data
wine_dataset.hist(bins=100, figsize=(10, 10))
plt.show()
# # Correlation
# correlation between all the columns to the quality column
correlation = wine_dataset.corr()
# constructing a heatmap to understand the correlation between the columns
plt.figure(figsize=(10, 7))
sns.heatmap(correlation, annot=True)
# printing correlation values
wine_dataset.corr()["quality"].sort_values()
# 'alcohol' has the highest correlation with the target -- quality
# # Data Preprocessing
# separating the features and label
X = wine_dataset.drop("quality", axis=1)
print(X)
# **Label Binarization**
Y = wine_dataset["quality"].apply(lambda y_value: 1 if y_value >= 6.5 else 0)
print(Y)
# So here we have binarized the wine quality ratings into 1 (good, quality >= 6.5) and 0 (bad)
# # Train & Test Split
# splitting X,Y into training and testing data
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.2, random_state=3
) # assigned 20% for test
print(X.shape, X_train.shape, X_test.shape)
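# The RandomForestClassifier imported above has not been fit yet. A minimal
# training/evaluation sketch -- default hyperparameters, so treat the settings as
# placeholders rather than a tuned configuration:
model = RandomForestClassifier(random_state=3)
model.fit(X_train, Y_train)
test_predictions = model.predict(X_test)
print("Test accuracy:", accuracy_score(Y_test, test_predictions))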
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/048/129048255.ipynb
|
red-wine-quality-cortez-et-al-2009
| null |
[{"Id": 129048255, "ScriptId": 38353133, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14119393, "CreationDate": "05/10/2023 14:59:53", "VersionNumber": 2.0, "Title": "Wine Quality Prediction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 93.0, "LinesInsertedFromPrevious": 35.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 58.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184760756, "KernelVersionId": 129048255, "SourceDatasetVersionId": 8204}]
|
[{"Id": 8204, "DatasetId": 4458, "DatasourceVersionId": 8204, "CreatorUserId": 1132983, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "11/27/2017 23:41:08", "VersionNumber": 2.0, "Title": "Red Wine Quality", "Slug": "red-wine-quality-cortez-et-al-2009", "Subtitle": "Simple and clean practice dataset for regression or classification modelling", "Description": "### Context\n\nThe two datasets are related to red and white variants of the Portuguese \"Vinho Verde\" wine. For more details, consult the reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). \n\nThese datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are much more normal wines than excellent or poor ones). \n\n---\n*This dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. (If I am mistaken and the public license type disallowed me from doing so, I will take this down if requested.)*\n\n\n### Content\n\nFor more information, read [Cortez et al., 2009].<br>\nInput variables (based on physicochemical tests):<br>\n1 - fixed acidity <br>\n2 - volatile acidity <br>\n3 - citric acid <br>\n4 - residual sugar <br>\n5 - chlorides <br>\n6 - free sulfur dioxide <br> \n7 - total sulfur dioxide <br>\n8 - density <br>\n9 - pH <br>\n10 - sulphates <br>\n11 - alcohol <br>\nOutput variable (based on sensory data): <br>\n12 - quality (score between 0 and 10) <br>\n\n### Tips\nWhat might be an interesting thing to do, is aside from using regression modelling, is to set an arbitrary cutoff for your dependent variable (wine quality) at e.g. 7 or higher getting classified as 'good/1' and the remainder as 'not good/0'.\nThis allows you to practice with hyper parameter tuning on e.g. decision tree algorithms looking at the ROC curve and the AUC value.\nWithout doing any kind of feature engineering or overfitting you should be able to get an AUC of .88 (without even using random forest algorithm)\n\n**KNIME** is a great tool (GUI) that can be used for this.<br>\n1 - File Reader (for csv) to linear correlation node and to interactive histogram for basic EDA.<br>\n2- File Reader to 'Rule Engine Node' to turn the 10 point scale to dichtome variable (good wine and rest), the code to put in the rule engine is something like this:<br>\n - **$quality$ > 6.5 => \"good\"**<br>\n - **TRUE => \"bad\"** <br>\n3- Rule Engine Node output to input of Column Filter node to filter out your original 10point feature (this prevent leaking)<br>\n4- Column Filter Node output to input of Partitioning Node (your standard train/tes split, e.g. 75%/25%, choose 'random' or 'stratified')<br>\n5- Partitioning Node train data split output to input of Train data split to input Decision Tree Learner node and <br>\n6- Partitioning Node test data split output to input Decision Tree predictor Node<br>\n7- Decision Tree learner Node output to input Decision Tree Node input<br>\n8- Decision Tree output to input ROC Node.. 
(here you can evaluate your model base on AUC value)<br>\n\n\n### Inspiration\nUse machine learning to determine which physiochemical properties make a wine 'good'!\n\n\n\n### Acknowledgements\n\nThis dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. *(I am mistaken and the public license type disallowed me from doing so, I will take this down at first request. I am not the owner of this dataset.*\n\n**Please include this citation if you plan to use this database: \nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. \nModeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.**\n\n### Relevant publication\n\nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. Modeling wine preferences by data mining from physicochemical properties. \nIn Decision Support Systems, Elsevier, 47(4):547-553, 2009.", "VersionNotes": "Fixed csv format to use comma as delimiter", "TotalCompressedBytes": 100951.0, "TotalUncompressedBytes": 100951.0}]
|
[{"Id": 4458, "CreatorUserId": 1132983, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 8204.0, "CurrentDatasourceVersionId": 8204.0, "ForumId": 10170, "Type": 2, "CreationDate": "11/12/2017 14:08:43", "LastActivityDate": "02/06/2018", "TotalViews": 1214229, "TotalDownloads": 194418, "TotalVotes": 2537, "TotalKernels": 1574}]
| null |
# # Importing the Libraries
import numpy as np # to create numpy arrays
import pandas as pd # to create pandas dataframe
import matplotlib.pyplot as plt # for making plots and graphs
import seaborn as sns # for data visualization
from sklearn.model_selection import (
train_test_split,
) # to split data into training data and testing data
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score # to evaluate the model
# # Data Collection
# loading the dataset to a pandas dataframe
wine_dataset = pd.read_csv(
"/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv"
)
# checking the first 5 rows of the dataset
wine_dataset.head()
# checking the number of rows and columns in the dataset
wine_dataset.shape
# getting some information about the dataset
wine_dataset.info()
# checking for missing values in each column
wine_dataset.isnull().sum()
# We don't have any missing values in our dataset
# # Data Analysis and Visualization
# getting statistical measures of the dataset
wine_dataset.describe()
# finding the number of values for each quality
sns.catplot(x="quality", data=wine_dataset, kind="count")
# volatile acidity vs quality
plot = plt.figure(figsize=(5, 5))
sns.barplot(x="quality", y="volatile acidity", data=wine_dataset)
# 'volatile acidity' and 'quality' are inversely related
# citric acid vs quality
plot = plt.figure(figsize=(5, 5))
sns.barplot(x="quality", y="citric acid", data=wine_dataset)
# higher 'citric acid' content is associated with higher wine 'quality'
# checking the distribution of the data
wine_dataset.hist(bins=100, figsize=(10, 10))
plt.show()
# # Correlation
# correlation between all the columns to the quality column
correlation = wine_dataset.corr()
# constructing a heatmap to understand the correlation between the columns
plt.figure(figsize=(10, 7))
sns.heatmap(correlation, annot=True)
# printing correlation values
wine_dataset.corr()["quality"].sort_values()
# 'alcohol' has the highest correlation with the target -- quality
# # Data Preprocessing
# separating the features and label
X = wine_dataset.drop("quality", axis=1)
print(X)
# **Label Binarization**
Y = wine_dataset["quality"].apply(lambda y_value: 1 if y_value >= 6.5 else 0)
print(Y)
# So here we have binarized the wine quality ratings into 1 (good, quality >= 6.5) and 0 (bad)
# # Train & Test Split
# splitting X,Y into training and testing data
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.2, random_state=3
) # assigned 20% for test
print(X.shape, X_train.shape, X_test.shape)
| false | 0 | 751 | 0 | 1,619 | 751 |
||
129048397
|
import tensorflow as tf
tf.__version__
# + scalar value: 0.4, 2.3
# + vector: [3, 4, 2, 5]
# + matrix [[1, 2, 3], [4, 5, 6]]
# + tensor: color image [height, width, color], video, text, ..
tf.constant(2, shape=(2, 3), dtype=tf.int32)
# identity matrix
tf.eye(7)
tf.ones(shape=(4, 2))
tf.zeros(shape=(2, 3, 4))
tf.random.normal(shape=(2, 3), mean=0, stddev=1)
tf.range(10)
tf.range(start=4, limit=10, delta=2)
# ## Basic Math
a = tf.constant([1, 3, 2])
b = tf.constant([4, -2, 1])
# element-wise operators
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a % b)
print(a**3)
# matmul
a = tf.random.normal((2, 4))
b = tf.random.normal((4, 3))
print(a)
print(b)
print(tf.matmul(a, b))
# ## Indexing
a = tf.range(start=3, limit=15)
a
print(a[2], a[-3])
print(a[3:7]) # -> a[3], a[4], a[5], a[6]
print(a[:4], a[6:])
# ## Reshaping
a = tf.range(24)
a
tf.reshape(a, (4, -1))
tf.reshape(a, (2, 3, 4))
# ## Transposing
a = tf.reshape(tf.range(24), (4, 6))
a
tf.transpose(a)
# ## Image Classification with Fashion-MNIST Dataset
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# load Fashion-MNIST dataset -> multi-class classification task
(train_images, train_labels), (
test_images,
test_labels,
) = tf.keras.datasets.fashion_mnist.load_data()
train_images.shape, train_labels.shape, test_images.shape, test_labels.shape
train_images[0]
train_labels[0]
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images[0]
train_images[0].shape
a = tf.constant([[1, 4, 3, 2], [2, 3, 4, 5], [1, 3, 1, 5], [8, 7, 9, 1]])
a
tf.reshape(a, (16))
# [Feed-Forward Neural Network](https://deepai.org/machine-learning-glossary-and-terms/feed-forward-neural-network)
# 
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation="sigmoid"),
tf.keras.layers.Dense(10),
]
)
model.summary()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/048/129048397.ipynb
| null | null |
[{"Id": 129048397, "ScriptId": 38359541, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2792943, "CreationDate": "05/10/2023 15:01:04", "VersionNumber": 1.0, "Title": "Mock Project - Python 06 - TensorFlow Basic", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 109.0, "LinesInsertedFromPrevious": 109.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
import tensorflow as tf
tf.__version__
# + scalar value: 0.4, 2.3
# + vector: [3, 4, 2, 5]
# + matrix [[1, 2, 3], [4, 5, 6]]
# + tensor: color image [height, width, color], video, text, ..
tf.constant(2, shape=(2, 3), dtype=tf.int32)
# identity matrix
tf.eye(7)
tf.ones(shape=(4, 2))
tf.zeros(shape=(2, 3, 4))
tf.random.normal(shape=(2, 3), mean=0, stddev=1)
tf.range(10)
tf.range(start=4, limit=10, delta=2)
# ## Basic Math
a = tf.constant([1, 3, 2])
b = tf.constant([4, -2, 1])
# element-wise operators
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a % b)
print(a**3)
# matmul
a = tf.random.normal((2, 4))
b = tf.random.normal((4, 3))
print(a)
print(b)
print(tf.matmul(a, b))
# ## Indexing
a = tf.range(start=3, limit=15)
a
print(a[2], a[-3])
print(a[3:7]) # -> a[3], a[4], a[5], a[6]
print(a[:4], a[6:])
# ## Reshaping
a = tf.range(24)
a
tf.reshape(a, (4, -1))
tf.reshape(a, (2, 3, 4))
# ## Transposing
a = tf.reshape(tf.range(24), (4, 6))
a
tf.transpose(a)
# ## Image Classification with Fashion-MNIST Dataset
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# load Fashion-MNIST dataset -> multi-class classification task
(train_images, train_labels), (
test_images,
test_labels,
) = tf.keras.datasets.fashion_mnist.load_data()
train_images.shape, train_labels.shape, test_images.shape, test_labels.shape
train_images[0]
train_labels[0]
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images[0]
train_images[0].shape
a = tf.constant([[1, 4, 3, 2], [2, 3, 4, 5], [1, 3, 1, 5], [8, 7, 9, 1]])
a
tf.reshape(a, (16))
# [Feed-Forward Neural Network](https://deepai.org/machine-learning-glossary-and-terms/feed-forward-neural-network)
# 
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation="sigmoid"),
tf.keras.layers.Dense(10),
]
)
model.summary()
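# A minimal sketch of compiling and training the model defined above; the optimizer,
# loss and epoch count are illustrative assumptions (from_logits=True because the last
# Dense layer outputs raw logits rather than probabilities).
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("Test accuracy:", test_acc)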
| false | 0 | 914 | 1 | 914 | 914 |
||
129048187
|
<jupyter_start><jupyter_text>geoguessr_55countries
Kaggle dataset identifier: geoguessr-55countries
<jupyter_script>from fastai.vision.all import *
import timm
path = Path("/kaggle/input/geoguessr-55countries/geoguessr_filtered_data")
fns = get_image_files(path)
failed = verify_images(fns)
failed.map(Path.unlink)
guessr = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
)
# guessr = guessr.new(
# item_tfms=RandomResizedCrop(224, min_scale=0.5),
# batch_tfms=aug_transforms())
# dls = guessr.dataloaders(path)
dls = guessr.dataloaders(path)
learn = vision_learner(dls, resnet50, metrics=error_rate)
learn.fine_tune(5)
learn.show_results(max_n=24)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(5, nrows=1)
learn.export("guessr.pkl")
path = Path()
path.ls(file_exts=".pkl")
import gradio as gr
learn_inf = load_learner("guessr.pkl")
learn_inf.predict(
"/kaggle/input/geoguessr-55countries/geoguessr_filtered_data/Russia/canvas_1629261283.jpg"
)
import os
path = "/kaggle/input/geoguessrdata-200images/data"
categories = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
categories
def guess_country(img):
pred, idx, probs = learn_inf.predict(img)
top_5 = sorted(
zip(categories, map(float, probs)), key=lambda x: x[1], reverse=True
)[:5]
return dict(top_5)
app = gr.Interface(fn=guess_country, inputs="image", outputs="label")
app.launch(share=True)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/048/129048187.ipynb
|
geoguessr-55countries
|
annaglass1
|
[{"Id": 129048187, "ScriptId": 38028987, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14361741, "CreationDate": "05/10/2023 14:59:23", "VersionNumber": 1.0, "Title": "geoguessr-guessr v1", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 65.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184760512, "KernelVersionId": 129048187, "SourceDatasetVersionId": 5585052}]
|
[{"Id": 5585052, "DatasetId": 3213992, "DatasourceVersionId": 5659956, "CreatorUserId": 11687478, "LicenseName": "Unknown", "CreationDate": "05/02/2023 16:28:24", "VersionNumber": 1.0, "Title": "geoguessr_55countries", "Slug": "geoguessr-55countries", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3213992, "CreatorUserId": 11687478, "OwnerUserId": 11687478.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5585052.0, "CurrentDatasourceVersionId": 5659956.0, "ForumId": 3278796, "Type": 2, "CreationDate": "05/02/2023 16:28:24", "LastActivityDate": "05/02/2023", "TotalViews": 131, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 0}]
|
[{"Id": 11687478, "UserName": "annaglass1", "DisplayName": "annaglass1", "RegisterDate": "09/22/2022", "PerformanceTier": 0}]
|
from fastai.vision.all import *
import timm
path = Path("/kaggle/input/geoguessr-55countries/geoguessr_filtered_data")
fns = get_image_files(path)
failed = verify_images(fns)
failed.map(Path.unlink)
guessr = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
)
# guessr = guessr.new(
# item_tfms=RandomResizedCrop(224, min_scale=0.5),
# batch_tfms=aug_transforms())
# dls = guessr.dataloaders(path)
dls = guessr.dataloaders(path)
learn = vision_learner(dls, resnet50, metrics=error_rate)
learn.fine_tune(5)
learn.show_results(max_n=24)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(5, nrows=1)
learn.export("guessr.pkl")
path = Path()
path.ls(file_exts=".pkl")
import gradio as gr
learn_inf = load_learner("guessr.pkl")
learn_inf.predict(
"/kaggle/input/geoguessr-55countries/geoguessr_filtered_data/Russia/canvas_1629261283.jpg"
)
import os
path = "/kaggle/input/geoguessrdata-200images/data"
categories = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
categories
def guess_country(img):
pred, idx, probs = learn_inf.predict(img)
top_5 = sorted(
zip(categories, map(float, probs)), key=lambda x: x[1], reverse=True
)[:5]
return dict(top_5)
app = gr.Interface(fn=guess_country, inputs="image", outputs="label")
app.launch(share=True)
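# Note: mapping the predicted probabilities onto folder names read from a different
# dataset assumes that list matches the learner's class order. A safer sketch (an
# alternative, not part of the original notebook) reads the labels from the exported
# learner itself via dls.vocab:
vocab = list(learn_inf.dls.vocab)
def guess_country_from_vocab(img):
    pred, idx, probs = learn_inf.predict(img)
    top_5 = sorted(zip(vocab, map(float, probs)), key=lambda x: x[1], reverse=True)[:5]
    return dict(top_5)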
| false | 0 | 540 | 0 | 572 | 540 |
||
129496122
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
from torch import autocast
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
model_id = "runwayml/stable-diffusion-v1-5"
device = "cpu"
dpm = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=dpm)
pipe = pipe.to(device)
image = pipe("apple")
image.images[0]
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/496/129496122.ipynb
| null | null |
[{"Id": 129496122, "ScriptId": 38503949, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7853338, "CreationDate": "05/14/2023 09:50:37", "VersionNumber": 3.0, "Title": "Stable Diffusion", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 33.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 32.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
from torch import autocast
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
model_id = "runwayml/stable-diffusion-v1-5"
device = "cpu"
dpm = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=dpm)
pipe = pipe.to(device)
image = pipe("apple")
image.images[0]
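# A minimal sketch of controlling generation and saving the output; the step count and
# guidance scale below are illustrative assumptions, not values from the original notebook.
result = pipe("apple", num_inference_steps=25, guidance_scale=7.5)
result.images[0].save("apple.png")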
| false | 0 | 304 | 0 | 304 | 304 |
||
129496515
|
<jupyter_start><jupyter_text>World GDP growth 1980-2028
The global gross domestic product (GDP) has experienced various growth trends from 1980 to 2028. During this period, the world economy has undergone significant changes influenced by factors such as technological advancements, political events, and economic policies.
# Format
- Country
- other columns include gdp year(from 1980 to 2028)
Kaggle dataset identifier: world-gdp-growth-1980-2028
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import xlrd
import seaborn as sns
import matplotlib.pyplot as plt
from autoviz.classify_method import data_cleaning_suggestions
from autoviz.AutoViz_Class import AutoViz_Class
AV = AutoViz_Class()
import matplotlib.cm as cm
workbook = xlrd.open_workbook(
"/kaggle/input/world-gdp-growth-1980-2028/imf-dm-export-20230513.xls",
ignore_workbook_corruption=True,
)
df = pd.read_excel(workbook)
df = df.dropna()
df.head()
country_name = "United States"
country_data = df[df["Real GDP growth (Annual percent change)"] == country_name]
country_data = country_data.replace("no data", pd.NA).dropna(axis=1)
years = country_data.columns[1:].astype(int)
gdp_growth = country_data.values.flatten()[1:].astype(float)
plt.figure(figsize=(14, 6))
plt.plot(years, gdp_growth, linestyle="-", marker="o", markersize=4)
plt.plot(years[-5:], gdp_growth[-5:], linestyle=":", color="r")
plt.grid(True)
plt.xlim(min(years), max(years))
plt.ylim(min(gdp_growth), max(gdp_growth))
plt.xlabel("Year")
plt.ylabel("GDP Growth (Annual percent change)")
plt.title(f"GDP Growth in {country_name}(1980-2028)")
plt.legend([country_name, f"{country_name} (Forcasted)"])
plt.xticks(years, rotation=45)
plt.show()
country_names = ["United States", "Japan"]
country_data = df[df["Real GDP growth (Annual percent change)"].isin(country_names)]
country_data = country_data.replace("no data", pd.NA).dropna(axis=1)
years = country_data.columns[1:].astype(int)
gdp_growth = country_data.iloc[:, 1:].astype(float)
plt.figure(figsize=(14, 6))
for i in range(len(country_names)):
country = country_names[i]
growth = gdp_growth.iloc[i].values
plt.plot(years, growth, linestyle="-", marker="o", markersize=4)
plt.grid(True)
plt.xlim(min(years), max(years))
plt.xlabel("Year")
plt.ylabel("GDP Growth (Annual percent change)")
plt.title("GDP Growth correlation Between US and Japan")
plt.legend(country_names)
plt.xticks(years, rotation=45)
plt.show()
country_names = ["India", "Pakistan", "Bangladesh"]
country_data = df[df["Real GDP growth (Annual percent change)"].isin(country_names)]
country_data = country_data.replace("no data", pd.NA).dropna(axis=1)
years = country_data.columns[1:].astype(int)
gdp_growth = country_data.iloc[:, 1:].astype(float)
plt.figure(figsize=(14, 6))
for i in range(len(country_names)):
country = country_names[i]
growth = gdp_growth.iloc[i].values
plt.plot(years, growth, linestyle="-", marker="o", markersize=4)
plt.grid(True)
plt.xlim(min(years), max(years))
plt.xlabel("Year")
plt.ylabel("GDP Growth (Annual percent change)")
plt.title("GDP Growth for India,Pakistan and Bangladesh")
plt.legend(country_names)
plt.xticks(years, rotation=45)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/496/129496515.ipynb
|
world-gdp-growth-1980-2028
|
utkarshx27
|
[{"Id": 129496515, "ScriptId": 38500957, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13364933, "CreationDate": "05/14/2023 09:54:42", "VersionNumber": 1.0, "Title": "World GDP growth 1980-2028 Analysis", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 99.0, "LinesInsertedFromPrevious": 99.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185613062, "KernelVersionId": 129496515, "SourceDatasetVersionId": 5678032}]
|
[{"Id": 5678032, "DatasetId": 3264188, "DatasourceVersionId": 5753585, "CreatorUserId": 13364933, "LicenseName": "World Bank Dataset Terms of Use", "CreationDate": "05/13/2023 17:54:00", "VersionNumber": 1.0, "Title": "World GDP growth 1980-2028", "Slug": "world-gdp-growth-1980-2028", "Subtitle": "World GDP growth 1980-2028", "Description": "The global gross domestic product (GDP) has experienced various growth trends from 1980 to 2028. During this period, the world economy has undergone significant changes influenced by factors such as technological advancements, political events, and economic policies.\n\n# Format\n- Country\n- other columns include gdp year(from 1980 to 2028)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3264188, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5678032.0, "CurrentDatasourceVersionId": 5753585.0, "ForumId": 3329791, "Type": 2, "CreationDate": "05/13/2023 17:54:00", "LastActivityDate": "05/13/2023", "TotalViews": 5413, "TotalDownloads": 1189, "TotalVotes": 36, "TotalKernels": 2}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import xlrd
import seaborn as sns
import matplotlib.pyplot as plt
from autoviz.classify_method import data_cleaning_suggestions
from autoviz.AutoViz_Class import AutoViz_Class
AV = AutoViz_Class()
import matplotlib.cm as cm
workbook = xlrd.open_workbook(
"/kaggle/input/world-gdp-growth-1980-2028/imf-dm-export-20230513.xls",
ignore_workbook_corruption=True,
)
df = pd.read_excel(workbook)
df = df.dropna()
df.head()
country_name = "United States"
country_data = df[df["Real GDP growth (Annual percent change)"] == country_name]
country_data = country_data.replace("no data", pd.NA).dropna(axis=1)
years = country_data.columns[1:].astype(int)
gdp_growth = country_data.values.flatten()[1:].astype(float)
plt.figure(figsize=(14, 6))
plt.plot(years, gdp_growth, linestyle="-", marker="o", markersize=4)
plt.plot(years[-5:], gdp_growth[-5:], linestyle=":", color="r")
plt.grid(True)
plt.xlim(min(years), max(years))
plt.ylim(min(gdp_growth), max(gdp_growth))
plt.xlabel("Year")
plt.ylabel("GDP Growth (Annual percent change)")
plt.title(f"GDP Growth in {country_name}(1980-2028)")
plt.legend([country_name, f"{country_name} (Forcasted)"])
plt.xticks(years, rotation=45)
plt.show()
country_names = ["United States", "Japan"]
country_data = df[df["Real GDP growth (Annual percent change)"].isin(country_names)]
country_data = country_data.replace("no data", pd.NA).dropna(axis=1)
years = country_data.columns[1:].astype(int)
gdp_growth = country_data.iloc[:, 1:].astype(float)
plt.figure(figsize=(14, 6))
for i in range(len(country_names)):
country = country_names[i]
growth = gdp_growth.iloc[i].values
plt.plot(years, growth, linestyle="-", marker="o", markersize=4)
plt.grid(True)
plt.xlim(min(years), max(years))
plt.xlabel("Year")
plt.ylabel("GDP Growth (Annual percent change)")
plt.title("GDP Growth correlation Between US and Japan")
plt.legend(country_names)
plt.xticks(years, rotation=45)
plt.show()
country_names = ["India", "Pakistan", "Bangladesh"]
country_data = df[df["Real GDP growth (Annual percent change)"].isin(country_names)]
country_data = country_data.replace("no data", pd.NA).dropna(axis=1)
years = country_data.columns[1:].astype(int)
gdp_growth = country_data.iloc[:, 1:].astype(float)
plt.figure(figsize=(14, 6))
for i in range(len(country_names)):
country = country_names[i]
growth = gdp_growth.iloc[i].values
plt.plot(years, growth, linestyle="-", marker="o", markersize=4)
plt.grid(True)
plt.xlim(min(years), max(years))
plt.xlabel("Year")
plt.ylabel("GDP Growth (Annual percent change)")
plt.title("GDP Growth for India,Pakistan and Bangladesh")
plt.legend(country_names)
plt.xticks(years, rotation=45)
plt.show()
| false | 0 | 1,121 | 0 | 1,261 | 1,121 |
||
129496303
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# The Titanic Tutorial on Kaggle is a classic machine learning problem where the task is to predict whether a passenger on the Titanic survived or not based on a set of features such as age, gender, passenger class, etc. The dataset contains both a training set and a test set, and the goal is to build a machine learning model on the training set that can accurately predict the survival of passengers on the test set.
# 1 - Import necessary libraries and load the data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data.head()
train_data.describe()
train_data.info()
sns.countplot(x="Survived", data=train_data)
sns.countplot(x="Survived", hue="Sex", data=train_data)
sns.countplot(x="Survived", hue="Pclass", data=train_data)
sns.distplot(train_data["Age"].dropna(), bins=30)
train_data["Age"].hist(bins=30)
# create a new feature called FamilySize
train_data["FamilySize"] = train_data["SibSp"] + train_data["Parch"] + 1
test_data["FamilySize"] = test_data["SibSp"] + test_data["Parch"] + 1
# create a new feature called IsAlone
train_data["IsAlone"] = np.where(train_data["FamilySize"] == 1, 1, 0)
test_data["IsAlone"] = np.where(test_data["FamilySize"] == 1, 1, 0)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# LogisticRegression cannot handle text columns or missing values, so encode Sex,
# impute Age, and keep only numeric features before fitting
train_data["Sex"] = train_data["Sex"].map({"male": 0, "female": 1})
train_data["Age"] = train_data["Age"].fillna(train_data["Age"].median())
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "FamilySize", "IsAlone"]
X = train_data[features]
y = train_data["Survived"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
logreg = LogisticRegression(max_iter=1000)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
accuracy_score(y_test, y_pred)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/496/129496303.ipynb
| null | null |
[{"Id": 129496303, "ScriptId": 38505569, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14633871, "CreationDate": "05/14/2023 09:52:26", "VersionNumber": 1.0, "Title": "Titanic", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 66.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# The Titanic Tutorial on Kaggle is a classic machine learning problem where the task is to predict whether a passenger on the Titanic survived or not based on a set of features such as age, gender, passenger class, etc. The dataset contains both a training set and a test set, and the goal is to build a machine learning model on the training set that can accurately predict the survival of passengers on the test set.
# 1 - Import necessary libraries and load the data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data.head()
train_data.describe()
train_data.info()
sns.countplot(x="Survived", data=train_data)
sns.countplot(x="Survived", hue="Sex", data=train_data)
sns.countplot(x="Survived", hue="Pclass", data=train_data)
sns.distplot(train_data["Age"].dropna(), bins=30)
train_data["Age"].hist(bins=30)
# create a new feature called FamilySize
train_data["FamilySize"] = train_data["SibSp"] + train_data["Parch"] + 1
test_data["FamilySize"] = test_data["SibSp"] + test_data["Parch"] + 1
# create a new feature called IsAlone
train_data["IsAlone"] = np.where(train_data["FamilySize"] == 1, 1, 0)
test_data["IsAlone"] = np.where(test_data["FamilySize"] == 1, 1, 0)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# LogisticRegression cannot handle text columns or missing values, so encode Sex,
# impute Age, and keep only numeric features before fitting
train_data["Sex"] = train_data["Sex"].map({"male": 0, "female": 1})
train_data["Age"] = train_data["Age"].fillna(train_data["Age"].median())
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "FamilySize", "IsAlone"]
X = train_data[features]
y = train_data["Survived"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
logreg = LogisticRegression(max_iter=1000)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
accuracy_score(y_test, y_pred)
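# # Predicting on the Test Set
# The goal described above is to predict survival on the unseen test set; this is a
# minimal sketch, assuming the same preparation (Sex encoding, Age/Fare imputation) is
# applied to test_data. The (PassengerId, Survived) submission format follows the
# Kaggle Titanic competition convention.
test_data["Sex"] = test_data["Sex"].map({"male": 0, "female": 1})
test_data["Age"] = test_data["Age"].fillna(test_data["Age"].median())
test_data["Fare"] = test_data["Fare"].fillna(test_data["Fare"].median())
predictions = logreg.predict(test_data[features])
submission = pd.DataFrame(
    {"PassengerId": test_data["PassengerId"], "Survived": predictions}
)
submission.to_csv("submission.csv", index=False)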
| false | 0 | 772 | 0 | 772 | 772 |
||
129496254
|
<jupyter_start><jupyter_text>China Scholarship Data - May 2019
The data was collected through web scraping https://www.cucas.edu.cn/china_scholarships/
The code to the web scraping program and data cleaning program is stored in https://github.com/mcmuralishclint/CUCAS
The dataset contains information about the scholarship programs in China as of May 2019.
Kaggle dataset identifier: china-scholarship-data-may-2019
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1)
sns.set_palette("bright")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Data Understanding
df = pd.read_csv("/kaggle/input/china-scholarship-data-may-2019/cleaned.csv")
df.head()
df.shape
df.info()
df.describe().T.drop("count", axis=1).T
df.describe(include=object).T.drop("count", axis=1).T
# # Data Preprocessing
df.isna().sum()
df.dropna(inplace=True)
df.duplicated().sum()
sns.clustermap(df.corr(), annot=True, cmap="mako", figsize=(8, 8))
plt.show()
# # Feature Engineering
# Create Column Category Tuition Covered?
df["Tuition Covered?"] = df["Tuition Covered"].apply(lambda x: 1 if x > 0 else 0)
df["Tuition Covered?"] = df["Tuition Covered?"].astype(int)
# Groupping for Create Scholarship Category
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Fully Covered"
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Tuition and Accomodation Covered"
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Tuition and Living Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Accomodation and Living Expense Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Accomodation Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Living Expense Covered"
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Tuition Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Not Fully Covered"
# Create column for Total Accomodation in Year
convert_to_month = {"YEAR": 1, "SEMESTER": 2, "TERM": 3, "MONTH": 12, "DAY": 365}
df["Accomodation_duration"] = df["Accomodation_duration"].replace(convert_to_month)
df["Accomodation_duration"] = df["Accomodation_duration"].astype(int)
df["total_accomodation_year"] = df["Accomodation_duration"] * df["Accomodation_To_Pay"]
# Create Column for Total Expense Year
convert_to_num = {"MONTH": 12}
df["Expense_duration"] = df["Expense_duration"].replace(convert_to_month)
df["Expense_duration"] = df["Expense_duration"].astype(int)
df["total_expense_year"] = df["Expense_duration"] * df["Expense_To_Pay"]
# Create Total Cost Year Without Coverage
df["total_cost_year"] = (
df["Tuition fees to pay"] + df["total_accomodation_year"] + df["total_expense_year"]
)
# Remove parenthesized text and non-letter characters from the Major names
df["Major"] = df["Major"].str.replace(r"\(.*\)", "", regex=True).str.strip()
df["Major"] = df["Major"].replace("[^a-zA-Z ]", "", regex=True)
# # Reasoning for Feature Engineering
# * The Scholarship Category column was created to make it easier to analyse the types of scholarships awarded in the dataset and how much each type reduces the total cost a student has to bear (a compact np.select version of this grouping is sketched right after these notes).
# * By grouping the cost of accommodation and living expenses for one year, we can easily compare the total costs that must be borne by students each year, and also compare costs between universities.
# * Removing text inside parentheses in the Major column may be done to facilitate the data analysis process. Sometimes in columns like this, there is additional information or description that is not needed for analysis, such as specific information about courses or majors, study program codes, or specific education levels. By removing the text inside the parentheses, the Major column becomes easier to read and understand
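# A compact equivalent of the eight df.loc assignments in the feature engineering
# section above, sketched with np.select; the condition order and labels mirror the
# ones already defined, so nothing new is introduced.
tui = df["Tuition Covered?"] == 1
acc = df["Accomodation covered?"] == 1
liv = df["Living Expense Covered?"] == 1
conditions = [
    tui & acc & liv,
    tui & acc & ~liv,
    tui & ~acc & liv,
    ~tui & acc & liv,
    ~tui & acc & ~liv,
    ~tui & ~acc & liv,
    tui & ~acc & ~liv,
    ~tui & ~acc & ~liv,
]
labels = [
    "Fully Covered",
    "Tuition and Accomodation Covered",
    "Tuition and Living Covered",
    "Accomodation and Living Expense Covered",
    "Accomodation Covered",
    "Living Expense Covered",
    "Tuition Covered",
    "Not Fully Covered",
]
df["Scholarship Category"] = np.select(conditions, labels)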
# # Distribution University
uni = df["University"].value_counts().head(10)
plt.figure(figsize=(8, 4))
sns.set_style("whitegrid")
ax = sns.barplot(x=uni.values, y=uni.index)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Students")
plt.title("Top 10 Universities with Most International Students")
plt.show()
uni_major_count = (
df.groupby(["University", "Major"])
.size()
.reset_index(name="Count")
.sort_values(by="Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(x="Count", y="Major", hue="University", data=uni_major_count)
for i in ax.containers:
ax.bar_label(i, label_type="edge", padding=5, fontsize=10)
plt.title("Top 10 Popular Majors with Universities")
plt.xlabel("Count")
plt.ylabel("Major")
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
uni_level_count = (
df.groupby(["University", "Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_level_count, y="University", x="Count", hue="Level", orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
ax.legend(fontsize=8, loc="lower right")
plt.title("Top University with Populer Level", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group the data by University and calculate the max Tuition Covered
uni_tuition_max = (
df.groupby("University")["Tuition Covered"]
.max()
.reset_index(name="MaxTuitionFee")
.sort_values("MaxTuitionFee", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=uni_tuition_max, x="MaxTuitionFee", y="University", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Universities with Highest Tuition Cover", fontsize=10)
plt.xlabel("Max Tuition Fee (RMB)", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group by University and take the minimum of the 'Tuition Covered' column
uni_tuition_covered = (
df.groupby("University")["Tuition Covered"]
.min()
.reset_index()
.sort_values(by="Tuition Covered")
.head(15)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(x="Tuition Covered", y="University", data=uni_tuition_covered)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("10 Universities with the Lowest Tuition Covered (RMB)")
plt.xlabel("Min Tuition Covered (RMB)", fontsize=8)
plt.show()
# Groupby University Offering Scholarship Category Fully Covered
uni_scholarship_cover = (
df[df["Scholarship Category"] == "Fully Covered"]
.groupby(["University"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_scholarship_cover, x="Count", y="University", dodge=False, orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Number of Programs Offering Fully Cover", fontsize=8)
plt.title("Top 10 Majors Offering Scholarship Fully Covered", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group the data by University and calculate the max TuitionFee
uni_tuition_max = (
df.groupby("University")["Tuition fees to pay"]
.max()
.reset_index(name="MaxTuitionFee")
.sort_values("MaxTuitionFee", ascending=False)
.head(10)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=uni_tuition_max, x="MaxTuitionFee", y="University", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Universities with Highest Tuition Fee", fontsize=10)
plt.xlabel("Max Tuition Fee", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
uni_total_cost_year = (
df.groupby("University")["total_cost_year"]
.sum()
.reset_index(name="TotalCostYear")
.sort_values("TotalCostYear")
.head(10)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_total_cost_year, x="TotalCostYear", y="University", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Universities with Lowest Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year (RMB)", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group data by level and calculate the total of each cost category
cost_data = df.groupby("Level")[
["Tuition fees to pay", "Accomodation_To_Pay", "Expense_To_Pay"]
].sum()
# Create a line chart for each cost category
plt.plot(cost_data["Tuition fees to pay"], label="Tuition")
plt.plot(cost_data["Accomodation_To_Pay"], label="Accomodation")
plt.plot(cost_data["Expense_To_Pay"], label="Living Expenses")
# Set the title and axis labels
plt.title("Comparison of Tuition, Accomodation, and Living Expenses by University")
plt.xlabel("University")
plt.ylabel("Cost (RMB)")
# Add legend and show the plot
plt.legend()
plt.show()
# # Insight in University
# * From the distribution of universities mentioned, Zhejiang Normal University and North China Electric Power University are among the top choices for international students due to the number of international students enrolled. Additionally, Shanghai University of Traditional Medicine and China University of Petroleum offer majors that are popular among international students.
# * Furthermore, Zhejiang Normal University offers the most Bachelor's and Master's degrees, while North China Electric Power University provides the most Ph.D. degree offerings. On the other hand, Shanghai University of Traditional Medicine provides the most Fully Covered scholarships, indicating that it is an attractive option for those seeking financial aid.
# * The findings suggest that Bachelor's and Master's degrees have the highest associated costs, while the Non-Degree level has the lowest. However, it is worth noting that some universities such as Baoji University of Arts and Sciences and East China University of Science and Technology offer Fully Covered and Total Cost Year 0, which ensures that all expenses (Tuition Covered, Accommodation Covered, and Living Expenses) are taken care of.
# # Distribution Major
uni = df["Major"].value_counts()
top10_major = uni[:10]
plt.figure(figsize=(8, 4))
sns.set_style("whitegrid")
ax = sns.barplot(x=top10_major.values, y=top10_major.index)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Students")
plt.title("Top 10 Major with Most International Students")
plt.show()
maj_level_count = (
df.groupby(["Major", "Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_level_count, y="Major", x="Count", hue="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Top 10 Major Populer with Level Categories", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
maj_phd_count = (
df[df["Level"] == "Phd"]
.groupby(["Major", "Level", "University"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_phd_count, y="Major", x="Count", hue="University", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Top 10 Popular Majors with Phd Level", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.legend(fontsize=10, bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group by major and calculate the sum of tuition covered
maj_tuition_fee = (
df.groupby("Major")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_tuition_fee, y="Major", x="Tuition Covered", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Total Highest Tuition Covered", fontsize=10)
plt.xlabel("Tuition Covered (RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group the data by Major and calculate the max TuitionFee
maj_tuition_mean = (
df.groupby("Major")["Tuition fees to pay"]
.sum()
.reset_index(name="MajTuitionFee")
.sort_values("MajTuitionFee", ascending=False)
.head(10)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_tuition_mean, x="MajTuitionFee", y="Major", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Average Tuition Fee to Pay", fontsize=10)
plt.xlabel("Avg. Tuition Fee(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Major Programs Offering accommodation cover
major_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby(["Major"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=major_accommodation_cover, x="Count", y="Major", dodge=False, orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title("Top 10 Majors Offering Accommodation Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by major and calculate the sum of accommodation covered
major_accomodation = (
df.groupby("Major")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
.head(10)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=major_accomodation, y="Major", x="total_accomodation_year", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Total Highest Accommodation To Pay", fontsize=8)
plt.xlabel("Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Major Programs Offering Living Cover
major_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Major"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=major_living_cover, x="Count", y="Major", dodge=False, orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title("Top 10 Majors Offering Living Expense Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by major and calculate the sum of Total Expense Year
major_living = (
df.groupby("Major")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
.head(10)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=major_living, y="Major", x="total_expense_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Highest Total Expense to Pay Year", fontsize=10)
plt.xlabel("Total Expense Year(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group data by major and calculate the sum of Total Cost Year
major_living_cost = (
df.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(10)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=major_living_cost, y="Major", x="total_cost_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Highest Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# # Insight in Major
# * From the distribution of the Major column, it can be concluded that Computer Science and Technology and Mechanical Engineering are the most popular majors with the highest number of students, with 59 and 48 students respectively. Computer Science and Technology also offers the most levels, ranging from Bachelor's to Master's degrees, while Mechanical Engineering only offers the Master's degree level.
# * Additionally, Chinese Materia Medica is also one of the most sought-after majors with 28 Master's degree students and 10 PhD students. It is also worth noting that Chinese Materia Medica offers the highest number of Tuition Covered, Accommodation Covered, and Living Covered scholarships compared to other majors.
# * Although Computer Science and Technology and Mechanical Engineering are the most popular majors, it is important to consider other majors such as Chinese Materia Medica, as it offers a wide range of scholarships and opportunities for students pursuing this field of study.
# # Distribution Level
lev = (
df.groupby("Level")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev, x="counts", y="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Distribution of Level", fontsize=18)
plt.show()
print("Level With Non Degre\n")
df.loc[df["Level"] == "Non-Degree", ["University", "Major", "Level"]]
# Groupby Level dan Calculate Tuition Covered
level_tuition_covered = (
df.groupby("Level")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=level_tuition_covered, x="Tuition Covered", y="Level", orient="h")
# Add labels to the bars
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
# Set the title and axis labels
plt.title("Sum of Tuition Covered by Level", fontsize=10)
plt.xlabel("Level", fontsize=8)
plt.ylabel("Tuition Covered(RMB)", fontsize=8)
plt.show()
# Group the data by Level and calculate the max TuitionFee
lev_tuition = (
df.groupby("Level")["Tuition fees to pay"]
.sum()
.reset_index(name="MajTuitionFee")
.sort_values("MajTuitionFee", ascending=False)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev_tuition, x="MajTuitionFee", y="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Average Tuition Fee to Pay", fontsize=10)
plt.xlabel("Avg. Tuition Fee(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Level Offering Accomodation cover
level_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby(["Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=level_accommodation_cover, x="Count", y="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Level Offering Accommodation Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Level and calculate the sum of accommodation covered
level_accomodation = (
df.groupby("Level")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=level_accomodation, y="Level", x="total_accomodation_year", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Level with Sum Total Accommodation Year", fontsize=10)
plt.xlabel("Total Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# Group by Level for programs offering living expense cover
lev_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev_living_cover, x="Count", y="Level", dodge=False, orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Offering Living Expense Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Level and calculate the sum of Total Expense Year
level_expense = (
df.groupby("Level")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=level_expense, y="Level", x="total_expense_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Level with Total Expense Year", fontsize=10)
plt.xlabel("Expense Year(RMB)", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# Group data by Level and calculate the sum of Total Cost Year
Level_cost = (
df.groupby("Level")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=Level_cost, y="Level", x="total_cost_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Level with Lower Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# # Insight in Level
# * Based on the distribution of scholarship offerings by level, it can be concluded that Master's and Bachelor's levels are the most popular among universities in terms of providing scholarships. This is evidenced by the highest Tuition Covered and Accommodation Covered for these levels, indicating the high demand for these levels among international students seeking scholarships in China. However, it should be noted that the high scholarship coverage for these levels also results in the highest Total Accommodation Year, Total Living Year, and Total Cost Year among all levels.
# * Additionally, although the Ph.D. level ranks third in terms of the number of universities offering scholarships at this level, it is the second level with the highest Total Living Expense after the Master's level. This suggests that international students seeking scholarships at the Ph.D. level should be prepared to bear a significant portion of their living expenses while pursuing their studies in China.
# # Distribution Language
lan = (
df.groupby("Language")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan, x="counts", y="Language", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Distribution of Language", fontsize=10)
plt.show()
df.loc[df["Language"] == "Japanese", ["University", "Major", "Level", "Language"]]
df.loc[df["Language"] == "German", ["University", "Major", "Level", "Language"]]
# Groupby Language and Calculate sum of Tuition Covered
language_tuition_covered = (
df.groupby("Language")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=language_tuition_covered, x="Tuition Covered", y="Language", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.title("Tuition Covered by Language", fontsize=10)
plt.xlabel("Tuition Covered(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Groupby Language Offering Accomodation Cover
Lan_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby("Language")
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=Lan_accommodation_cover, y="Language", x="Count")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Language Offering Accommodation Cover", fontsize=10)
plt.show()
# Group data by Language and calculate the sum of Total Accommodation Year
lan_accomodation = (
df.groupby("Language")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_accomodation, y="Language", x="total_accomodation_year", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Language with Total Accomodation Year", fontsize=10)
plt.xlabel("Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Groupby Language offering Living Expense Covere
lan_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Language"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_living_cover, x="Count", y="Language", dodge=False, orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.title("Language Offering Living Expense Covered", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Language and calculate the sum of Total Expense Year
lan_living = (
df.groupby("Language")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan_living, y="Language", x="total_expense_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Language with Total Expense Year", fontsize=10)
plt.xlabel("Total Expense Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Group data by Language and calculate the sum of Total Cost Year
lan_cost = (
df.groupby("Language")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan_cost, y="Language", x="total_cost_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Language with Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# # Distribution Scholarship Category
sch = (
df.groupby("Scholarship Category")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=sch, x="counts", y="Scholarship Category", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.ylabel("Scholarship Category", fontsize=8)
plt.xlabel("Count", fontsize=8)
plt.title("Distribution of Scholarship Category", fontsize=10)
plt.show()
uni_tuition_covered = (
df[df["Scholarship Category"] == "Tuition Covered"]
.groupby("University")["total_cost_year"]
.mean()
.reset_index()
.sort_values(by="total_cost_year", ascending=True)
.head(5)
)
# Create bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_tuition_covered, x="total_cost_year", y="University", orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title(
"Top 5 Universities with Tuition Covered Scholarship Category based on Avg Total Cost Year",
fontsize=10,
)
plt.xlabel("Mean Total Cost Year", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# filter data scholarship category == Not Fully Covered
maj_tui_acc_covered = (
df[df["Scholarship Category"] == "Tuition and Accomodation Covered"]
.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(5)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_tui_acc_covered, x="total_cost_year", y="Major", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 5 Major with Scholarship Category Tuition and Accomodation Covered and Total Cost Year Tertinggi",
fontsize=10,
)
plt.show()
# filter data scholarship category == Not Fully Covered
maj_fully_covered = (
df[df["Scholarship Category"] == "Fully Covered"]
.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(5)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_fully_covered, x="total_cost_year", y="Major", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 5 Major with Scholarship Category Fully Covered and Total Cost Year Tertinggi",
fontsize=10,
)
plt.show()
maj_tui_cost_cov = df[
(df["Scholarship Category"] == "Tuition Covered") & (df["total_cost_year"] < 25000)
]
# Take the 10 most frequent majors in the filtered data
major_count = maj_tui_cost_cov["Major"].value_counts()[:10].index.tolist()
# Filter the data to the selected majors
plot_data = maj_tui_cost_cov[maj_tui_cost_cov["Major"].isin(major_count)]
# Plot the data using seaborn
plt.figure(figsize=(8, 6))
ax = sns.barplot(
x="total_cost_year", y="Major", hue="University", data=plot_data, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=10)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 10 Major with Tuition Covered and Total Cost Year Below 25000", fontsize=10
)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
maj_tui_acc_cov = df[
(df["Scholarship Category"] == "Tuition and Accomodation Covered")
& (df["total_cost_year"] < 25000)
]
# Ambil 10 major dengan total cost year tertinggi
major_count = maj_tui_acc_cov["Major"].value_counts()[:10].index.tolist()
# Filter data untuk major yang terpilih
maj_tui_acc = maj_tui_acc_cov[maj_tui_acc_cov["Major"].isin(major_count)]
# Plot data menggunakan seaborn
plt.figure(figsize=(8, 6))
ax = sns.barplot(
x="total_cost_year", y="Major", hue="University", data=maj_tui_acc, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=10)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 10 Major with Tuition and Accomodation Covered and Total Cost Year Below 25000",
fontsize=10,
)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
uni_maj_fully_covered = (
df[(df["Scholarship Category"] == "Fully Covered")]
.groupby(["University", "Major"])["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(20)
)
plt.figure(figsize=(10, 6))
ax = sns.barplot(
x="total_cost_year",
y="Major",
hue="University",
data=uni_maj_fully_covered,
orient="h",
)
for i in ax.containers:
ax.bar_label(i, fontsize=10)
plt.xlabel("Total Cost Year(RMB)")
plt.ylabel("Major")
plt.title(
"20 Major with University Top 4 Have Scholarship Category Fully Covered and Highest Total Cost Year"
)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
plt.figure(figsize=(8, 6))
ax = sns.countplot(y="Level", data=df, orient="h", hue="Scholarship Category")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Level of Study by Scholarship Category", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
filtered_Bachelor = (
df[(df["Scholarship Category"] == "Tuition Covered") & (df["Level"] == "Bachelor")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=filtered_Bachelor, x="Major", y=filtered_Bachelor.index, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title(
"Top 5 Universities with Tuition Covered Scholarship and Bachelor Level",
fontsize=10,
)
plt.show()
filtered_bachelor = (
df[
(df["Scholarship Category"] == "Tuition and Accomodation Covered")
& (df["Level"] == "Bachelor")
]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=filtered_bachelor, x="Major", y=filtered_bachelor.index, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title(
"Universities with Tuition and Accomodation Covered Scholarship and Bachelor Level",
fontsize=10,
)
plt.show()
filtered_Master = (
df[
(df["Scholarship Category"] == "Tuition and Accomodation Covered")
& (df["Level"] == "Master")
]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=filtered_Master, x="Major", y=filtered_Master.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title(
"Top 5 Universities with Tuition and Accomodation Covered Scholarship and Master Level",
fontsize=10,
)
plt.show()
filtered_master = (
df[(df["Scholarship Category"] == "Fully Covered") & (df["Level"] == "Master")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=filtered_master, x="Major", y=filtered_master.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title("Universities with Fully Covered Scholarship and Master Level", fontsize=10)
plt.show()
filtered_phd = (
df[(df["Scholarship Category"] == "Fully Covered") & (df["Level"] == "Phd")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(6, 2))
ax = sns.barplot(data=filtered_phd, x="Major", y=filtered_phd.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title("Universities with Fully Covered Scholarship and PhD Level", fontsize=10)
plt.show()
filtered_phd = (
df[(df["Scholarship Category"] == "Tuition Covered") & (df["Level"] == "Phd")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(6, 2))
ax = sns.barplot(data=filtered_phd, x="Major", y=filtered_phd.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title("Universities with Tuition Covered Scholarship and PhD Level", fontsize=10)
plt.show()
# # Insight Scholarship Category
# * From the distribution of the Scholarship Category column, it can be concluded that more than 50% of universities offer scholarships that only cover tuition fees, followed by the Fully Covered and Tuition and Accommodation Covered categories. Moreover, Dongguan University of Technology and Zhengzhou University are universities that offer Tuition Covered scholarships with the lowest average total cost per year.
# * Furthermore, Bachelor's level has the highest total Tuition Covered, the Master's level has the most Fully Covered scholarships, and the Ph.D. level has the most Living Expense Covered scholarships. Thus, the distribution between Level and Scholarship Category can be seen.
# * However, from the distribution of University, Level, and Scholarship Categories, it can be seen that Northeast Petroleum University has the most majors offering Tuition Covered scholarships at the Bachelor's level, while the Zhejiang University of Technology has the most scholarships for Tuition and Accommodation Covered at Bachelor's level.
# * In addition, China University of Petroleum - Beijing has the most Fully Covered scholarships at the Master's level, and Zhejiang Normal University has the most scholarships for Tuition and Accommodation Covered at the Master's level.
# * Finally, at the Ph.D. level, East China University of Science and Technology has the most Fully Covered scholarships, while North China Electric Power University has the most Tuition Covered scholarships.
# # Filtered Data
# Created Data Level Bachelor For University and Student Not Pay Tuition, Accomodation, Living (Fully Covered)
filtered_data_Bachelor = df[
(df["Level"] == "Bachelor")
& (df["Scholarship Category"] == "Fully Covered")
& (df["total_cost_year"] == 0)
][["University", "Major", "Level", "total_cost_year"]].sort_values(
"University", ascending=True
)
print("University and Major in Bachelor Level With Total Cost Year 0")
filtered_data_Bachelor
filtered_data_Master = df[
(df["Level"] == "Master")
& (df["Scholarship Category"] == "Fully Covered")
& (df["total_cost_year"] == 0)
][["University", "Major", "Level", "total_cost_year"]].sort_values(
"University", ascending=True
)
print("University and Major in Master Level With Total Cost Year 0")
filtered_data_Master
filtered_data_Phd = df[
(df["Level"] == "Phd")
& (df["Scholarship Category"] == "Fully Covered")
& (df["total_cost_year"] == 0)
][["University", "Major", "Level", "total_cost_year"]].sort_values(
"total_cost_year", ascending=True
)
print("University and Major in PhD Level With Total Cost Year 0")
filtered_data_Phd
# Created University, Major, Level, and Not Fully Covered Below 50.000(RMB). Because 75% Total Cost Year is 50.000 and Students can prepare of fees
filtered_data_uni = df[
(df["Scholarship Category"] == "Not Fully Covered")
& (df["total_cost_year"] < 50000)
][["University", "Major", "Level", "total_cost_year"]]
print(
"Univerisy, Major, and Level With Not Fully Covered Scholarship Category\n Total Cost Year Below 50.000(RMB)\n"
)
filtered_data_uni
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/496/129496254.ipynb
|
china-scholarship-data-may-2019
|
mcmuralishclint96
|
[{"Id": 129496254, "ScriptId": 38357811, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6488245, "CreationDate": "05/14/2023 09:51:53", "VersionNumber": 6.0, "Title": "Exploring Data Scholarship Opportunities in China", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 741.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 678.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185612475, "KernelVersionId": 129496254, "SourceDatasetVersionId": 436936}]
|
[{"Id": 436936, "DatasetId": 196715, "DatasourceVersionId": 452465, "CreatorUserId": 2824142, "LicenseName": "Other (specified in description)", "CreationDate": "05/19/2019 03:10:48", "VersionNumber": 3.0, "Title": "China Scholarship Data - May 2019", "Slug": "china-scholarship-data-may-2019", "Subtitle": NaN, "Description": "The data was collected through web scraping https://www.cucas.edu.cn/china_scholarships/\nThe code to the web scraping program and data cleaning program is stored in https://github.com/mcmuralishclint/CUCAS\nThe dataset contains information about the scholarship programs in China as of May 2019.", "VersionNotes": "Cleaned", "TotalCompressedBytes": 517175.0, "TotalUncompressedBytes": 517175.0}]
|
[{"Id": 196715, "CreatorUserId": 2824142, "OwnerUserId": 2824142.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 436936.0, "CurrentDatasourceVersionId": 452465.0, "ForumId": 207653, "Type": 2, "CreationDate": "05/18/2019 03:08:18", "LastActivityDate": "05/18/2019", "TotalViews": 9460, "TotalDownloads": 791, "TotalVotes": 26, "TotalKernels": 3}]
|
[{"Id": 2824142, "UserName": "mcmuralishclint96", "DisplayName": "Muralish Clint", "RegisterDate": "02/16/2019", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1)
sns.set_palette("bright")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Data Understanding
df = pd.read_csv("/kaggle/input/china-scholarship-data-may-2019/cleaned.csv")
df.head()
df.shape
df.info()
df.describe().T.drop("count", axis=1).T
df.describe(include=object).T.drop("count", axis=1).T
# # Data Preprocessing
df.isna().sum()
df.dropna(inplace=True)
df.duplicated().sum()
sns.clustermap(df.corr(), annot=True, cmap="mako", figsize=(8, 8))
plt.show
# # Feature Engineering
# Create Column Category Tuition Covered?
df["Tuition Covered?"] = df["Tuition Covered"].apply(lambda x: 1 if x > 0 else 0)
df["Tuition Covered?"] = df["Tuition Covered?"].astype(int)
# Groupping for Create Scholarship Category
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Fully Covered"
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Tuition and Accomodation Covered"
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Tuition and Living Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Accomodation and Living Expense Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 1)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Accomodation Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 1),
"Scholarship Category",
] = "Living Expense Covered"
df.loc[
(df["Tuition Covered?"] == 1)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Tuition Covered"
df.loc[
(df["Tuition Covered?"] == 0)
& (df["Accomodation covered?"] == 0)
& (df["Living Expense Covered?"] == 0),
"Scholarship Category",
] = "Not Fully Covered"
# Create column for Total Accomodation in Year
convert_to_month = {"YEAR": 1, "SEMESTER": 2, "TERM": 3, "MONTH": 12, "DAY": 365}
df["Accomodation_duration"] = df["Accomodation_duration"].replace(convert_to_month)
df["Accomodation_duration"] = df["Accomodation_duration"].astype(int)
df["total_accomodation_year"] = df["Accomodation_duration"] * df["Accomodation_To_Pay"]
# Create Column for Total Expense Year
convert_to_num = {"MONTH": 12}
df["Expense_duration"] = df["Expense_duration"].replace(convert_to_month)
df["Expense_duration"] = df["Expense_duration"].astype(int)
df["total_expense_year"] = df["Expense_duration"] * df["Expense_To_Pay"]
# Create Total Cost Year Without Coverage
df["total_cost_year"] = (
df["Tuition fees to pay"] + df["total_accomodation_year"] + df["total_expense_year"]
)
# Remove text
df["Major"] = df["Major"].str.replace(r"\(.*\)", "").str.strip()
df["Major"] = df["Major"].replace("[^a-zA-Z ]", "", regex=True)
# # Reasoning for Feature Engineering
# * The Scholarship Category column was created to facilitate analysis of the types of scholarships awarded in the dataset. This makes it possible to understand more about the types of scholarships present in the dataset and helps in a more detailed analysis of how much impact the scholarship has on the total cost to be borne by the student.
# * By grouping the cost of accommodation and living expenses for one year, we can easily compare the total costs that must be borne by students each year, and also compare costs between universities.
# * Removing text inside parentheses in the Major column may be done to facilitate the data analysis process. Sometimes in columns like this, there is additional information or description that is not needed for analysis, such as specific information about courses or majors, study program codes, or specific education levels. By removing the text inside the parentheses, the Major column becomes easier to read and understand
# # Distribution University
uni = df["University"].value_counts().head(10)
plt.figure(figsize=(8, 4))
sns.set_style("whitegrid")
ax = sns.barplot(x=uni.values, y=uni.index)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Students")
plt.title("Top 10 Universities with Most International Students")
plt.show()
uni_major_count = (
df.groupby(["University", "Major"])
.size()
.reset_index(name="Count")
.sort_values(by="Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(x="Count", y="Major", hue="University", data=uni_major_count)
for i in ax.containers:
ax.bar_label(i, label_type="edge", padding=5, fontsize=10)
plt.title("Top 10 Popular Majors with Universities")
plt.xlabel("Count")
plt.ylabel("Major")
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
uni_level_count = (
df.groupby(["University", "Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_level_count, y="University", x="Count", hue="Level", orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
ax.legend(fontsize=8, loc="lower right")
plt.title("Top University with Populer Level", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group the data by University and calculate the max Tuition Covered
uni_tuition_max = (
df.groupby("University")["Tuition Covered"]
.max()
.reset_index(name="MaxTuitionFee")
.sort_values("MaxTuitionFee", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=uni_tuition_max, x="MaxTuitionFee", y="University", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Universities with Highest Tuition Cover", fontsize=10)
plt.xlabel("Max Tuition Fee (RMB)", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Groupby University dan ambil nilai rata-rata dari kolom 'Tuition Covered'
uni_tuition_covered = (
df.groupby("University")["Tuition Covered"]
.min()
.reset_index()
.sort_values(by="Tuition Covered")
.head(15)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(x="Tuition Covered", y="University", data=uni_tuition_covered)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("10 Universities with the Lowest Tuition Covered (RMB)")
plt.xlabel("Min Tuition Covered (RMB)", fontsize=8)
plt.show()
# Groupby University Offering Scholarship Category Fully Covered
uni_scholarship_cover = (
df[df["Scholarship Category"] == "Fully Covered"]
.groupby(["University"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_scholarship_cover, x="Count", y="University", dodge=False, orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Number of Programs Offering Fully Cover", fontsize=8)
plt.title("Top 10 Majors Offering Scholarship Fully Covered", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group the data by University and calculate the max TuitionFee
uni_tuition_max = (
df.groupby("University")["Tuition fees to pay"]
.max()
.reset_index(name="MaxTuitionFee")
.sort_values("MaxTuitionFee", ascending=False)
.head(10)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=uni_tuition_max, x="MaxTuitionFee", y="University", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Universities with Highest Tuition Fee", fontsize=10)
plt.xlabel("Max Tuition Fee", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
uni_total_cost_year = (
df.groupby("University")["total_cost_year"]
.sum()
.reset_index(name="TotalCostYear")
.sort_values("TotalCostYear")
.head(10)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_total_cost_year, x="TotalCostYear", y="University", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Universities with Lowest Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year (RMB)", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# Group data by university and calculate the mean of each cost category
cost_data = df.groupby("Level")[
["Tuition fees to pay", "Accomodation_To_Pay", "Expense_To_Pay"]
].sum()
# Create a line chart for each cost category
plt.plot(cost_data["Tuition fees to pay"], label="Tuition")
plt.plot(cost_data["Accomodation_To_Pay"], label="Accomodation")
plt.plot(cost_data["Expense_To_Pay"], label="Living Expenses")
# Set the title and axis labels
plt.title("Comparison of Tuition, Accomodation, and Living Expenses by University")
plt.xlabel("University")
plt.ylabel("Cost (RMB)")
# Add legend and show the plot
plt.legend()
plt.show()
# # Insight in University
# * From the distribution of universities mentioned, Zhejiang Normal University and North China Electric Power University are among the top choices for international students due to the number of international students enrolled. Additionally, Shanghai University of Traditional Medicine and China University of Petroleum offer majors that are popular among international students.
# * Furthermore, Zhejiang Normal University offers the most Bachelor's and Master's degrees, while North China Electric Power University provides the most Ph.D. degree offerings. On the other hand, Shanghai University of Traditional Medicine provides the most Fully Covered scholarships, indicating that it is an attractive option for those seeking financial aid.
# * The findings suggest that Bachelor's and Master's degrees have the highest associated costs, while the Non-Degree level has the lowest. However, it is worth noting that some universities such as Baoji University of Arts and Sciences and East China University of Science and Technology offer Fully Covered and Total Cost Year 0, which ensures that all expenses (Tuition Covered, Accommodation Covered, and Living Expenses) are taken care of.
# # Distribution Major
uni = df["Major"].value_counts()
top10_major = uni[:10]
plt.figure(figsize=(8, 4))
sns.set_style("whitegrid")
ax = sns.barplot(x=top10_major.values, y=top10_major.index)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Students")
plt.title("Top 10 Major with Most International Students")
plt.show()
maj_level_count = (
df.groupby(["Major", "Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_level_count, y="Major", x="Count", hue="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Top 10 Major Populer with Level Categories", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
maj_phd_count = (
df[df["Level"] == "Phd"]
.groupby(["Major", "Level", "University"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_phd_count, y="Major", x="Count", hue="University", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Top 10 Popular Majors with Phd Level", fontsize=10)
plt.xlabel("Count", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.legend(fontsize=10, bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group by major and calculate the sum of tuition covered
maj_tuition_fee = (
df.groupby("Major")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
.head(10)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_tuition_fee, y="Major", x="Tuition Covered", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Total Highest Tuition Covered", fontsize=10)
plt.xlabel("Tuition Covered (RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group the data by Major and calculate the max TuitionFee
maj_tuition_mean = (
df.groupby("Major")["Tuition fees to pay"]
.sum()
.reset_index(name="MajTuitionFee")
.sort_values("MajTuitionFee", ascending=False)
.head(10)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_tuition_mean, x="MajTuitionFee", y="Major", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Average Tuition Fee to Pay", fontsize=10)
plt.xlabel("Avg. Tuition Fee(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Major Programs Offering accommodation cover
major_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby(["Major"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=major_accommodation_cover, x="Count", y="Major", dodge=False, orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title("Top 10 Majors Offering Accommodation Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by major and calculate the sum of accommodation covered
major_accomodation = (
df.groupby("Major")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
.head(10)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=major_accomodation, y="Major", x="total_accomodation_year", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Total Highest Accommodation To Pay", fontsize=8)
plt.xlabel("Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Major Programs Offering Living Cover
major_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Major"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
.head(10)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=major_living_cover, x="Count", y="Major", dodge=False, orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title("Top 10 Majors Offering Living Expense Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by major and calculate the sum of Total Expense Year
major_living = (
df.groupby("Major")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
.head(10)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=major_living, y="Major", x="total_expense_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Highest Total Expense to Pay Year", fontsize=10)
plt.xlabel("Total Expense Year(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Group data by major and calculate the sum of Total Cost Year
major_living_cost = (
df.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(10)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=major_living_cost, y="Major", x="total_cost_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Highest Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# # Insight in Major
# * From the distribution of the Major column, it can be concluded that Computer Science and Technology and Mechanical Engineering are the most popular majors with the highest number of students, with 59 and 48 students respectively. Computer Science and Technology also offers the most levels, ranging from Bachelor's to Master's degrees, while Mechanical Engineering only offers the Master's degree level.
# * Additionally, Chinese Materia Medica is also one of the most sought-after majors with 28 Master's degree students and 10 PhD students. It is also worth noting that Chinese Materia Medica offers the highest number of Tuition Covered, Accommodation Covered, and Living Covered scholarships compared to other majors.
# * Although Computer Science and Technology and Mechanical Engineering are the most popular majors, it is important to consider other majors such as Chinese Materia Medica, as it offers a wide range of scholarships and opportunities for students pursuing this field of study.
# # Distribution Level
lev = (
df.groupby("Level")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev, x="counts", y="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Distribution of Level", fontsize=18)
plt.show()
print("Level With Non Degre\n")
df.loc[df["Level"] == "Non-Degree", ["University", "Major", "Level"]]
# Groupby Level dan Calculate Tuition Covered
level_tuition_covered = (
df.groupby("Level")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=level_tuition_covered, x="Tuition Covered", y="Level", orient="h")
# Add labels to the bars
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
# Set the title and axis labels
plt.title("Sum of Tuition Covered by Level", fontsize=10)
plt.xlabel("Level", fontsize=8)
plt.ylabel("Tuition Covered(RMB)", fontsize=8)
plt.show()
# Group the data by Level and calculate the max TuitionFee
lev_tuition = (
df.groupby("Level")["Tuition fees to pay"]
.sum()
.reset_index(name="MajTuitionFee")
.sort_values("MajTuitionFee", ascending=False)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev_tuition, x="MajTuitionFee", y="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Top 10 Major with Average Tuition Fee to Pay", fontsize=10)
plt.xlabel("Avg. Tuition Fee(RMB)", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.show()
# Groupby Level Offering Accomodation cover
level_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby(["Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=level_accommodation_cover, x="Count", y="Level", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Level Offering Accommodation Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Level and calculate the sum of accommodation covered
level_accomodation = (
df.groupby("Level")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=level_accomodation, y="Level", x="total_accomodation_year", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Level with Sum Total Accommodation Year", fontsize=10)
plt.xlabel("Total Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# Groupby University dan Major yang memberikan accommodation cover
lev_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Level"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lev_living_cover, x="Count", y="Level", dodge=False, orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Offering Living Expense Cover", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Level and calculate the sum of Total Expense Year
level_expense = (
df.groupby("Level")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=level_expense, y="Level", x="total_expense_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Level with Total Expense Year", fontsize=10)
plt.xlabel("Expense Year(RMB)", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# Group data by Level and calculate the sum of Total Cost Year
Level_cost = (
df.groupby("Level")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
)
# Create a h bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=Level_cost, y="Level", x="total_cost_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Level with Lower Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.show()
# # Insight in Level
# * Based on the distribution of scholarship offerings by level, it can be concluded that Master's and Bachelor's levels are the most popular among universities in terms of providing scholarships. This is evidenced by the highest Tuition Covered and Accommodation Covered for these levels, indicating the high demand for these levels among international students seeking scholarships in China. However, it should be noted that the high scholarship coverage for these levels also results in the highest Total Accommodation Year, Total Living Year, and Total Cost Year among all levels.
# * Additionally, although the Ph.D. level ranks third in terms of the number of universities offering scholarships at this level, it is the second level with the highest Total Living Expense after the Master's level. This suggests that international students seeking scholarships at the Ph.D. level should be prepared to bear a significant portion of their living expenses while pursuing their studies in China.
# # Distribution Language
lan = (
df.groupby("Language")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan, x="counts", y="Language", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Distribution of Language", fontsize=10)
plt.show()
df.loc[df["Language"] == "Japanese", ["University", "Major", "Level", "Language"]]
df.loc[df["Language"] == "German", ["University", "Major", "Level", "Language"]]
# Groupby Language and Calculate sum of Tuition Covered
language_tuition_covered = (
df.groupby("Language")["Tuition Covered"]
.sum()
.reset_index()
.sort_values("Tuition Covered", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=language_tuition_covered, x="Tuition Covered", y="Language", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.title("Tuition Covered by Language", fontsize=10)
plt.xlabel("Tuition Covered(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Groupby Language Offering Accomodation Cover
Lan_accommodation_cover = (
df[df["Accomodation covered?"] == 1]
.groupby("Language")
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
# Plot the data using a h barplot
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=Lan_accommodation_cover, y="Language", x="Count")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title("Language Offering Accommodation Cover", fontsize=10)
plt.show()
# Group data by Language and calculate the sum of Total Accommodation Year
lan_accomodation = (
df.groupby("Language")["total_accomodation_year"]
.sum()
.reset_index()
.sort_values("total_accomodation_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_accomodation, y="Language", x="total_accomodation_year", orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Language with Total Accomodation Year", fontsize=10)
plt.xlabel("Accommodation Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Groupby Language offering Living Expense Covere
lan_living_cover = (
df[df["Living Expense Covered?"] == 1]
.groupby(["Language"])
.size()
.reset_index(name="Count")
.sort_values("Count", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=lan_living_cover, x="Count", y="Language", dodge=False, orient="h"
)
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8, padding=5)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.title("Language Offering Living Expense Covered", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
# Group data by Language and calculate the sum of Total Expense Year
lan_living = (
df.groupby("Language")["total_expense_year"]
.sum()
.reset_index()
.sort_values("total_expense_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan_living, y="Language", x="total_expense_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Language with Total Expense Year", fontsize=10)
plt.xlabel("Total Expense Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# Group data by Language and calculate the sum of Total Cost Year
lan_cost = (
df.groupby("Language")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=lan_cost, y="Language", x="total_cost_year", orient="h")
for i in ax.containers:
ax.bar_label(i, label_type="edge", fontsize=8)
plt.title("Language with Total Cost Year", fontsize=10)
plt.xlabel("Total Cost Year(RMB)", fontsize=8)
plt.ylabel("Language", fontsize=8)
plt.show()
# # Distribution Scholarship Category
sch = (
df.groupby("Scholarship Category")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=sch, x="counts", y="Scholarship Category", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.ylabel("Scholarship Category", fontsize=8)
plt.xlabel("Count", fontsize=8)
plt.title("Distribution of Scholarship Category", fontsize=10)
plt.show()
uni_tuition_covered = (
df[df["Scholarship Category"] == "Tuition Covered"]
.groupby("University")["total_cost_year"]
.mean()
.reset_index()
.sort_values(by="total_cost_year", ascending=True)
.head(5)
)
# Create bar plot
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=uni_tuition_covered, x="total_cost_year", y="University", orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.title(
"Top 5 Universities with Tuition Covered Scholarship Category based on Avg Total Cost Year",
fontsize=10,
)
plt.xlabel("Mean Total Cost Year", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.show()
# filter data scholarship category == Not Fully Covered
maj_tui_acc_covered = (
df[df["Scholarship Category"] == "Tuition and Accomodation Covered"]
.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(5)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_tui_acc_covered, x="total_cost_year", y="Major", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 5 Major with Scholarship Category Tuition and Accomodation Covered and Total Cost Year Tertinggi",
fontsize=10,
)
plt.show()
# filter data scholarship category == Not Fully Covered
maj_fully_covered = (
df[df["Scholarship Category"] == "Fully Covered"]
.groupby("Major")["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(5)
)
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=maj_fully_covered, x="total_cost_year", y="Major", orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 5 Major with Scholarship Category Fully Covered and Total Cost Year Tertinggi",
fontsize=10,
)
plt.show()
maj_tui_cost_cov = df[
(df["Scholarship Category"] == "Tuition Covered") & (df["total_cost_year"] < 25000)
]
# Ambil 10 major dengan total cost year tertinggi
major_count = maj_tui_cost_cov["Major"].value_counts()[:10].index.tolist()
# Filter data untuk major yang terpilih
plot_data = maj_tui_cost_cov[maj_tui_cost_cov["Major"].isin(major_count)]
# Plot data menggunakan seaborn
plt.figure(figsize=(8, 6))
ax = sns.barplot(
x="total_cost_year", y="Major", hue="University", data=plot_data, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=10)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 10 Major with Tuition Covered and Total Cost Year Below 25000", fontsize=10
)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
maj_tui_acc_cov = df[
(df["Scholarship Category"] == "Tuition and Accomodation Covered")
& (df["total_cost_year"] < 25000)
]
# Ambil 10 major dengan total cost year tertinggi
major_count = maj_tui_acc_cov["Major"].value_counts()[:10].index.tolist()
# Filter data untuk major yang terpilih
maj_tui_acc = maj_tui_acc_cov[maj_tui_acc_cov["Major"].isin(major_count)]
# Plot data menggunakan seaborn
plt.figure(figsize=(8, 6))
ax = sns.barplot(
x="total_cost_year", y="Major", hue="University", data=maj_tui_acc, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=10)
plt.xlabel("Total Cost Year", fontsize=8)
plt.ylabel("Major", fontsize=8)
plt.title(
"Top 10 Major with Tuition and Accomodation Covered and Total Cost Year Below 25000",
fontsize=10,
)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
uni_maj_fully_covered = (
df[(df["Scholarship Category"] == "Fully Covered")]
.groupby(["University", "Major"])["total_cost_year"]
.sum()
.reset_index()
.sort_values("total_cost_year", ascending=False)
.head(20)
)
plt.figure(figsize=(10, 6))
ax = sns.barplot(
x="total_cost_year",
y="Major",
hue="University",
data=uni_maj_fully_covered,
orient="h",
)
for i in ax.containers:
ax.bar_label(i, fontsize=10)
plt.xlabel("Total Cost Year(RMB)")
plt.ylabel("Major")
plt.title(
"20 Major with University Top 4 Have Scholarship Category Fully Covered and Highest Total Cost Year"
)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
plt.figure(figsize=(8, 6))
ax = sns.countplot(y="Level", data=df, orient="h", hue="Scholarship Category")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Count", fontsize=8)
plt.ylabel("Level", fontsize=8)
plt.title("Level of Study by Scholarship Category", fontsize=10)
plt.legend(fontsize=10, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
plt.show()
filtered_Bachelor = (
df[(df["Scholarship Category"] == "Tuition Covered") & (df["Level"] == "Bachelor")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=filtered_Bachelor, x="Major", y=filtered_Bachelor.index, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title(
"Top 5 Universities with Tuition Covered Scholarship and Bachelor Level",
fontsize=10,
)
plt.show()
filtered_bachelor = (
df[
(df["Scholarship Category"] == "Tuition and Accomodation Covered")
& (df["Level"] == "Bachelor")
]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(
data=filtered_bachelor, x="Major", y=filtered_bachelor.index, orient="h"
)
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title(
"Universities with Tuition and Accomodation Covered Scholarship and Bachelor Level",
fontsize=10,
)
plt.show()
filtered_Master = (
df[
(df["Scholarship Category"] == "Tuition and Accomodation Covered")
& (df["Level"] == "Master")
]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=filtered_Master, x="Major", y=filtered_Master.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title(
"Top 5 Universities with Tuition and Accomodation Covered Scholarship and Master Level",
fontsize=10,
)
plt.show()
filtered_master = (
df[(df["Scholarship Category"] == "Fully Covered") & (df["Level"] == "Master")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(8, 4))
ax = sns.barplot(data=filtered_master, x="Major", y=filtered_master.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title("Universities with Fully Covered Scholarship and Master Level", fontsize=10)
plt.show()
filtered_phd = (
df[(df["Scholarship Category"] == "Fully Covered") & (df["Level"] == "Phd")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(6, 2))
ax = sns.barplot(data=filtered_phd, x="Major", y=filtered_phd.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title("Universities with Fully Covered Scholarship and PhD Level", fontsize=10)
plt.show()
filtered_phd = (
df[(df["Scholarship Category"] == "Tuition Covered") & (df["Level"] == "Phd")]
.pivot_table(index="University", values="Major", aggfunc=pd.Series.nunique)
.nlargest(10, "Major")
)
# Plot the data
plt.figure(figsize=(6, 2))
ax = sns.barplot(data=filtered_phd, x="Major", y=filtered_phd.index, orient="h")
for i in ax.containers:
ax.bar_label(i, fontsize=8)
plt.xlabel("Number of Majors", fontsize=8)
plt.ylabel("University", fontsize=8)
plt.title("Universities with Tuition Covered Scholarship and PhD Level", fontsize=10)
plt.show()
# # Insight Scholarship Category
# * From the distribution of the Scholarship Category column, it can be concluded that more than 50% of universities offer scholarships that only cover tuition fees, followed by the Fully Covered and Tuition and Accommodation Covered categories. Moreover, Dongguan University of Technology and Zhengzhou University are universities that offer Tuition Covered scholarships with the lowest average total cost per year.
# * Furthermore, Bachelor's level has the highest total Tuition Covered, the Master's level has the most Fully Covered scholarships, and the Ph.D. level has the most Living Expense Covered scholarships. Thus, the distribution between Level and Scholarship Category can be seen.
# * However, from the distribution of University, Level, and Scholarship Categories, it can be seen that Northeast Petroleum University has the most majors offering Tuition Covered scholarships at the Bachelor's level, while the Zhejiang University of Technology has the most scholarships for Tuition and Accommodation Covered at Bachelor's level.
# * In addition, China University of Petroleum - Beijing has the most Fully Covered scholarships at the Master's level, and Zhejiang Normal University has the most scholarships for Tuition and Accommodation Covered at the Master's level.
# * Finally, at the Ph.D. level, East China University of Science and Technology has the most Fully Covered scholarships, while North China Electric Power University has the most Tuition Covered scholarships.
# # Filtered Data
# Created Data Level Bachelor For University and Student Not Pay Tuition, Accomodation, Living (Fully Covered)
filtered_data_Bachelor = df[
(df["Level"] == "Bachelor")
& (df["Scholarship Category"] == "Fully Covered")
& (df["total_cost_year"] == 0)
][["University", "Major", "Level", "total_cost_year"]].sort_values(
"University", ascending=True
)
print("University and Major in Bachelor Level With Total Cost Year 0")
filtered_data_Bachelor
filtered_data_Master = df[
(df["Level"] == "Master")
& (df["Scholarship Category"] == "Fully Covered")
& (df["total_cost_year"] == 0)
][["University", "Major", "Level", "total_cost_year"]].sort_values(
"University", ascending=True
)
print("University and Major in Master Level With Total Cost Year 0")
filtered_data_Master
filtered_data_Phd = df[
(df["Level"] == "Phd")
& (df["Scholarship Category"] == "Fully Covered")
& (df["total_cost_year"] == 0)
][["University", "Major", "Level", "total_cost_year"]].sort_values(
"total_cost_year", ascending=True
)
print("University and Major in PhD Level With Total Cost Year 0")
filtered_data_Phd
# Created University, Major, Level, and Not Fully Covered Below 50.000(RMB). Because 75% Total Cost Year is 50.000 and Students can prepare of fees
filtered_data_uni = df[
(df["Scholarship Category"] == "Not Fully Covered")
& (df["total_cost_year"] < 50000)
][["University", "Major", "Level", "total_cost_year"]]
print(
"Univerisy, Major, and Level With Not Fully Covered Scholarship Category\n Total Cost Year Below 50.000(RMB)\n"
)
filtered_data_uni
| false | 1 | 13,088 | 2 | 13,209 | 13,088 |
||
129496081
|
<jupyter_start><jupyter_text>List of World Cities by Population Density
# **CONTENT**
This is a list of cities worldwide by population density. The population, population density and land area for the cities listed are based on the entire city proper, the defined boundary or border of a city or the city limits of the city. The population density of the cities listed is based on the average number of people living per square kilometer or per square mile. This list does not refer to the population, population density or land area of the greater metropolitan area or urban area, nor particular districts in any of the cities listed.
Kaggle dataset identifier: list-of-world-cities-by-population-density
<jupyter_script># # Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv(
"/kaggle/input/list-of-world-cities-by-population-density/List of world cities by population density.csv"
)
df.head()
df.info()
df.shape
# **Number of rows and columns**
df.columns
df = df.drop(["Unnamed: 0"], axis=1)
df.head()
# **Cleaning unwanted data**
df.isnull().sum()
# strip footnote markers such as "[1]" and the thousands separators so the
# columns can be cast to numeric types below
df["Population"] = df["Population"].str.split("[").str.get(0)
df["Population"] = df["Population"].apply(lambda x: x.replace(",", ""))
df["Area (km²)"] = df["Area (km²)"].str.split("[").str.get(0)
df
df = df.astype({"Population": int, "Area (km²)": float})
df.info()
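# The dataset description defines density as people per square kilometre, so as a
# quick sanity check (an added illustration, not part of the original notebook) we
# recompute it from the cleaned columns and show it next to the listed "Density (/km²)"
check = df[["City", "Population", "Area (km²)", "Density (/km²)"]].copy()
check["computed (/km²)"] = (check["Population"] / check["Area (km²)"]).round(0)
check.head()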
for col in df.describe(include="object").columns:
print(col)
print(df[col].unique())
print("-" * 50)
pop = df.sort_values(by="Population", ascending=False)[:10]
pop
# # Data Visualisation
plt.figure(figsize=(12, 6))
sns.barplot(x="City", y="Population", data=pop)
plt.ticklabel_format(style="plain", axis="y")
plt.title("most populated cities")
plt.show()
# **> this bar chart shows the ten most populous cities in the list**
ar = df.sort_values(by="Area (km²)", ascending=True)[:10]
ar
plt.figure(figsize=(12, 6))
sns.barplot(x="City", y="Area (km²)", data=ar)
plt.ticklabel_format(style="plain", axis="y")
plt.xticks(rotation=90)
plt.title("cities having smallest area")
plt.show()
# **> this graph shows the ten cities with the smallest land area**
cnt = df["Country"].value_counts().reset_index()
cnt
plt.figure(figsize=(12, 5))
sns.barplot(x="City", y="Population", data=ar)
plt.ticklabel_format(style="plain", axis="y")
plt.xticks(rotation=90)
plt.title("population of the cities having the smallest area")
plt.show()
# > this graph shows that even the cities with the smallest area have large populations
sns.scatterplot(x="Area (km²)", y="Population", data=df)
plt.ticklabel_format(style="plain", axis="y")
plt.title("population per area(/km²)")
plt.show()
# > popultion in area(km²) graph
#
sns.scatterplot(x="Density (/km²)", y="Population", data=df)
plt.ticklabel_format(style="plain", axis="y")
plt.title("population per Density(/km²)")
plt.show()
# **> popultion in density(km²) graph**
sns.scatterplot(x="Density (/km²)", y="Area (km²)", data=df)
plt.ticklabel_format(style="plain", axis="y")
plt.title("relation between density and area")
plt.show()
# **> this scatter plot shows the relation between density and area**
#
ind = df[df["Country"] == "India"]
ind
plt.figure(figsize=(12, 6))
sns.barplot(
x="City", y="Population", data=ind.sort_values("Population", ascending=False)
)
plt.xticks(rotation=90)
plt.ticklabel_format(style="plain", axis="y")
plt.title("populated cities of india")
plt.show()
# **populated cities of india**
ind = df[df["Country"] == "Philippines"]
ind
plt.figure(figsize=(12, 6))
sns.barplot(
x="City",
y="Population",
data=df[df["Country"] == "Philippines"].sort_values("Population", ascending=False),
)
plt.xticks(rotation=90)
plt.ticklabel_format(style="plain", axis="y")
plt.title("populated cities of Philippines")
plt.show()
# **Populated cities of the Philippines**
tp = df.groupby("Country")[["Population"]].sum()
tp
ta = df.groupby("Country")[["Area (km²)"]].sum()
ta
m = pd.concat([ta, tp], axis=1)
m
sns.scatterplot(y="Population", x="Area (km²)", data=m, hue="Country")
plt.title("coutrywise total population vs area/km")
plt.ticklabel_format(style="plain", axis="y")
plt.show()
df1 = df.drop(labels=["Area (mi²)", "Density (/mi²)", "City", "Country"], axis=1)
corr_matrix = df1.corr()
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/496/129496081.ipynb
|
list-of-world-cities-by-population-density
|
rajkumarpandey02
|
[{"Id": 129496081, "ScriptId": 37940520, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14698961, "CreationDate": "05/14/2023 09:50:09", "VersionNumber": 3.0, "Title": "World's Population by Cities", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 148.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 148.0, "LinesInsertedFromFork": 39.0, "LinesDeletedFromFork": 58.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 109.0, "TotalVotes": 0}]
|
[{"Id": 185612176, "KernelVersionId": 129496081, "SourceDatasetVersionId": 5381497}]
|
[{"Id": 5381497, "DatasetId": 3121025, "DatasourceVersionId": 5455141, "CreatorUserId": 11417257, "LicenseName": "CC0: Public Domain", "CreationDate": "04/12/2023 09:36:52", "VersionNumber": 1.0, "Title": "List of World Cities by Population Density", "Slug": "list-of-world-cities-by-population-density", "Subtitle": "List of Metropolitan Areas by Population Density Top 50 Cities", "Description": "# **CONTENT**\n\nThis is a list of cities worldwide by population density. The population, population density and land area for the cities listed are based on the entire city proper, the defined boundary or border of a city or the city limits of the city. The population density of the cities listed is based on the average number of people living per square kilometer or per square mile. This list does not refer to the population, population density or land area of the greater metropolitan area or urban area, nor particular districts in any of the cities listed.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3121025, "CreatorUserId": 11417257, "OwnerUserId": 11417257.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5381497.0, "CurrentDatasourceVersionId": 5455141.0, "ForumId": 3184514, "Type": 2, "CreationDate": "04/12/2023 09:36:52", "LastActivityDate": "04/12/2023", "TotalViews": 7736, "TotalDownloads": 1482, "TotalVotes": 37, "TotalKernels": 7}]
|
[{"Id": 11417257, "UserName": "rajkumarpandey02", "DisplayName": "Raj Kumar Pandey", "RegisterDate": "08/28/2022", "PerformanceTier": 2}]
|
# # Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv(
"/kaggle/input/list-of-world-cities-by-population-density/List of world cities by population density.csv"
)
df.head()
df.info()
df.shape
# **number of rows and columns******
df.columns
df = df.drop(["Unnamed: 0"], axis=1)
df.head()
# **cleaning unwanted data**
df.isnull().sum()
df["Population"] = df["Population"].str.split("[").str.get(0)
df["Population"] = df["Population"].apply(lambda x: x.replace(",", ""))
df["Area (km²)"] = df["Area (km²)"].str.split("[").str.get(0)
df
df = df.astype({"Population": int, "Area (km²)": float})
df.info()
for col in df.describe(include="object").columns:
print(col)
print(df[col].unique())
print("-" * 50)
pop = df.sort_values(by="Population", ascending=False)[:10]
pop
# # Data Visualisation
plt.figure(figsize=(12, 6))
sns.barplot(x="City", y="Population", data=pop)
plt.ticklabel_format(style="plain", axis="y")
plt.title("most populated cities")
plt.show()
# **> this bar chart shows the ten most populated cities**
ar = df.sort_values(by="Area (km²)", ascending=True)[:10]
ar
plt.figure(figsize=(12, 6))
sns.barplot(x="City", y="Area (km²)", data=ar)
plt.ticklabel_format(style="plain", axis="y")
plt.xticks(rotation=90)
plt.title("cities having smallest area")
plt.show()
# **> this graph shows the ten cities with the smallest area**
cnt = df["Country"].value_counts().reset_index()
cnt
plt.figure(figsize=(12, 5))
sns.barplot(x="City", y="Population", data=ar)
plt.ticklabel_format(style="plain", axis="y")
plt.xticks(rotation=90)
plt.title("population of the smallest-area cities")
plt.show()
# > this graph shows that some of the smallest cities by area still have a high population
sns.scatterplot(x="Area (km²)", y="Population", data=df)
plt.ticklabel_format(style="plain", axis="y")
plt.title("population per area(/km²)")
plt.show()
# > population vs area (km²) graph
#
sns.scatterplot(x="Density (/km²)", y="Population", data=df)
plt.ticklabel_format(style="plain", axis="y")
plt.title("population per Density(/km²)")
plt.show()
# **> population vs density (/km²) graph**
sns.scatterplot(x="Density (/km²)", y="Area (km²)", data=df)
plt.ticklabel_format(style="plain", axis="y")
plt.title("relation between density and area")
plt.show()
# **> scatterplot of the relation between density and area**
#
ind = df[df["Country"] == "India"]
ind
plt.figure(figsize=(12, 6))
sns.barplot(
x="City", y="Population", data=ind.sort_values("Population", ascending=False)
)
plt.xticks(rotation=90)
plt.ticklabel_format(style="plain", axis="y")
plt.title("populated cities of india")
plt.show()
# **populated cities of india**
ind = df[df["Country"] == "Philippines"]
ind
plt.figure(figsize=(12, 6))
sns.barplot(
x="City",
y="Population",
data=df[df["Country"] == "Philippines"].sort_values("Population", ascending=False),
)
plt.xticks(rotation=90)
plt.ticklabel_format(style="plain", axis="y")
plt.title("populated cities of Philippines")
plt.show()
# **most populated cities of the Philippines**
tp = df.groupby("Country")[["Population"]].sum()
tp
ta = df.groupby("Country")[["Area (km²)"]].sum()
ta
m = pd.concat([ta, tp], axis=1)
m
sns.scatterplot(y="Population", x="Area (km²)", data=m, hue="Country")
plt.title("coutrywise total population vs area/km")
plt.ticklabel_format(style="plain", axis="y")
plt.show()
df1 = df.drop(labels=["Area (mi²)", "Density (/mi²)", "City", "Country"], axis=1)
corr_matrix = df1.corr()
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.show()
| false | 1 | 1,255 | 0 | 1,410 | 1,255 |
||
129958802
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import nltk
# ## Pre-Processing
# tokenizing
# '\w+' - one or more word characters (letters, digits or underscore).
from nltk.tokenize import RegexpTokenizer
def tokenizer(txt):
token = RegexpTokenizer(r"\w+")
return " ".join(list(set(token.tokenize(txt))))
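# quick illustrative check of the tokenizer (the set() call keeps unique tokens,
# so the order of the returned words is arbitrary):
print(tokenizer("to be, or not to be"))  # e.g. "to not or be" in some order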
# stopwords
from nltk.corpus import stopwords
st_words = stopwords.words("english")
def remove_stopwords(lst):
res = []
lst = lst.split(" ")
for word in lst:
if word not in st_words:
res.append(word)
return " ".join(res)
# Lemmatization
from nltk.stem import WordNetLemmatizer
Lemmatizer = WordNetLemmatizer()
def lemmatize_words(lst):
res = []
for word in lst.split(" "):
res.append(Lemmatizer.lemmatize(word))
return " ".join(res)
# list to str
def convert_tostr(lst):
return " ".join(lst)
def convert_tolst(s):
return [s]
# all four batches are used below, so load b1 and b2 as well
b1 = pd.read_json("/kaggle/input/april24-26-clusters/b1")
b2 = pd.read_json("/kaggle/input/april24-26-clusters/b2")
b3 = pd.read_json("/kaggle/input/april24-26-clusters/b3")
b4 = pd.read_json("/kaggle/input/april24-26-clusters/b4")
b1.head(0)
b1.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b2.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b3.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b4.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b1["_data"] = b1["Text"].apply(tokenizer)
b1["_data"] = b1["_data"].apply(remove_stopwords)
b1["_data"] = b1["_data"].apply(lemmatize_words)
b2["_data"] = b2["Text"].apply(tokenizer)
b2["_data"] = b2["_data"].apply(remove_stopwords)
b2["_data"] = b2["_data"].apply(lemmatize_words)
b3["_data"] = b3["Text"].apply(tokenizer)
b3["_data"] = b3["_data"].apply(remove_stopwords)
b3["_data"] = b3["_data"].apply(lemmatize_words)
b4["_data"] = b4["Text"].apply(tokenizer)
b4["_data"] = b4["_data"].apply(remove_stopwords)
b4["_data"] = b4["_data"].apply(lemmatize_words)
b4.head()
sample = ""
for i in b1["_data"]:
sample = i
print(sample)
break
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
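# sanity check of the embedding dimensionality before encoding the full corpus
# (just an illustrative probe sentence):
print(model.encode("sanity check sentence").shape)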
b1["_encoded"] = b1["_data"].apply(model.encode)
b1.to_json("batch1.json")
b2["_encoded"] = b2["_data"].apply(model.encode)
b2.to_json("batch2.json")
b3["_encoded"] = b3["_data"].apply(model.encode)
b3.to_json("batch3.json")
b4["_encoded"] = b4["_data"].apply(model.encode)
b4.to_json("batch4.json")
d1 = pd.read_json("/kaggle/working/batch1.json")
d2 = pd.read_json("/kaggle/working/batch2.json")
d3 = pd.read_json("/kaggle/working/batch3.json")
d4 = pd.read_json("/kaggle/working/batch4.json")
merger = [d1, d2, d3, d4]
prepared_data = pd.concat(merger)
final_tensor_vectors = pd.DataFrame(
np.column_stack(list(zip(*prepared_data[["_encoded"]].values)))
)
final_tensor_vectors.to_csv("april24_26_final_tensor_vectors.csv")
final_tensor_vectors
final_tensor_vectors = pd.read_csv(
"/kaggle/input/april24-26-clusters/april24_26_final_tensor_vectors.csv"
)
import umap, hdbscan
umap_embeddings = umap.UMAP(n_components=10, random_state=0).fit_transform(
final_tensor_vectors
)
cluster = hdbscan.HDBSCAN(
min_cluster_size=5,
metric="euclidean",
).fit(umap_embeddings)
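# quick look at the clustering result before attaching labels to the articles;
# label -1 is HDBSCAN's "noise" bucket (illustrative sanity check):
from collections import Counter

print(Counter(cluster.labels_).most_common(5))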
prepared_data = pd.read_json("/kaggle/input/april24-26-clusters/apr24_26.json")
prepared_data["cluster_label"] = cluster.labels_
# prepared_data.drop(columns=['cluster_label'],inplace=True)
prepared_data.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"keywords_n",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
prepared_data
dic = {}
for i in prepared_data["cluster_label"]:
    dic[i] = dic.get(i, 0) + 1
print("largest cluster label: ", max(dic.keys()))
for i, j in dic.items():
print(f"cluster :{i} , No of Articles :{j}")
dt = prepared_data.query(
"cluster_label==2"
) # defining cluster id and id must be >=40 and <=2480
for i, r in dt.iterrows():
print("** Article id---", r["_id"])
print("\n")
# print('hashlink---',r['hashlink'])
# print('\n')
print("source---", r["domain_name"])
print("\n")
print("Content -----", r["Text"])
print(
"----------------------------------------------------------------------------------"
)
print("\n")
# ### querying clusters
data = pd.read_json("/kaggle/input/april24-26-clusters/april24-26_cluster.json")
dic = {}
for i in data["cluster_label"]:
    dic[i] = dic.get(i, 0) + 1
print("largest cluster label: ", max(dic.keys()))
for i, j in dic.items():
print(f"cluster :{i} , No of Articles :{j}")
dt = data.query(
"cluster_label==31"
) # defining cluster id and id must be >=40 and <=2480
for i, r in dt.iterrows():
print("Article id---", r["_id"])
print("\n")
# print('hashlink---',r['hashlink'])
# print('\n')
print("source---", r["domain_name"])
print("\n")
print("Date---", r["published_date"])
print("\n")
print("Content -----", r["Text"])
print(
"----------------------------------------------------------------------------------"
)
print("\n")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/958/129958802.ipynb
| null | null |
[{"Id": 129958802, "ScriptId": 37777630, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14218315, "CreationDate": "05/17/2023 17:54:01", "VersionNumber": 1.0, "Title": "april24-26-news-corpus-clustering", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 206.0, "LinesInsertedFromPrevious": 206.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import nltk
# ## Pre-Processing
# tokenizing
# '\w+' - one or more non-alphanumeric characters.
from nltk.tokenize import RegexpTokenizer
def tokenizer(txt):
token = RegexpTokenizer(r"\w+")
return " ".join(list(set(token.tokenize(txt))))
# stopwords
from nltk.corpus import stopwords
st_words = stopwords.words("english")
def remove_stopwords(lst):
res = []
lst = lst.split(" ")
for word in lst:
if word not in st_words:
res.append(word)
return " ".join(res)
# Lemmatization
from nltk.stem import WordNetLemmatizer
Lemmatizer = WordNetLemmatizer()
def lemmatize_words(lst):
res = []
for word in lst.split(" "):
res.append(Lemmatizer.lemmatize(word))
return " ".join(res)
# list to str
def convert_tostr(lst):
return " ".join(lst)
def convert_tolst(s):
return [s]
# all four batches are used below, so load b1 and b2 as well
b1 = pd.read_json("/kaggle/input/april24-26-clusters/b1")
b2 = pd.read_json("/kaggle/input/april24-26-clusters/b2")
b3 = pd.read_json("/kaggle/input/april24-26-clusters/b3")
b4 = pd.read_json("/kaggle/input/april24-26-clusters/b4")
b1.head(0)
b1.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b2.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b3.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b4.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"domain_name",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
b1["_data"] = b1["Text"].apply(tokenizer)
b1["_data"] = b1["_data"].apply(remove_stopwords)
b1["_data"] = b1["_data"].apply(lemmatize_words)
b2["_data"] = b2["Text"].apply(tokenizer)
b2["_data"] = b2["_data"].apply(remove_stopwords)
b2["_data"] = b2["_data"].apply(lemmatize_words)
b3["_data"] = b3["Text"].apply(tokenizer)
b3["_data"] = b3["_data"].apply(remove_stopwords)
b3["_data"] = b3["_data"].apply(lemmatize_words)
b4["_data"] = b4["Text"].apply(tokenizer)
b4["_data"] = b4["_data"].apply(remove_stopwords)
b4["_data"] = b4["_data"].apply(lemmatize_words)
b4.head()
sample = ""
for i in b1["_data"]:
sample = i
print(sample)
break
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
b1["_encoded"] = b1["_data"].apply(model.encode)
b1.to_json("batch1.json")
b2["_encoded"] = b2["_data"].apply(model.encode)
b2.to_json("batch2.json")
b3["_encoded"] = b3["_data"].apply(model.encode)
b3.to_json("batch3.json")
b4["_encoded"] = b4["_data"].apply(model.encode)
b4.to_json("batch4.json")
d1 = pd.read_json("/kaggle/working/batch1.json")
d2 = pd.read_json("/kaggle/working/batch2.json")
d3 = pd.read_json("/kaggle/working/batch3.json")
d4 = pd.read_json("/kaggle/working/batch4.json")
merger = [d1, d2, d3, d4]
prepared_data = pd.concat(merger)
final_tensor_vectors = pd.DataFrame(
np.column_stack(list(zip(*prepared_data[["_encoded"]].values)))
)
final_tensor_vectors.to_csv("april24_26_final_tensor_vectors.csv")
final_tensor_vectors
final_tensor_vectors = pd.read_csv(
"/kaggle/input/april24-26-clusters/april24_26_final_tensor_vectors.csv"
)
import umap, hdbscan
umap_embeddings = umap.UMAP(n_components=10, random_state=0).fit_transform(
final_tensor_vectors
)
cluster = hdbscan.HDBSCAN(
min_cluster_size=5,
metric="euclidean",
).fit(umap_embeddings)
prepared_data = pd.read_json("/kaggle/input/april24-26-clusters/apr24_26.json")
prepared_data["cluster_label"] = cluster.labels_
# prepared_data.drop(columns=['cluster_label'],inplace=True)
prepared_data.drop(
columns=[
"_index",
"_type",
"_score",
"source_country",
"_source",
"language",
"keywords_n",
"keywords_p",
"company",
"person",
"hashlink",
"lat_long",
"state_id",
"zs_category",
"Summary",
"Client",
],
inplace=True,
)
prepared_data
dic = {}
for i in prepared_data["cluster_label"]:
    dic[i] = dic.get(i, 0) + 1
print("largest cluster label: ", max(dic.keys()))
for i, j in dic.items():
print(f"cluster :{i} , No of Articles :{j}")
dt = prepared_data.query(
"cluster_label==2"
) # defining cluster id and id must be >=40 and <=2480
for i, r in dt.iterrows():
print("** Article id---", r["_id"])
print("\n")
# print('hashlink---',r['hashlink'])
# print('\n')
print("source---", r["domain_name"])
print("\n")
print("Content -----", r["Text"])
print(
"----------------------------------------------------------------------------------"
)
print("\n")
# ### querying clusters
data = pd.read_json("/kaggle/input/april24-26-clusters/april24-26_cluster.json")
dic = {}
for i in data["cluster_label"]:
    dic[i] = dic.get(i, 0) + 1
print("largest cluster label: ", max(dic.keys()))
for i, j in dic.items():
print(f"cluster :{i} , No of Articles :{j}")
dt = data.query(
"cluster_label==31"
) # defining cluster id and id must be >=40 and <=2480
for i, r in dt.iterrows():
print("Article id---", r["_id"])
print("\n")
# print('hashlink---',r['hashlink'])
# print('\n')
print("source---", r["domain_name"])
print("\n")
print("Date---", r["published_date"])
print("\n")
print("Content -----", r["Text"])
print(
"----------------------------------------------------------------------------------"
)
print("\n")
| false | 0 | 2,205 | 0 | 2,205 | 2,205 |
||
129958311
|
<jupyter_start><jupyter_text>Udemy Courses
# Udemy Courses
### A dataset of udemy courses
_____
### About this dataset
This dataset contains information about Udemy courses in the Web Development category, including course title, URL, price, number of subscribers, number of reviews, number of lectures, course level, rating, content duration, published timestamp, and subject. With this dataset researchers can track the performance of courses and uncover opportunities to generate revenue. This data set is perfect for those who want to learn more about web development or improve their skills in the field
### How to use the dataset
To get started with this dataset, you will need to download it from kaggle. After downloading the dataset, you will need to unzip it. Once you have unzipped the file, you will be able to open it in a text editor such as Microsoft Word or Notepad ++.
The data is organized into columns with the following information:
-course_title: The title of the course. (String)
-url: The URL of the course. (String)
-price: The price of the course. (Float)
-num_subscribers: The number of subscribers for the course. (Integer)
-num_reviews: The number of reviews for the course. (Integer)
-num_lectures: The number of lectures for the course. (Integer)
-level: The level of the course. (String)
### Research Ideas
1. analyzing which Udemy courses in the Web Development category are the most popular, and understanding what factors contribute to popularity
2. using published timestamp and content duration to understand how course quality affects students' willingness to pay
3. predicting the price of a Udemy course based on its number of subscribers, reviews, lectures, level, rating, and content duration
Kaggle dataset identifier: udemy-courses-revenue-generation-and-course-anal
<jupyter_script>import numpy as np
from sklearn.metrics import mean_squared_error
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
# Import from Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import statistics as sts
import matplotlib.pyplot as plt
from matplotlib import cm
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.cluster import SilhouetteVisualizer
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import scipy.stats as stat
import pylab
from imblearn.over_sampling import SMOTE
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
from scipy.stats import ttest_ind
from sklearn import metrics
from scipy.stats import spearmanr
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import silhouette_score
from sklearn.metrics import davies_bouldin_score
from sklearn.metrics import calinski_harabasz_score
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LinearRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import r2_score, accuracy_score, f1_score
import warnings
from scipy import stats
warnings.filterwarnings("ignore")
# Dataset view settings
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 90)
# functions to help us to draw graphs
def plots_helper(
column,
): # calculating the IQR to determine the exact number of outliers in each feature and skewness
Q3 = np.quantile(column, 0.75)
Q1 = np.quantile(column, 0.25)
IQR = Q3 - Q1
lower_range = Q1 - 1.5 * IQR
upper_range = Q3 + 1.5 * IQR
oc = len(column[column > upper_range]) + len(column[column < lower_range])
percent = round((oc / len(column)) * 100, 2)
sk = round(stat.skew(column), 2)
return oc, percent, sk
def box_dist_prob_plots(df): # BoxPlot , Distribution, Histogram and ProbabilityPlot
number_features = len(df.columns)
plt.figure(figsize=(25, number_features * 6))
for column, i in zip(df, range(1, number_features * 3 + 1, 3)):
# calculating the outliers and their percentage
oc, percent, sk = plots_helper(df[column])
# plotting the 3 diagrams for each feature
plt.subplot(number_features, 3, i)
sns.boxplot(df[column])
plt.xlabel(column, size=14)
plt.title(
f"Number of outliers {oc} ({percent}%)",
fontsize=12,
color="red",
fontweight="bold",
)
plt.subplot(number_features, 3, i + 1)
sns.distplot(df[column], kde=False)
plt.title(f"Skewness = {sk}", fontsize=12, color="red", fontweight="bold")
plt.xlabel(column, size=14)
plt.ylabel("Count", size=14)
plt.axvline(x=df[column].median(), ls="--")
plt.axvline(x=df[column].mean())
plt.legend(["mean", "median"])
plt.subplot(number_features, 3, i + 2)
stat.probplot(df[column], dist="norm", plot=pylab, rvalue=True)
def box_for_one_col(column, name):
oc, percent, sk = plots_helper(column)
# plotting the 3 diagrams for each feature
plt.subplot(1, 3, 1)
sns.boxplot(column)
plt.xlabel(name, size=14)
plt.title(
f"Number of outliers {oc} ({percent}%)",
fontsize=12,
color="red",
fontweight="bold",
)
plt.subplot(1, 3, 2)
sns.distplot(column, kde=False)
plt.title(f"Skewness = {sk}", fontsize=12, color="red", fontweight="bold")
plt.xlabel(name, size=14)
plt.ylabel("Count", size=14)
plt.axvline(x=column.median(), ls="--")
plt.axvline(x=column.mean())
plt.legend(["mean", "median"])
plt.subplot(1, 3, 3)
stat.probplot(column, dist="norm", plot=pylab, rvalue=True)
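# The two plotting helpers above are not called later in this notebook; a minimal
# illustrative call on synthetic data (purely a sketch, not part of the analysis):
_demo_col = pd.Series(np.random.default_rng(0).normal(size=200))
plt.figure(figsize=(15, 4))
box_for_one_col(_demo_col, "demo")
plt.show()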
# function to remove outliers using IQR
# Define a function to remove outliers based on the IQR
def remove_outliers_iqr(df, col_name):
Q1 = df[col_name].quantile(0.25)
Q3 = df[col_name].quantile(0.75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
df_filtered = df[(df[col_name] > lower_bound) & (df[col_name] < upper_bound)]
return df_filtered
# Remove outliers from the DataFrame
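# (The helper above is not applied to the course data below; here is a minimal
#  self-contained check on synthetic numbers, purely illustrative.)
_demo = pd.DataFrame({"price": [20, 25, 30, 35, 500]})  # 500 is an obvious outlier
print(remove_outliers_iqr(_demo, "price"))  # the 500 row is filtered out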
# read data
df = pd.read_csv(
"/kaggle/input/udemy-courses-revenue-generation-and-course-anal/Entry Level Project Sheet - 3.1-data-sheet-udemy-courses-web-development.csv"
)
# see samples
df.sample(5)
# check different subjects
pd.unique(df["subject"])
# see dataset info: column names, dtypes, null counts and the number of rows
df.info()
# notice that the minimum for duration and num of lectures is 0, which is a mistake (we will solve it later)
df.describe()
# **Data Cleaning**
# check duplicate and remove them if found
df[df.duplicated()]
# remove columns we do not need them (url, course id)
df.drop("url", axis=1, inplace=True)
df.drop("course_id", axis=1, inplace=True)
# check the dtype of the timestamp column
#
type(df["published_timestamp"][0])
# convert it to a date
df["published_timestamp"] = pd.to_datetime(df["published_timestamp"]).dt.date
df["Date"] = pd.to_datetime(df["Date"]).dt.date
# we notice there are both a Free/Paid column and a price column
df.sample(5)
# check for inconsistent rows: Free with a non-zero price, or Paid with price = 0 (both would be mistakes)
rslt_df = df[(df["price"] != 0) & (df["Free/Paid"] == "Paid")]
# print('\nResult dataframe :\n', rslt_df)
rslt_df = df[(df["price"] == 0) & (df["Free/Paid"] == "Free")]
# print('\nResult dataframe :\n', rslt_df)
rslt_df = df[(df["price"] != 0) & (df["Free/Paid"] == "Free")]
print("\nResult dataframe :\n", rslt_df)
rslt_df = df[(df["price"] == 0) & (df["Free/Paid"] == "Paid")]
print("\nResult dataframe :\n", rslt_df)
# now remove Free/Paid because price carries more detailed information
df.drop("Free/Paid", axis=1, inplace=True)
df.sample(5)
# check if we find row date != published_timestamp
rslt_df = np.where(df["Date"] == df["published_timestamp"], 1, 0)
print("\nResult dataframe :\n", np.sum(rslt_df))
# we did not find any, so remove one of the two columns
df.drop("published_timestamp", axis=1, inplace=True)
df.sample(5)
# treat duration = 0 as missing: we found 1 such row, and also 1 row with num_lectures = 0 (is it the same row?)
rslt_df = np.where(df["content_duration"] == 0, 1, 0)
print("\nResult dataframe :\n", np.sum(rslt_df))
rslt_df = np.where(df["num_lectures"] == 0, 1, 0)
print("\nResult dataframe :\n", np.sum(rslt_df))
# remove it (yes same row :D)
df = df[df["num_lectures"] >= 1]
df.describe()
# drop course_title we do not need it
df.drop("course_title", axis=1, inplace=True)
# one-hot encode the categorical columns: e.g. subject has 4 categories, so we create 4 indicator columns with values 0/1
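# tiny illustration of what pd.get_dummies produces (made-up values):
print(pd.get_dummies(pd.DataFrame({"subject": ["a", "b", "a"]})))  # -> subject_a, subject_b columns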
df_Gragh = df.copy()
df_Gragh.drop("Date", axis=1, inplace=True)
df_Gragh = pd.get_dummies(df_Gragh)
df_Gragh["Date"] = df["Date"]
df_Gragh.sample(5)
# **End data cleaning**
# **Now answer questions:**
# A-Descriptive:
# 1.What is the most common level for each subject?
# 2.What is the average rating for each subject?
# B-Exploratory:
# 3.What is the relation between the number of subscribers and the publishing year of the course?
# 4.What is the relation between the course level and the course price?
# 5.What is the relation between the rating with the number of reviews and subscribers?
# C-Predictive:
# 6.What will be the price for a new course?
# 7.What will be the number of subscribers to a new course in a certain subject?
# 8.What will be the rating of a new course?
# D-Causal:
# 9.does the increase of price affect number of subscribers?
# 10.does the subject affect the duration?
# 11.does the number of lectures and the duration affect the number of subscribers?
# E-Mechanistic:
# 12.How does the increase or decrease in the course rating affect the course’s price?
sub = [
"subject_Business Finance",
"subject_Graphic Design",
"subject_Musical Instruments",
"subject_Web Development",
]
lev = [
"level_All Levels",
"level_Beginner Level",
"level_Expert Level",
"level_Intermediate Level",
]
# What is the most common level for each subject?
# intuitively **All Levels** makes sense, but we will verify it
c = 0
for s in sub:
maximume = 0
sStr = -1
lStr = -1
d = []
for l in lev:
res = np.where((df_Gragh[s] == 1) & (df_Gragh[l] == 1), 1, 0)
d.append(np.sum(res))
print(" for s =", s, " level=", l, " num=", np.sum(res))
c += np.sum(res)
if np.sum(res) > maximume:
sStr = s
lStr = l
maximume = np.sum(res)
fig = plt.figure(figsize=(10, 5))
plt.bar(lev, d, color="maroon", width=0.4)
plt.xlabel("Levels")
plt.ylabel("No. of level")
plt.title(s)
plt.show()
print(c)
# 2-What is the average rating for each subject?
# average rating for subject_Business Finance is 0.6901344537815126
# average rating for subject_Graphic Design is 0.7303820598006644
# average rating for subject_Musical Instruments is 0.30891176470588233
# average rating for subject_Web Development is 0.643050706566916
d = []
for s in sub:
x = df_Gragh.loc[df_Gragh[s] == 1, "rating"].mean()
d.append(x)
print("avarage rate for ", s, " is ", x)
fig = plt.figure(figsize=(10, 5))
plt.bar(sub, d, color="maroon", width=0.4)
plt.xlabel("subjects")
plt.ylabel("average rating")
plt.title("average rating for each subject")
plt.show()
# 3.What is the relation between the number of subscribers and the publishing year of the course?
# we cannot see much from the raw plot alone, so we use a time-series STL decomposition to extract trend and seasonality
df_Gragh["Year"], df_Gragh["Month"] = df_Gragh["Date"].apply(lambda x: x.year), df[
"Date"
].apply(lambda x: x.month)
y = pd.unique(df_Gragh["Year"])
y = np.sort(y)
m = pd.unique(df_Gragh["Month"])
m = np.sort(m)
m
import datetime
numOfSubs = []
monthes = []
for Y in y:
for i, M in enumerate(m):
sumOfMonth = df_Gragh.loc[
(df_Gragh["Year"] == Y) & (df_Gragh["Month"] == M), "num_subscribers"
].sum()
numOfSubs.append(sumOfMonth)
monthes.append(datetime.date(Y, M, 1))
for i, Y in enumerate(y):
    year_slice = numOfSubs[i * 12 : (i + 1) * 12]  # all 12 months of year Y
    print("year=", Y, " max month=", int(np.argmax(year_slice)) + 1)
    print("year=", Y, " min month=", int(np.argmin(year_slice)) + 1)
plt.figure(figsize=(15, 15))
plt.plot(monthes, numOfSubs)
plt.xticks(rotation=90)
plt.show()
# we notice a trend peaking around 2016, with subscribers increasing every year, but 2017 drops. Why?
from statsmodels.tsa.seasonal import STL
dataFrameRel = pd.DataFrame(numOfSubs, index=monthes)
print(dataFrameRel.sample(5))
stl = STL(dataFrameRel, period=12)
res = stl.fit()
fig = res.plot()
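# the decomposition components can also be inspected directly; for example the
# estimated trend for the most recent months (illustrative check):
print(res.trend.tail())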
# because the maximum date is 6/7/2017, we do not have information for the last ~5 months of 2017
df_Gragh["Date"].max()
df_Gragh.sample(5)
# We use two kinds of hypothesis tests, depending on the column types involved:
# 1) numerical vs numerical: the Spearman rank correlation coefficient measures the strength and
#    direction of the monotonic relationship between X and Y; the p-value is the probability of
#    seeing a coefficient this extreme if there were no monotonic relationship, so if it is below
#    the chosen significance level (e.g. 0.05) we reject that null hypothesis.
# 2) numerical vs categorical: one-way ANOVA (f_oneway); if p_val < 0.05 there is a significant
#    difference in the mean of the numerical column across the categories, otherwise there is not.
# None of our questions compare two categorical columns, so no test is needed for that case.
# **note: the plain (Pearson) correlation only detects linear relations** (see the small illustration right below)
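# Minimal synthetic illustration of the two tests (made-up numbers, not course data):
from scipy.stats import f_oneway

_x1, _x2 = [1, 2, 3, 4, 5], [2, 4, 5, 9, 8]
print(spearmanr(_x1, _x2))  # numerical vs numerical -> Spearman rank correlation
_group_a, _group_b = [10, 12, 11, 13], [20, 22, 21, 23]
print(f_oneway(_group_a, _group_b))  # numerical vs categorical groups -> one-way ANOVA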
# 4.What is the relation between the course level and the course price?
# level is categorical and price is numerical, so we use the one-way ANOVA (f_oneway) test;
# if the p-value is below 0.05, the test indicates a relation between level and price.
import pandas as pd
from scipy.stats import f_oneway
# Group the data by category
groups = df.groupby("level")["price"].apply(list)
print(groups)
# # Calculate the means and standard deviations of the price for each category
# means = groups.mean()
# stds = groups.std()
# Perform One-Way ANOVA
f_stat, p_val = f_oneway(*groups)
print(p_val)
if p_val < 0.05:
print("There is a significant difference in the mean price across different levels")
else:
print(
"There is no significant difference in the mean price across different levels"
)
print(df["price"].corr(df["num_lectures"]))
plt.scatter(df["price"], df["num_lectures"])
plt.xlabel("price")
plt.ylabel("num_lectures")
plt.title("relation between price and num_lectures")
plt.show()
corr_coef, p_val = spearmanr(df["price"], df["num_lectures"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
# 5.What is the relation of the rating with the number of reviews and subscribers?
# for two numerical columns we use the Spearman test and also look at the correlation,
# so we do that for every pair among rating, number of reviews and number of subscribers:
# number of reviews vs subscribers:
#   there is a relation: the correlation is about 0.65
#   and the Spearman p-value is ~0, which also indicates a relation
# number of reviews vs rating:
#   no relation: the correlation is 0.004184210818520607
#   and the Spearman p-value is ~0.67, which indicates no relation
# number of subscribers vs rating:
#   no relation: the correlation is -0.00726145411559383
#   and the Spearman p-value is ~0.33, which indicates no relation
print(df["rating"].corr(df["num_reviews"]))
plt.scatter(df["rating"], df["num_reviews"])
plt.xlabel("rating")
plt.ylabel("num_reviews")
plt.title("relation between rating and num_reviews")
plt.show()
corr_coef, p_val = spearmanr(df["rating"], df["num_reviews"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["rating"].corr(df["num_subscribers"]))
plt.scatter(df["rating"], df["num_subscribers"])
plt.xlabel("rating")
plt.ylabel("num_subscribers")
plt.title("relation between rating and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["rating"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["num_reviews"].corr(df["num_subscribers"]))
plt.scatter(df["num_reviews"], df["num_subscribers"])
plt.xlabel("num_reviews")
plt.ylabel("num_subscribers")
plt.title("relation between num_reviews and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["num_reviews"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
# 9. does an increase in price affect the number of subscribers?
# from the correlation we see there is no clear relation
print(df["price"].corr(df["num_subscribers"]))
plt.scatter(df["price"], df["num_subscribers"])
plt.xlabel("price")
plt.ylabel("num_subscribers")
plt.title("relation between price and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["num_reviews"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
# 10. does the subject affect the duration?
# the f_oneway test p-value is ~0, so there is a relation
import pandas as pd
from scipy.stats import f_oneway
# Group the data by category
groups = df.groupby("subject")["content_duration"].apply(list)
print(groups)
# Perform One-Way ANOVA
f_stat, p_val = f_oneway(*groups)
print(p_val)
if p_val < 0.05:
print(
"There is a significant difference in the mean subject across different content_duration"
)
else:
print(
"There is no significant difference in the mean subject across different content_duration"
)
# 11. do the number of lectures and the duration affect the number of subscribers?
# content_duration vs num_lectures: correlation ≈ 0.8 and p-value ≈ 0, so there is a strong relation;
# num_subscribers vs content_duration and num_subscribers vs num_lectures also have p-values ≈ 0,
# so the tests indicate a relation even though their correlations are not high. Why?
# Because the plain correlation only captures linear relations, while the Spearman test also
# picks up non-linear but monotonic relationships (see the small illustration below).
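# A tiny synthetic demonstration of that point (monotonic but non-linear data):
_xs = pd.Series(range(1, 12), dtype=float)
_ys = np.exp(_xs)  # strictly increasing, clearly non-linear
print("Pearson :", _xs.corr(_ys))  # below 1 because the relation is not a straight line
print("Spearman:", spearmanr(_xs, _ys)[0])  # exactly 1 because the ranks agree perfectly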
print(df["num_lectures"].corr(df["content_duration"]))
plt.scatter(df["num_lectures"], df["content_duration"])
corr_coef, p_val = spearmanr(df["num_lectures"], df["content_duration"])
plt.xlabel("num_lectures")
plt.ylabel("content_duration")
plt.title("relation between num_lectures and content_duration")
plt.show()
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["num_lectures"].corr(df["num_subscribers"]))
plt.scatter(df["num_lectures"], df["num_subscribers"])
plt.xlabel("num_lectures")
plt.ylabel("num_subscribers")
plt.title("relation between num_lectures and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["num_lectures"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["content_duration"].corr(df["num_subscribers"]))
plt.scatter(df["content_duration"], df["num_subscribers"])
plt.xlabel("content_duration")
plt.ylabel("num_subscribers")
plt.title("relation between content_duration and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["content_duration"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
df.sample(5)
# extra check: does the subject affect the number of subscribers?
import pandas as pd
from scipy.stats import f_oneway
# Group the data by category
groups = df.groupby("subject")["num_subscribers"].apply(list)
print(groups)
# # Calculate the means and standard deviations of the price for each category
# means = groups.mean()
# stds = groups.std()
# Perform One-Way ANOVA
f_stat, p_val = f_oneway(*groups)
print(p_val)
if p_val < 0.05:
    print(
        "There is a significant difference in the mean number of subscribers across subjects"
    )
else:
    print(
        "There is no significant difference in the mean number of subscribers across subjects"
    )
# 12.How does the increase or decrease in the course rating affect the course’s price?
# the correlation is ~0 and the Spearman p-value is ~0.4, so there is no relation between rating and price
print(df["price"].corr(df["rating"]))
plt.scatter(df["price"], df["rating"])
plt.xlabel("price")
plt.ylabel("rating")
plt.title("relation between price and rating")
plt.show()
corr_coef, p_val = spearmanr(df["price"], df["rating"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
df_Gragh.sample(5)
df_Gragh.drop("Date", axis=1, inplace=True)
df_Gragh.drop("Year", axis=1, inplace=True)
df_Gragh.drop("Month", axis=1, inplace=True)
df_Gragh.sample(5)
# **note: this dataset was collected for analysis rather than prediction, so the targets are quite noisy and the prediction errors below are not great**
# 6.What will be the price for a new course?
# 7.What will be the number of subscribers to a new course in a curtain subject?
# 8.What will be the rating of a new course?
# we try several regressors to pick the best one, but the results stay poor because price, rating and the number of subscribers have little relation to the available features, and the data is noisy
for col in df_Gragh.columns:
print(col)
plt.scatter(df_Gragh["price"], df_Gragh[col])
plt.show()
X = df_Gragh.iloc[:, :].copy()
Y = df_Gragh["price"].copy()
print(len(Y))
X = X.drop(["price"], axis=1)
X.columns
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.1, random_state=412
)
from sklearn.compose import TransformedTargetRegressor
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PowerTransformer
from xgboost import XGBRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
steps = [
("power_transformer", PowerTransformer()),
("regressor", XGBRegressor(n_estimators=350)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [("power_transformer", PowerTransformer()), ("Ridge", Ridge(alpha=0.001))]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [("power_transformer", PowerTransformer()), ("lasso", Lasso(alpha=0.5))]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [
("power_transformer", PowerTransformer()),
("DecisionTree", DecisionTreeRegressor()),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [
("power_transformer", PowerTransformer()),
("RandomForest", RandomForestRegressor()),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
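# A rough cross-validated view of the same pipeline (RMSE averaged over 5 folds),
# as a sketch for sanity-checking the single train/test split above:
from sklearn.model_selection import cross_val_score

_cv_rmse = -cross_val_score(pipeline, X, Y, scoring="neg_root_mean_squared_error", cv=5)
print(_cv_rmse.mean())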
for col in df_Gragh.columns:
print(col)
plt.scatter(df_Gragh["rating"], df_Gragh[col])
plt.show()
X = df_Gragh.iloc[:, :].copy()
Y = df_Gragh["rating"].copy()
print(len(Y))
X = X.drop(["rating"], axis=1)
X.columns
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.1, random_state=412
)
steps = [
("power_transformer", PowerTransformer()),
("regressor", XGBRegressor(n_estimators=350)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [
("power_transformer", PowerTransformer()),
("DecisionTree", DecisionTreeRegressor(max_depth=15)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
df_Gragh.corr()
X = df_Gragh.iloc[:, :].copy()
Y = df_Gragh["num_subscribers"].copy()
print(len(Y))
X = X.drop(["num_subscribers"], axis=1)
X.columns
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.1, random_state=412
)
steps = [
("power_transformer", PowerTransformer()),
("regressor", XGBRegressor(n_estimators=350)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/958/129958311.ipynb
|
udemy-courses-revenue-generation-and-course-anal
|
thedevastator
|
[{"Id": 129958311, "ScriptId": 37324148, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10327586, "CreationDate": "05/17/2023 17:49:31", "VersionNumber": 2.0, "Title": "courses", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 806.0, "LinesInsertedFromPrevious": 631.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 175.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 186393296, "KernelVersionId": 129958311, "SourceDatasetVersionId": 4338372}]
|
[{"Id": 4338372, "DatasetId": 2554282, "DatasourceVersionId": 4396743, "CreatorUserId": 10654180, "LicenseName": "Other (specified in description)", "CreationDate": "10/17/2022 00:11:53", "VersionNumber": 2.0, "Title": "Udemy Courses", "Slug": "udemy-courses-revenue-generation-and-course-anal", "Subtitle": "A dataset of udemy courses", "Description": "# Udemy Courses\n### A dataset of udemy courses\n_____\n\n### About this dataset\nThis dataset contains information about Udemy courses in the Web Development category, including course title, URL, price, number of subscribers, number of reviews, number of lectures, course level, rating, content duration, published timestamp, and subject. With this dataset researchers can track the performance of courses and uncover opportunities to generate revenue. This data set is perfect for those who want to learn more about web development or improve their skills in the field\n\n### How to use the dataset\nTo get started with this dataset, you will need to download it from kaggle. After downloading the dataset, you will need to unzip it. Once you have unzipped the file, you will be able to open it in a text editor such as Microsoft Word or Notepad ++.\n\nThe data is organized into columns with the following information:\n\n-course_title: The title of the course. (String)\n-url: The URL of the course. (String)\n-price: The price of the course. (Float)\n-num_subscribers: The number of subscribers for the course. (Integer)\n-num_reviews: The number of reviews for the course. (Integer)\n-num_lectures: The number of lectures for the course. (Integer)\n-level: The level of the course. (String)\n\n### Research Ideas\n1. analyzing which Udemy courses in the Web Development category are the most popular, and understanding what factors contribute to popularity\n2. using published timestamp and content duration to understand how course quality affects students' willingness to pay\n3. predicting the price of a Udemy course based on its number of subscribers, reviews, lectures, level, rating, and content duration\n\n### Acknowledgements\nThe data for this dataset was collected from the Udemy website.\n\n### License\n\n> See the dataset description for more information.\n\n### Columns\n\n**File: 3.1-data-sheet-udemy-courses-business-courses.csv**\n| Column name | Description |\n|:------------------------|:-----------------------------------------------------------------|\n| **course_title** | The title of the Udemy course. (String) |\n| **url** | The URL of the Udemy course. (String) |\n| **price** | The price of the Udemy course. (Float) |\n| **num_subscribers** | The number of subscribers for the Udemy course. (Integer) |\n| **num_reviews** | The number of reviews for the Udemy course. (Integer) |\n| **num_lectures** | The number of lectures in the Udemy course. (Integer) |\n| **level** | The level of the Udemy course. (String) |\n| **Rating** | The rating of the Udemy course. (Float) |\n| **content_duration** | The content duration of the Udemy course. (Float) |\n| **published_timestamp** | The timestamp of when the Udemy course was published. (Datetime) |\n| **subject** | The subject of the Udemy course. (String) |\n\n_____\n\n**File: 3.1-data-sheet-udemy-courses-design-courses.csv**\n| Column name | Description |\n|:------------------------|:-----------------------------------------------------------------|\n| **course_title** | The title of the Udemy course. (String) |\n| **url** | The URL of the Udemy course. 
(String) |\n| **price** | The price of the Udemy course. (Float) |\n| **num_subscribers** | The number of subscribers for the Udemy course. (Integer) |\n| **num_reviews** | The number of reviews for the Udemy course. (Integer) |\n| **num_lectures** | The number of lectures in the Udemy course. (Integer) |\n| **level** | The level of the Udemy course. (String) |\n| **Rating** | The rating of the Udemy course. (Float) |\n| **content_duration** | The content duration of the Udemy course. (Float) |\n| **published_timestamp** | The timestamp of when the Udemy course was published. (Datetime) |\n| **subject** | The subject of the Udemy course. (String) |\n\n_____\n\n**File: 3.1-data-sheet-udemy-courses-music-courses.csv**\n| Column name | Description |\n|:------------------------|:-----------------------------------------------------------------|\n| **course_title** | The title of the Udemy course. (String) |\n| **url** | The URL of the Udemy course. (String) |\n| **price** | The price of the Udemy course. (Float) |\n| **num_subscribers** | The number of subscribers for the Udemy course. (Integer) |\n| **num_reviews** | The number of reviews for the Udemy course. (Integer) |\n| **num_lectures** | The number of lectures in the Udemy course. (Integer) |\n| **level** | The level of the Udemy course. (String) |\n| **Rating** | The rating of the Udemy course. (Float) |\n| **content_duration** | The content duration of the Udemy course. (Float) |\n| **published_timestamp** | The timestamp of when the Udemy course was published. (Datetime) |\n| **subject** | The subject of the Udemy course. (String) |\n\n_____\n\n**File: 3.1-data-sheet-udemy-courses-web-development.csv**\n| Column name | Description |\n|:------------------------|:-----------------------------------------------------------------|\n| **course_title** | The title of the Udemy course. (String) |\n| **url** | The URL of the Udemy course. (String) |\n| **price** | The price of the Udemy course. (Float) |\n| **num_subscribers** | The number of subscribers for the Udemy course. (Integer) |\n| **num_reviews** | The number of reviews for the Udemy course. (Integer) |\n| **num_lectures** | The number of lectures in the Udemy course. (Integer) |\n| **level** | The level of the Udemy course. (String) |\n| **Rating** | The rating of the Udemy course. (Float) |\n| **content_duration** | The content duration of the Udemy course. (Float) |\n| **published_timestamp** | The timestamp of when the Udemy course was published. (Datetime) |\n| **subject** | The subject of the Udemy course. (String) |\n\n_____\n\n**File: Entry Level Project Sheet - 3.1-data-sheet-udemy-courses-web-development.csv**\n| Column name | Description |\n|:------------------------|:-----------------------------------------------------------------|\n| **course_title** | The title of the Udemy course. (String) |\n| **url** | The URL of the Udemy course. (String) |\n| **price** | The price of the Udemy course. (Float) |\n| **num_subscribers** | The number of subscribers for the Udemy course. (Integer) |\n| **num_reviews** | The number of reviews for the Udemy course. (Integer) |\n| **num_lectures** | The number of lectures in the Udemy course. (Integer) |\n| **level** | The level of the Udemy course. (String) |\n| **content_duration** | The content duration of the Udemy course. (Float) |\n| **published_timestamp** | The timestamp of when the Udemy course was published. (Datetime) |\n| **subject** | The subject of the Udemy course. 
(String) |", "VersionNotes": "version update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2554282, "CreatorUserId": 10654180, "OwnerUserId": 10654180.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4338372.0, "CurrentDatasourceVersionId": 4396743.0, "ForumId": 2583451, "Type": 2, "CreationDate": "10/17/2022 00:11:41", "LastActivityDate": "10/17/2022", "TotalViews": 26496, "TotalDownloads": 4600, "TotalVotes": 89, "TotalKernels": 25}]
|
[{"Id": 10654180, "UserName": "thedevastator", "DisplayName": "The Devastator", "RegisterDate": "05/26/2022", "PerformanceTier": 4}]
|
import numpy as np
from sklearn.metrics import mean_squared_error
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
# Import from Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import statistics as sts
import matplotlib.pyplot as plt
from matplotlib import cm
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.cluster import SilhouetteVisualizer
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import scipy.stats as stat
import pylab
from imblearn.over_sampling import SMOTE
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
from scipy.stats import ttest_ind
from sklearn import metrics
from scipy.stats import spearmanr
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import silhouette_score
from sklearn.metrics import davies_bouldin_score
from sklearn.metrics import calinski_harabasz_score
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LinearRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import r2_score, accuracy_score, f1_score
import warnings
from scipy import stats
warnings.filterwarnings("ignore")
# Dataset view settings
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 90)
# functions to help us to draw graphs
def plots_helper(
column,
): # calculating the IQR to determine the exact number of outliers in each feature and skewness
Q3 = np.quantile(column, 0.75)
Q1 = np.quantile(column, 0.25)
IQR = Q3 - Q1
lower_range = Q1 - 1.5 * IQR
upper_range = Q3 + 1.5 * IQR
oc = len(column[column > upper_range]) + len(column[column < lower_range])
percent = round((oc / len(column)) * 100, 2)
sk = round(stat.skew(column), 2)
return oc, percent, sk
def box_dist_prob_plots(df): # BoxPlot , Distribution, Histogram and ProbabilityPlot
number_features = len(df.columns)
plt.figure(figsize=(25, number_features * 6))
for column, i in zip(df, range(1, number_features * 3 + 1, 3)):
# calculating the outliers and their percentage
oc, percent, sk = plots_helper(df[column])
# plotting the 3 diagrams for each feature
plt.subplot(number_features, 3, i)
sns.boxplot(df[column])
plt.xlabel(column, size=14)
plt.title(
f"Number of outliers {oc} ({percent}%)",
fontsize=12,
color="red",
fontweight="bold",
)
plt.subplot(number_features, 3, i + 1)
sns.distplot(df[column], kde=False)
plt.title(f"Skewness = {sk}", fontsize=12, color="red", fontweight="bold")
plt.xlabel(column, size=14)
plt.ylabel("Count", size=14)
plt.axvline(x=df[column].median(), ls="--")
plt.axvline(x=df[column].mean())
plt.legend(["mean", "median"])
plt.subplot(number_features, 3, i + 2)
stat.probplot(df[column], dist="norm", plot=pylab, rvalue=True)
def box_for_one_col(column, name):
oc, percent, sk = plots_helper(column)
# plotting the 3 diagrams for each feature
plt.subplot(1, 3, 1)
sns.boxplot(column)
plt.xlabel(name, size=14)
plt.title(
f"Number of outliers {oc} ({percent}%)",
fontsize=12,
color="red",
fontweight="bold",
)
plt.subplot(1, 3, 2)
sns.distplot(column, kde=False)
plt.title(f"Skewness = {sk}", fontsize=12, color="red", fontweight="bold")
plt.xlabel(name, size=14)
plt.ylabel("Count", size=14)
plt.axvline(x=column.median(), ls="--")
plt.axvline(x=column.mean())
plt.legend(["mean", "median"])
plt.subplot(1, 3, 3)
stat.probplot(column, dist="norm", plot=pylab, rvalue=True)
# function to remove outliers using IQR
# Define a function to remove outliers based on the IQR
def remove_outliers_iqr(df, col_name):
Q1 = df[col_name].quantile(0.25)
Q3 = df[col_name].quantile(0.75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
df_filtered = df[(df[col_name] > lower_bound) & (df[col_name] < upper_bound)]
return df_filtered
# Remove outliers from the DataFrame
# read data
df = pd.read_csv(
"/kaggle/input/udemy-courses-revenue-generation-and-course-anal/Entry Level Project Sheet - 3.1-data-sheet-udemy-courses-web-development.csv"
)
# see samples
df.sample(5)
# check different subjects
pd.unique(df["subject"])
# see dataset info: column names, dtypes, null counts and the number of rows
df.info()
# notice that the minimum for duration and num of lectures is 0, which is a mistake (we will solve it later)
df.describe()
# **Data Cleaning**
# check duplicate and remove them if found
df[df.duplicated()]
# remove columns we do not need them (url, course id)
df.drop("url", axis=1, inplace=True)
df.drop("course_id", axis=1, inplace=True)
# check the dtype of the timestamp column
#
type(df["published_timestamp"][0])
# convert it to a date
df["published_timestamp"] = pd.to_datetime(df["published_timestamp"]).dt.date
df["Date"] = pd.to_datetime(df["Date"]).dt.date
# we notice there are both a Free/Paid column and a price column
df.sample(5)
# check for inconsistent rows: Free with a non-zero price, or Paid with price = 0 (both would be mistakes)
rslt_df = df[(df["price"] != 0) & (df["Free/Paid"] == "Paid")]
# print('\nResult dataframe :\n', rslt_df)
rslt_df = df[(df["price"] == 0) & (df["Free/Paid"] == "Free")]
# print('\nResult dataframe :\n', rslt_df)
rslt_df = df[(df["price"] != 0) & (df["Free/Paid"] == "Free")]
print("\nResult dataframe :\n", rslt_df)
rslt_df = df[(df["price"] == 0) & (df["Free/Paid"] == "Paid")]
print("\nResult dataframe :\n", rslt_df)
# now remove Free/Paid because price carries more detailed information
df.drop("Free/Paid", axis=1, inplace=True)
df.sample(5)
# check if we find row date != published_timestamp
rslt_df = np.where(df["Date"] == df["published_timestamp"], 1, 0)
print("\nResult dataframe :\n", np.sum(rslt_df))
# we did not find any, so remove one of the two columns
df.drop("published_timestamp", axis=1, inplace=True)
df.sample(5)
# treat duration = 0 as missing: we found 1 such row, and also 1 row with num_lectures = 0 (is it the same row?)
rslt_df = np.where(df["content_duration"] == 0, 1, 0)
print("\nResult dataframe :\n", np.sum(rslt_df))
rslt_df = np.where(df["num_lectures"] == 0, 1, 0)
print("\nResult dataframe :\n", np.sum(rslt_df))
# remove it (yes same row :D)
df = df[df["num_lectures"] >= 1]
df.describe()
# drop course_title we do not need it
df.drop("course_title", axis=1, inplace=True)
# one-hot encode the categorical columns: e.g. subject has 4 categories, so we create 4 indicator columns with values 0/1
df_Gragh = df.copy()
df_Gragh.drop("Date", axis=1, inplace=True)
df_Gragh = pd.get_dummies(df_Gragh)
df_Gragh["Date"] = df["Date"]
df_Gragh.sample(5)
# **End data cleaning**
# **Now answer questions:**
# A-Descriptive:
# 1.What is the most common level for each subject?
# 2.What is the average rating for each subject?
# B-Exploratory:
# 3.What is the relation between the number of subscribers and the publishing year of the course?
# 4.What is the relation between the course level and the course price?
# 5.What is the relation between the rating with the number of reviews and subscribers?
# C-Predictive:
# 6.What will be the price for a new course?
# 7.What will be the number of subscribers to a new course in a certain subject?
# 8.What will be the rating of a new course?
# D-Causal:
# 9.does the increase of price affect number of subscribers?
# 10.does the subject affect the duration?
# 11.does the number of lectures and the duration affect the number of subscribers?
# E-Mechanistic:
# 12.How does the increase or decrease in the course rating affect the course’s price?
sub = [
"subject_Business Finance",
"subject_Graphic Design",
"subject_Musical Instruments",
"subject_Web Development",
]
lev = [
"level_All Levels",
"level_Beginner Level",
"level_Expert Level",
"level_Intermediate Level",
]
# What is the most common level for each subject?
# intuitively **All Levels** makes sense, but we will verify it
c = 0
for s in sub:
maximume = 0
sStr = -1
lStr = -1
d = []
for l in lev:
res = np.where((df_Gragh[s] == 1) & (df_Gragh[l] == 1), 1, 0)
d.append(np.sum(res))
print(" for s =", s, " level=", l, " num=", np.sum(res))
c += np.sum(res)
if np.sum(res) > maximume:
sStr = s
lStr = l
maximume = np.sum(res)
fig = plt.figure(figsize=(10, 5))
plt.bar(lev, d, color="maroon", width=0.4)
plt.xlabel("Levels")
plt.ylabel("No. of level")
plt.title(s)
plt.show()
print(c)
# 2-What is the average rating for each subject?
# avarage rate for subject_Business Finance is 0.6901344537815126
# avarage rate for subject_Graphic Design is 0.7303820598006644
# avarage rate for subject_Musical Instruments is 0.30891176470588233
# avarage rate for subject_Web Development is 0.643050706566916
d = []
for s in sub:
x = df_Gragh.loc[df_Gragh[s] == 1, "rating"].mean()
d.append(x)
print("avarage rate for ", s, " is ", x)
fig = plt.figure(figsize=(10, 5))
plt.bar(sub, d, color="maroon", width=0.4)
plt.xlabel("subjects")
plt.ylabel("average rating")
plt.title("average rating for each subject")
plt.show()
# 3.What is the relation between the number of subscribers and the publishing year of the course?
# we can not notice anything from first graph so we will use time series STL decompostion to get information
df_Gragh["Year"], df_Gragh["Month"] = df_Gragh["Date"].apply(lambda x: x.year), df[
"Date"
].apply(lambda x: x.month)
y = pd.unique(df_Gragh["Year"])
y = np.sort(y)
m = pd.unique(df_Gragh["Month"])
m = np.sort(m)
m
import datetime
numOfSubs = []
monthes = []
for Y in y:
for i, M in enumerate(m):
sumOfMonth = df_Gragh.loc[
(df_Gragh["Year"] == Y) & (df_Gragh["Month"] == M), "num_subscribers"
].sum()
numOfSubs.append(sumOfMonth)
monthes.append(datetime.date(Y, M, 1))
for i, Y in enumerate(y):
print(
"year=",
Y,
" max month=",
(numOfSubs.index(np.max(numOfSubs[i * 12 : i * 12 + 11])) % 12) + 1,
)
print(
"year=",
Y,
" min month=",
(numOfSubs.index(np.min(numOfSubs[i * 12 : i * 12 + 11])) % 12) + 1,
)
plt.figure(figsize=(15, 15))
plt.plot(monthes, numOfSubs)
plt.xticks(rotation=90)
plt.show()
# we notice trend in 2016 and also increase subscribers every year but in 2017 down why?????
from statsmodels.tsa.seasonal import STL
dataFrameRel = pd.DataFrame(numOfSubs, index=monthes)
print(dataFrameRel.sample(5))
stl = STL(dataFrameRel, period=12)
res = stl.fit()
fig = res.plot()
# becuase max date is 6/7/2017 we didnot have information about last 5 months
df_Gragh["Date"].max()
df_Gragh.sample(5)
# we have three types of hypothesis test first category and category columns ,second numerical and category columns and third numerical and numerical columns
# numerical and numerical columns:
# the Spearman rank correlation coefficient measures the strength and direction of the monotonic relationship between X and Y. The p-value indicates the probability of observing a correlation coefficient as extreme as the one computed, assuming that there is no monotonic relationship between X and Y. If the p-value is less than the chosen significance level (e.g., 0.05), you can reject the null hypothesis that there is no monotonic relationship between X and Y.
# numerical and category columns :
# f_oneway test
# if p_val < 0.05:
# print("There is a significant difference in the mean price across different levels")
# else:
# print("There is no significant difference in the mean price across different levels")
# there is no category and category columns in questions
# **note:correlation detect only linear relation**
# 4.What is the relation between the course level and the course price?
# here category and numerical columns so we will use f_one way test
# and this test tell us if probability .05 there is relation.
import pandas as pd
from scipy.stats import f_oneway
# Group the data by category
groups = df.groupby("level")["price"].apply(list)
print(groups)
# # Calculate the means and standard deviations of the price for each category
# means = groups.mean()
# stds = groups.std()
# Perform One-Way ANOVA
f_stat, p_val = f_oneway(*groups)
print(p_val)
if p_val < 0.05:
print("There is a significant difference in the mean price across different levels")
else:
print(
"There is no significant difference in the mean price across different levels"
)
print(df["price"].corr(df["num_lectures"]))
plt.scatter(df["price"], df["num_lectures"])
plt.xlabel("price")
plt.ylabel("num_lectures")
plt.title("relation between price and num_lectures")
plt.show()
corr_coef, p_val = spearmanr(df["price"], df["num_lectures"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
# 5.What is the relation between the rating with the number of reviews and subscribers?
# if we have two numerical cloumns we use ttest and also we can use correlation
# so we will do that for all combination between rating ,
# number of reviews and subscribers:
# we see relation between number of reviews and subscribers because correlation = .65
# and also spearmanr test p-value=0 and that tell us there is relation
# number of reviews and rating:
# we see no relation between number of reviews and rating use correlation = 0.004184210818520607
# and also spearmanr test p-value=.67 and that tell us there is no relation
# number of subscribers and rating:
# we see no relation between number of subscribers and rating use correlation = -0.00726145411559383
# and also spearmanr test p-value=.33and that tell us there is no relation
print(df["rating"].corr(df["num_reviews"]))
plt.scatter(df["rating"], df["num_reviews"])
plt.xlabel("rating")
plt.ylabel("num_reviews")
plt.title("relation between rating and num_reviews")
plt.show()
corr_coef, p_val = spearmanr(df["rating"], df["num_reviews"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["rating"].corr(df["num_subscribers"]))
plt.scatter(df["rating"], df["num_subscribers"])
plt.xlabel("rating")
plt.ylabel("num_subscribers")
plt.title("relation between rating and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["rating"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["num_reviews"].corr(df["num_subscribers"]))
plt.scatter(df["num_reviews"], df["num_subscribers"])
plt.xlabel("num_reviews")
plt.ylabel("num_subscribers")
plt.title("relation between num_reviews and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["num_reviews"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
# 9.does the increase of price affect number of subscribers?
# from correlation we see there is no relation
print(df["price"].corr(df["num_subscribers"]))
plt.scatter(df["price"], df["num_subscribers"])
plt.xlabel("price")
plt.ylabel("num_subscribers")
plt.title("relation between price and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["num_reviews"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
# 10.does the subject affect the duration?
# f_oneway test p-value=0 then there is relation
import pandas as pd
from scipy.stats import f_oneway
# Group the data by category
groups = df.groupby("subject")["content_duration"].apply(list)
print(groups)
# Perform One-Way ANOVA
f_stat, p_val = f_oneway(*groups)
print(p_val)
if p_val < 0.05:
print(
"There is a significant difference in the mean subject across different content_duration"
)
else:
print(
"There is no significant difference in the mean subject across different content_duration"
)
# 11.does the number of lectures and the duration affect the number of subscribers?
# correlation = .8
# and p-value = 0
# then there is high relation between content_duration and num_lectures
# and also num_subscribers and content_duration
# and also num_lectures and num_subscribers
# because p-value =0 but correlation not high why ???????
# becuause correltion detect only linear relation but test detect prabola and .....
print(df["num_lectures"].corr(df["content_duration"]))
plt.scatter(df["num_lectures"], df["content_duration"])
corr_coef, p_val = spearmanr(df["num_lectures"], df["content_duration"])
plt.xlabel("num_lectures")
plt.ylabel("content_duration")
plt.title("relation between num_lectures and content_duration")
plt.show()
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["num_lectures"].corr(df["num_subscribers"]))
plt.scatter(df["num_lectures"], df["num_subscribers"])
plt.xlabel("num_lectures")
plt.ylabel("num_subscribers")
plt.title("relation between num_lectures and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["num_lectures"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
print(df["content_duration"].corr(df["num_subscribers"]))
plt.scatter(df["content_duration"], df["num_subscribers"])
plt.xlabel("content_duration")
plt.ylabel("num_subscribers")
plt.title("relation between content_duration and num_subscribers")
plt.show()
corr_coef, p_val = spearmanr(df["content_duration"], df["num_subscribers"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
df.sample(5)
# mech
import pandas as pd
from scipy.stats import f_oneway
# Group the data by category
groups = df.groupby("subject")["num_subscribers"].apply(list)
print(groups)
# # Calculate the means and standard deviations of the price for each category
# means = groups.mean()
# stds = groups.std()
# Perform One-Way ANOVA
f_stat, p_val = f_oneway(*groups)
print(p_val)
if p_val < 0.05:
print("There is a significant difference in the mean price across different levels")
else:
print(
"There is no significant difference in the mean price across different levels"
)
# 12.How does the increase or decrease in the course rating affect the course’s price?
# correlation =0 and p-value=.4 then there is no relation
# mach
print(df["price"].corr(df["rating"]))
plt.scatter(df["price"], df["rating"])
plt.xlabel("price")
plt.ylabel("rating")
plt.title("relation between price and rating")
plt.show()
corr_coef, p_val = spearmanr(df["price"], df["rating"])
print(f"Spearman rank correlation coefficient: {corr_coef:.4f}")
print(f"P-value: {p_val:.4f}")
df_Gragh.sample(5)
df_Gragh.drop("Date", axis=1, inplace=True)
df_Gragh.drop("Year", axis=1, inplace=True)
df_Gragh.drop("Month", axis=1, inplace=True)
df_Gragh.sample(5)
# **there is problem in data which is for analyis only not to predict so randomness in data so much ,so error not the best**
# 6.What will be the price for a new course?
# 7.What will be the number of subscribers to a new course in a curtain subject?
# 8.What will be the rating of a new course?
# we use more than one classifier to get the best classifier but still bad that is becuase no relation between (price or rating or number of subscribers) and all feature and also randomness
# for col in df_Gragh.columns:
# print(col)
# plt.scatter(df_Gragh["price"],df_Gragh[col])
# plt.show()
for col in df_Gragh.columns:
print(col)
plt.scatter(df_Gragh["price"], df_Gragh[col])
plt.show()
X = df_Gragh.iloc[:, :].copy()
Y = df_Gragh["price"].copy()
print(len(Y))
X = X.drop(["price"], axis=1)
X.columns
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.1, random_state=412
)
from sklearn.compose import TransformedTargetRegressor
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PowerTransformer
from xgboost import XGBRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
steps = [
("power_transformer", PowerTransformer()),
("regressor", XGBRegressor(n_estimators=350)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [("power_transformer", PowerTransformer()), ("Ridge", Ridge(alpha=0.001))]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [("power_transformer", PowerTransformer()), ("lasso", Lasso(alpha=0.5))]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [
("power_transformer", PowerTransformer()),
("DecisionTree", DecisionTreeRegressor()),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [
("power_transformer", PowerTransformer()),
("RandomForest", RandomForestRegressor()),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
for col in df_Gragh.columns:
print(col)
plt.scatter(df_Gragh["rating"], df_Gragh[col])
plt.show()
X = df_Gragh.iloc[:, :].copy()
Y = df_Gragh["rating"].copy()
print(len(Y))
X = X.drop(["rating"], axis=1)
X.columns
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.1, random_state=412
)
steps = [
("power_transformer", PowerTransformer()),
("regressor", XGBRegressor(n_estimators=350)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
steps = [
("power_transformer", PowerTransformer()),
("DecisionTree", DecisionTreeRegressor(max_depth=15)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
df_Gragh.corr()
X = df_Gragh.iloc[:, :].copy()
Y = df_Gragh["num_subscribers"].copy()
print(len(Y))
X = X.drop(["num_subscribers"], axis=1)
X.columns
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.1, random_state=412
)
steps = [
("power_transformer", PowerTransformer()),
("regressor", XGBRegressor(n_estimators=350)),
]
# create the pipeline
pipeline = Pipeline(steps=steps)
# fit the pipeline on X_train and y_train
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_train)
print(math.sqrt(mean_squared_error(y_train, y_pred)))
# Predict on the test data
y_pred = pipeline.predict(X_test)
print(math.sqrt(mean_squared_error(y_test, y_pred)))
| false | 1 | 8,049 | 5 | 8,478 | 8,049 |
||
129958532
|
<jupyter_start><jupyter_text>Titanic dataset

### Context
I took the Titanic test file and the gender_submission file and put them together in Excel to make a CSV. This is great for making charts to help you visualize, and it also helps you know who died or survived. It is at least 70% right, but it's up to you to make it 100%. Thanks to the Titanic beginner competition for providing the data. Please **Upvote** my dataset, it will mean a lot to me. Thank you!
Kaggle dataset identifier: test-file
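A minimal pandas sketch of the construction described above, assuming the standard competition files `test.csv` and `gender_submission.csv` (the file names and the output name are assumptions for illustration, not taken from this dataset):

```python
import pandas as pd

# gender_submission.csv holds PassengerId + Survived; test.csv holds the remaining passenger columns.
test = pd.read_csv("test.csv")
submission = pd.read_csv("gender_submission.csv")

# Join on PassengerId so every passenger in the test file gets a Survived label, then save the result.
tested = submission.merge(test, on="PassengerId", how="left")
tested.to_csv("tested.csv", index=False)
```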
<jupyter_code>import pandas as pd
df = pd.read_csv('test-file/tested.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 418 entries, 0 to 417
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 418 non-null int64
1 Survived 418 non-null int64
2 Pclass 418 non-null int64
3 Name 418 non-null object
4 Sex 418 non-null object
5 Age 332 non-null float64
6 SibSp 418 non-null int64
7 Parch 418 non-null int64
8 Ticket 418 non-null object
9 Fare 417 non-null float64
10 Cabin 91 non-null object
11 Embarked 418 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 39.3+ KB
<jupyter_text>Examples:
{
"PassengerId": 892,
"Survived": 0,
"Pclass": 3,
"Name": "Kelly, Mr. James",
"Sex": "male",
"Age": 34.5,
"SibSp": 0,
"Parch": 0,
"Ticket": 330911,
"Fare": 7.8292,
"Cabin": NaN,
"Embarked": "Q"
}
{
"PassengerId": 893,
"Survived": 1,
"Pclass": 3,
"Name": "Wilkes, Mrs. James (Ellen Needs)",
"Sex": "female",
"Age": 47.0,
"SibSp": 1,
"Parch": 0,
"Ticket": 363272,
"Fare": 7.0,
"Cabin": NaN,
"Embarked": "S"
}
{
"PassengerId": 894,
"Survived": 0,
"Pclass": 2,
"Name": "Myles, Mr. Thomas Francis",
"Sex": "male",
"Age": 62.0,
"SibSp": 0,
"Parch": 0,
"Ticket": 240276,
"Fare": 9.6875,
"Cabin": NaN,
"Embarked": "Q"
}
{
"PassengerId": 895,
"Survived": 0,
"Pclass": 3,
"Name": "Wirz, Mr. Albert",
"Sex": "male",
"Age": 27.0,
"SibSp": 0,
"Parch": 0,
"Ticket": 315154,
"Fare": 8.6625,
"Cabin": NaN,
"Embarked": "S"
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### Load the data you receive into a Pandas DataFrame
# # iris-flower-dataset
df1 = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
df1
# #### Show the first five rows of the data set.
first_five_rows = df1.head(5)
first_five_rows
# #### Show the description and the info of the data set.
description = df1.describe()
description
info = df1.info()
info
import seaborn as sns
import matplotlib.pyplot as plt
g = sns.violinplot(y="species", x="sepal_length", data=df1, inner="quartile")
plt.show()
# #### creates a violin plot that visualizes the distribution of the numerical variable 'sepal_length' across different categories or groups represented by the 'species' column.
sns.kdeplot(
data=df1.query("species != 'Iris-virginica'"),
x="sepal_width",
y="sepal_length",
hue="species",
thresh=0.1,
)
# #### Comparing and identifying patterns in 'sepal_width' and 'sepal_length' for 'setosa' and 'versicolor' species
# # Titanic dataset
df2 = pd.read_csv("/kaggle/input/test-file/tested.csv")
df2
# #### Show the first five rows of the data set.
first_five_rows = df2.head(5)
first_five_rows
# #### Show the description and the info of the data set.
description = df2.describe()
description
info = df2.info()
info
sns.histplot(
data=df2, x="Age", hue="Survived", multiple="stack", kde=True, log_scale=True
)
# #### The sns.histplot function creates a histogram to visualize the distribution of ages (x='Age') in the dataset. The bars in the histogram are stacked based on the Survived category (hue='Survived'). The color blue represents category 0 (non-survived), while the color orange represents category 1 (survived). By setting multiple='stack', the bars are stacked on top of each other.
sns.lmplot(data=df2, x="Age", y="Fare", hue="Survived", scatter_kws={"s": 70}, height=6)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/958/129958532.ipynb
|
test-file
|
brendan45774
|
[{"Id": 129958532, "ScriptId": 38631708, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6177653, "CreationDate": "05/17/2023 17:51:32", "VersionNumber": 1.0, "Title": "Data Visualization", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 82.0, "LinesInsertedFromPrevious": 82.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186393584, "KernelVersionId": 129958532, "SourceDatasetVersionId": 2879186}, {"Id": 186393583, "KernelVersionId": 129958532, "SourceDatasetVersionId": 23404}]
|
[{"Id": 2879186, "DatasetId": 826163, "DatasourceVersionId": 2926173, "CreatorUserId": 2681031, "LicenseName": "CC0: Public Domain", "CreationDate": "12/02/2021 16:11:42", "VersionNumber": 6.0, "Title": "Titanic dataset", "Slug": "test-file", "Subtitle": "Gender submission and test file merged", "Description": "\n\n### Context\n\nI took the titanic test file and the gender_submission and put them together in excel to make a csv. This is great for making charts to help you visualize. This also will help you know who died or survived. At least 70% right, but its up to you to make it 100% Thanks to the titanic beginners competitions for providing with the data. Please **Upvote** my dataset, it will mean a lot to me. Thank you!", "VersionNotes": "tested", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 826163, "CreatorUserId": 2681031, "OwnerUserId": 2681031.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2879186.0, "CurrentDatasourceVersionId": 2926173.0, "ForumId": 841293, "Type": 2, "CreationDate": "08/11/2020 14:08:36", "LastActivityDate": "08/11/2020", "TotalViews": 262161, "TotalDownloads": 72658, "TotalVotes": 665, "TotalKernels": 203}]
|
[{"Id": 2681031, "UserName": "brendan45774", "DisplayName": "Brenda N", "RegisterDate": "01/07/2019", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### Load the data you receive into a Pandas DataFrame
# # iris-flower-dataset
df1 = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
df1
# #### Show the first five rows of the data set.
first_five_rows = df1.head(5)
first_five_rows
# #### Show the description and the info of the data set.
description = df1.describe()
description
info = df1.info()
info
import seaborn as sns
import matplotlib.pyplot as plt
g = sns.violinplot(y="species", x="sepal_length", data=df1, inner="quartile")
plt.show()
# #### creates a violin plot that visualizes the distribution of the numerical variable 'sepal_length' across different categories or groups represented by the 'species' column.
sns.kdeplot(
data=df1.query("species != 'Iris-virginica'"),
x="sepal_width",
y="sepal_length",
hue="species",
thresh=0.1,
)
# #### Comparing and identifying patterns in 'sepal_width' and 'sepal_length' for 'setosa' and 'versicolor' species
# # Titanic dataset
df2 = pd.read_csv("/kaggle/input/test-file/tested.csv")
df2
# #### Show the first five rows of the data set.
first_five_rows = df2.head(5)
first_five_rows
# #### Show the description and the info of the data set.
description = df2.describe()
description
info = df2.info()
info
sns.histplot(
data=df2, x="Age", hue="Survived", multiple="stack", kde=True, log_scale=True
)
# #### The sns.histplot function creates a histogram to visualize the distribution of ages (x='Age') in the dataset. The bars in the histogram are stacked based on the Survived category (hue='Survived'). The color blue represents category 0 (non-survived), while the color orange represents category 1 (survived). By setting multiple='stack', the bars are stacked on top of each other.
sns.lmplot(data=df2, x="Age", y="Fare", hue="Survived", scatter_kws={"s": 70}, height=6)
|
[{"test-file/tested.csv": {"column_names": "[\"PassengerId\", \"Survived\", \"Pclass\", \"Name\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Ticket\", \"Fare\", \"Cabin\", \"Embarked\"]", "column_data_types": "{\"PassengerId\": \"int64\", \"Survived\": \"int64\", \"Pclass\": \"int64\", \"Name\": \"object\", \"Sex\": \"object\", \"Age\": \"float64\", \"SibSp\": \"int64\", \"Parch\": \"int64\", \"Ticket\": \"object\", \"Fare\": \"float64\", \"Cabin\": \"object\", \"Embarked\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 418 entries, 0 to 417\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 418 non-null int64 \n 1 Survived 418 non-null int64 \n 2 Pclass 418 non-null int64 \n 3 Name 418 non-null object \n 4 Sex 418 non-null object \n 5 Age 332 non-null float64\n 6 SibSp 418 non-null int64 \n 7 Parch 418 non-null int64 \n 8 Ticket 418 non-null object \n 9 Fare 417 non-null float64\n 10 Cabin 91 non-null object \n 11 Embarked 418 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 39.3+ KB\n", "summary": "{\"PassengerId\": {\"count\": 418.0, \"mean\": 1100.5, \"std\": 120.81045760473994, \"min\": 892.0, \"25%\": 996.25, \"50%\": 1100.5, \"75%\": 1204.75, \"max\": 1309.0}, \"Survived\": {\"count\": 418.0, \"mean\": 0.36363636363636365, \"std\": 0.4816221409322309, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"Pclass\": {\"count\": 418.0, \"mean\": 2.2655502392344498, \"std\": 0.8418375519640503, \"min\": 1.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 3.0, \"max\": 3.0}, \"Age\": {\"count\": 332.0, \"mean\": 30.272590361445783, \"std\": 14.181209235624422, \"min\": 0.17, \"25%\": 21.0, \"50%\": 27.0, \"75%\": 39.0, \"max\": 76.0}, \"SibSp\": {\"count\": 418.0, \"mean\": 0.4473684210526316, \"std\": 0.8967595611217135, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 8.0}, \"Parch\": {\"count\": 418.0, \"mean\": 0.3923444976076555, \"std\": 0.9814288785371691, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 9.0}, \"Fare\": {\"count\": 417.0, \"mean\": 35.627188489208635, \"std\": 55.907576179973844, \"min\": 0.0, \"25%\": 7.8958, \"50%\": 14.4542, \"75%\": 31.5, \"max\": 512.3292}}", "examples": "{\"PassengerId\":{\"0\":892,\"1\":893,\"2\":894,\"3\":895},\"Survived\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"Pclass\":{\"0\":3,\"1\":3,\"2\":2,\"3\":3},\"Name\":{\"0\":\"Kelly, Mr. James\",\"1\":\"Wilkes, Mrs. James (Ellen Needs)\",\"2\":\"Myles, Mr. Thomas Francis\",\"3\":\"Wirz, Mr. Albert\"},\"Sex\":{\"0\":\"male\",\"1\":\"female\",\"2\":\"male\",\"3\":\"male\"},\"Age\":{\"0\":34.5,\"1\":47.0,\"2\":62.0,\"3\":27.0},\"SibSp\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"Parch\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"Ticket\":{\"0\":\"330911\",\"1\":\"363272\",\"2\":\"240276\",\"3\":\"315154\"},\"Fare\":{\"0\":7.8292,\"1\":7.0,\"2\":9.6875,\"3\":8.6625},\"Cabin\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"Embarked\":{\"0\":\"Q\",\"1\":\"S\",\"2\":\"Q\",\"3\":\"S\"}}"}}]
| true | 2 |
<start_data_description><data_path>test-file/tested.csv:
<column_names>
['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']
<column_types>
{'PassengerId': 'int64', 'Survived': 'int64', 'Pclass': 'int64', 'Name': 'object', 'Sex': 'object', 'Age': 'float64', 'SibSp': 'int64', 'Parch': 'int64', 'Ticket': 'object', 'Fare': 'float64', 'Cabin': 'object', 'Embarked': 'object'}
<dataframe_Summary>
{'PassengerId': {'count': 418.0, 'mean': 1100.5, 'std': 120.81045760473994, 'min': 892.0, '25%': 996.25, '50%': 1100.5, '75%': 1204.75, 'max': 1309.0}, 'Survived': {'count': 418.0, 'mean': 0.36363636363636365, 'std': 0.4816221409322309, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'Pclass': {'count': 418.0, 'mean': 2.2655502392344498, 'std': 0.8418375519640503, 'min': 1.0, '25%': 1.0, '50%': 3.0, '75%': 3.0, 'max': 3.0}, 'Age': {'count': 332.0, 'mean': 30.272590361445783, 'std': 14.181209235624422, 'min': 0.17, '25%': 21.0, '50%': 27.0, '75%': 39.0, 'max': 76.0}, 'SibSp': {'count': 418.0, 'mean': 0.4473684210526316, 'std': 0.8967595611217135, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 8.0}, 'Parch': {'count': 418.0, 'mean': 0.3923444976076555, 'std': 0.9814288785371691, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 9.0}, 'Fare': {'count': 417.0, 'mean': 35.627188489208635, 'std': 55.907576179973844, 'min': 0.0, '25%': 7.8958, '50%': 14.4542, '75%': 31.5, 'max': 512.3292}}
<dataframe_info>
RangeIndex: 418 entries, 0 to 417
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 418 non-null int64
1 Survived 418 non-null int64
2 Pclass 418 non-null int64
3 Name 418 non-null object
4 Sex 418 non-null object
5 Age 332 non-null float64
6 SibSp 418 non-null int64
7 Parch 418 non-null int64
8 Ticket 418 non-null object
9 Fare 417 non-null float64
10 Cabin 91 non-null object
11 Embarked 418 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 39.3+ KB
<some_examples>
{'PassengerId': {'0': 892, '1': 893, '2': 894, '3': 895}, 'Survived': {'0': 0, '1': 1, '2': 0, '3': 0}, 'Pclass': {'0': 3, '1': 3, '2': 2, '3': 3}, 'Name': {'0': 'Kelly, Mr. James', '1': 'Wilkes, Mrs. James (Ellen Needs)', '2': 'Myles, Mr. Thomas Francis', '3': 'Wirz, Mr. Albert'}, 'Sex': {'0': 'male', '1': 'female', '2': 'male', '3': 'male'}, 'Age': {'0': 34.5, '1': 47.0, '2': 62.0, '3': 27.0}, 'SibSp': {'0': 0, '1': 1, '2': 0, '3': 0}, 'Parch': {'0': 0, '1': 0, '2': 0, '3': 0}, 'Ticket': {'0': '330911', '1': '363272', '2': '240276', '3': '315154'}, 'Fare': {'0': 7.8292, '1': 7.0, '2': 9.6875, '3': 8.6625}, 'Cabin': {'0': None, '1': None, '2': None, '3': None}, 'Embarked': {'0': 'Q', '1': 'S', '2': 'Q', '3': 'S'}}
<end_description>
| 752 | 0 | 1,712 | 752 |
129502717
|
<jupyter_start><jupyter_text>extend_model
Kaggle dataset identifier: extend-model
<jupyter_script>import sys
sys.path.append("/kaggle/input/helper-scripts")
sys.path.append("/kaggle/input/fetch1")
import segmentation_models as sm
import tensorflow as tf
tf.config.run_functions_eagerly(True)
from fetch_data1 import fetch
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
from visualise import visualize, denormalize
import os
import cv2
base_dir = "/kaggle/input/isic-2016/ISIC 2016 for segmentation (total)/ISIC 2016 for segmentation (total)/"
batch_size = 4
input_size = (224, 224)
num_epochs = 40
shuffle = False
ratio = 1
grp = 0
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
model_save = "2016_extend_best_model.h5"
callbacks = [
ModelCheckpoint(
"./" + model_save,
monitor="val_iou_score",
save_weights_only=True,
save_best_only=True,
mode="max",
initial_value_threshold=0.0,
),
ReduceLROnPlateau(
monitor="val_iou_score", factor=0.5, patience=3, verbose=1, mode="max"
),
]
train_dataset, validation_dataset, val_paths = fetch(
base_dir, input_size, grp, batch_size, shuffle, ratio
)
model = sm.Unet(backbone_name="densenet201", input_shape=(224, 224, 3))
model.compile(
optimizer=Adam(learning_rate=8e-6), loss=sm.losses.bce_dice_loss, metrics=metrics
)
print(len(model.layers))
# model.summary()
model.load_weights("/kaggle/input/extend-model/2016_extend_best_model.h5")
"""target_dir = '/kaggle/working/'
from tqdm import tqdm
for image_file in tqdm(os.listdir(base_dir), total=len(os.listdir(base_dir))):
image_path = os.path.join(base_dir, image_file)
image = cv2.imread(image_path)
print(image.shape)
mask = model.predict(image).round()
mask_path = os.path.join(target_dir, image_file)
mask = np.reshape(mask[0], (224, 224))
if not cv2.imwrite(mask_path, mask): # Replace with your mask saving code
raise Exception('Image not saved')"""
scores = model.evaluate(validation_dataset)
print("Loss: {:.5}".format(scores[0]))
for metric, value in zip(metrics, scores[1:]):
print("mean {}: {:.5}".format(metric.__name__, value))
"""from PIL import Image
import cv2
from tqdm import tqdm
for batch, val_path in tqdm(zip(validation_dataset, val_paths), total=1279):
img, gt_mask = batch
img = img.numpy()
gt_mask = gt_mask.numpy()
pr_mask = model.predict(img).round()
visualize(
img=denormalize(img.squeeze()),
gt_mask=gt_mask[..., 0].squeeze(),
pr_mask=pr_mask[..., 0].squeeze(),
)
pr_mask = np.reshape(pr_mask[0], (224, 224))
mask = Image.fromarray((pr_mask*255).astype(np.uint8), mode = 'L')
mask.save('/kaggle/working/'+str(val_path[-16:]))"""
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/502/129502717.ipynb
|
extend-model
|
devbhuyan
|
[{"Id": 129502717, "ScriptId": 35681796, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5731496, "CreationDate": "05/14/2023 10:59:00", "VersionNumber": 21.0, "Title": "Segmentation_Unet(densenet121)_2016_original", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 94.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185624719, "KernelVersionId": 129502717, "SourceDatasetVersionId": 5152424}]
|
[{"Id": 5152424, "DatasetId": 2993729, "DatasourceVersionId": 5224151, "CreatorUserId": 5731496, "LicenseName": "Unknown", "CreationDate": "03/12/2023 14:56:45", "VersionNumber": 1.0, "Title": "extend_model", "Slug": "extend-model", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2993729, "CreatorUserId": 5731496, "OwnerUserId": 5731496.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5152424.0, "CurrentDatasourceVersionId": 5224151.0, "ForumId": 3032452, "Type": 2, "CreationDate": "03/12/2023 14:56:45", "LastActivityDate": "03/12/2023", "TotalViews": 3, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 5731496, "UserName": "devbhuyan", "DisplayName": "dev bhuyan", "RegisterDate": "09/06/2020", "PerformanceTier": 0}]
|
import sys
sys.path.append("/kaggle/input/helper-scripts")
sys.path.append("/kaggle/input/fetch1")
import segmentation_models as sm
import tensorflow as tf
tf.config.run_functions_eagerly(True)
from fetch_data1 import fetch
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
from visualise import visualize, denormalize
import os
import cv2
base_dir = "/kaggle/input/isic-2016/ISIC 2016 for segmentation (total)/ISIC 2016 for segmentation (total)/"
batch_size = 4
input_size = (224, 224)
num_epochs = 40
shuffle = False
ratio = 1
grp = 0
metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
model_save = "2016_extend_best_model.h5"
callbacks = [
ModelCheckpoint(
"./" + model_save,
monitor="val_iou_score",
save_weights_only=True,
save_best_only=True,
mode="max",
initial_value_threshold=0.0,
),
ReduceLROnPlateau(
monitor="val_iou_score", factor=0.5, patience=3, verbose=1, mode="max"
),
]
train_dataset, validation_dataset, val_paths = fetch(
base_dir, input_size, grp, batch_size, shuffle, ratio
)
model = sm.Unet(backbone_name="densenet201", input_shape=(224, 224, 3))
model.compile(
optimizer=Adam(learning_rate=8e-6), loss=sm.losses.bce_dice_loss, metrics=metrics
)
print(len(model.layers))
# model.summary()
model.load_weights("/kaggle/input/extend-model/2016_extend_best_model.h5")
"""target_dir = '/kaggle/working/'
from tqdm import tqdm
for image_file in tqdm(os.listdir(base_dir), total=len(os.listdir(base_dir))):
image_path = os.path.join(base_dir, image_file)
image = cv2.imread(image_path)
print(image.shape)
mask = model.predict(image).round()
mask_path = os.path.join(target_dir, image_file)
mask = np.reshape(mask[0], (224, 224))
if not cv2.imwrite(mask_path, mask): # Replace with your mask saving code
raise Exception('Image not saved')"""
scores = model.evaluate(validation_dataset)
print("Loss: {:.5}".format(scores[0]))
for metric, value in zip(metrics, scores[1:]):
print("mean {}: {:.5}".format(metric.__name__, value))
"""from PIL import Image
import cv2
from tqdm import tqdm
for batch, val_path in tqdm(zip(validation_dataset, val_paths), total=1279):
img, gt_mask = batch
img = img.numpy()
gt_mask = gt_mask.numpy()
pr_mask = model.predict(img).round()
visualize(
img=denormalize(img.squeeze()),
gt_mask=gt_mask[..., 0].squeeze(),
pr_mask=pr_mask[..., 0].squeeze(),
)
pr_mask = np.reshape(pr_mask[0], (224, 224))
mask = Image.fromarray((pr_mask*255).astype(np.uint8), mode = 'L')
mask.save('/kaggle/working/'+str(val_path[-16:]))"""
| false | 0 | 940 | 0 | 960 | 940 |
||
129502325
|
#
# ## Installation
# ## Get started
from InstructorEmbedding import INSTRUCTOR
model = INSTRUCTOR("hkunlp/instructor-large")
import clickhouse_connect
from pprint import pprint
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
db_host = user_secrets.get_secret("db_host")
db_password = user_secrets.get_secret("db_password")
db_port = user_secrets.get_secret("db_port")
db_username = user_secrets.get_secret("db_username")
client = clickhouse_connect.get_client(
host=db_host, port=db_port, username=db_username, password=db_password
)
sql = """
SELECT id, concat(if(title IS NULL, '', concat(title, '\n\n')), text) AS content
FROM hackernews_items hi
WHERE text IS NOT NULL LIMIT 10
"""
result = client.query(sql)
corpus = result.result_rows
pprint(corpus)
instruction = "Represent this Hacker News comment; Input:"
texts_with_instructions = []
for doc in corpus:
texts_with_instructions.append([instruction, doc[1]])
# calculate embeddings
corpus_embeddings = model.encode(texts_with_instructions)
# ## Use customized embeddings for information retrieval
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# query = [["Represent the Hacker News comment: ", "I'm very curious about why people are starting to process data that. This may be revolutionary. (Or I could be stupid.)"]]
# query = [[instruction, "The bank won't be happy."]]
query = [[instruction, "Mobile frameworks have pros and cons."]]
query_embeddings = model.encode(query)
similarities = cosine_similarity(query_embeddings, corpus_embeddings)
retrieved_doc_id = np.argmax(similarities)
pprint(similarities)
print(corpus[retrieved_doc_id])
query = [
[
"Represent the Wikipedia question for retrieving supporting documents: ",
"What is the disparate impact theory?",
]
]
query_embeddings = model.encode(query)
similarities = cosine_similarity(query_embeddings, corpus_embeddings)
print(similarities)
retrieved_doc_id = np.argmax(similarities)
print(retrieved_doc_id)
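# A minimal top-k variant of the lookup above (a sketch; k=3 is an arbitrary choice, and it reuses
# the similarities computed for the last query): rank the whole corpus by cosine similarity
# instead of keeping only the argmax.
k = 3
top_ids = np.argsort(similarities[0])[::-1][:k]
for rank, doc_id in enumerate(top_ids, start=1):
    print(rank, float(similarities[0][doc_id]), corpus[doc_id][1][:80])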
# ## Use customized embeddings for clustering
import sklearn.cluster
sentences = [
[
"Represent the Medicine sentence for clustering: ",
"Dynamical Scalar Degree of Freedom in Horava-Lifshitz Gravity",
],
[
"Represent the Medicine sentence for clustering: ",
"Comparison of Atmospheric Neutrino Flux Calculations at Low Energies",
],
[
"Represent the Medicine sentence for clustering: ",
"Fermion Bags in the Massive Gross-Neveu Model",
],
[
"Represent the Medicine sentence for clustering: ",
"QCD corrections to Associated t-tbar-H production at the Tevatron",
],
[
"Represent the Medicine sentence for clustering: ",
"A New Analysis of the R Measurements: Resonance Parameters of the Higher, Vector States of Charmonium",
],
]
embeddings = model.encode(sentences)
clustering_model = sklearn.cluster.MiniBatchKMeans(n_clusters=2)
clustering_model.fit(embeddings)
cluster_assignment = clustering_model.labels_
print(cluster_assignment)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/502/129502325.ipynb
| null | null |
[{"Id": 129502325, "ScriptId": 38505652, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2723287, "CreationDate": "05/14/2023 10:55:07", "VersionNumber": 1.0, "Title": "Wyts_HackerNews_Instructor_Embeddings", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 95.0, "LinesInsertedFromPrevious": 95.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
#
# ## Installation
# ## Get started
from InstructorEmbedding import INSTRUCTOR
model = INSTRUCTOR("hkunlp/instructor-large")
import clickhouse_connect
from pprint import pprint
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
db_host = user_secrets.get_secret("db_host")
db_password = user_secrets.get_secret("db_password")
db_port = user_secrets.get_secret("db_port")
db_username = user_secrets.get_secret("db_username")
client = clickhouse_connect.get_client(
host=db_host, port=db_port, username=db_username, password=db_password
)
sql = """
SELECT id, concat(if(title IS NULL, '', concat(title, '\n\n')), text) AS content
FROM hackernews_items hi
WHERE text IS NOT NULL LIMIT 10
"""
result = client.query(sql)
corpus = result.result_rows
pprint(corpus)
instruction = "Represent this Hacker News comment; Input:"
texts_with_instructions = []
for doc in corpus:
texts_with_instructions.append([instruction, doc[1]])
# calculate embeddings
corpus_embeddings = model.encode(texts_with_instructions)
# ## Use customized embeddings for information retrieval
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# query = [["Represent the Hacker News comment: ", "I'm very curious about why people are starting to process data that. This may be revolutionary. (Or I could be stupid.)"]]
# query = [[instruction, "The bank won't be happy."]]
query = [[instruction, "Mobile frameworks have pros and cons."]]
query_embeddings = model.encode(query)
similarities = cosine_similarity(query_embeddings, corpus_embeddings)
retrieved_doc_id = np.argmax(similarities)
pprint(similarities)
print(corpus[retrieved_doc_id])
query = [
[
"Represent the Wikipedia question for retrieving supporting documents: ",
"What is the disparate impact theory?",
]
]
query_embeddings = model.encode(query)
similarities = cosine_similarity(query_embeddings, corpus_embeddings)
print(similarities)
retrieved_doc_id = np.argmax(similarities)
print(retrieved_doc_id)
# ## Use customized embeddings for clustering
import sklearn.cluster
sentences = [
[
"Represent the Medicine sentence for clustering: ",
"Dynamical Scalar Degree of Freedom in Horava-Lifshitz Gravity",
],
[
"Represent the Medicine sentence for clustering: ",
"Comparison of Atmospheric Neutrino Flux Calculations at Low Energies",
],
[
"Represent the Medicine sentence for clustering: ",
"Fermion Bags in the Massive Gross-Neveu Model",
],
[
"Represent the Medicine sentence for clustering: ",
"QCD corrections to Associated t-tbar-H production at the Tevatron",
],
[
"Represent the Medicine sentence for clustering: ",
"A New Analysis of the R Measurements: Resonance Parameters of the Higher, Vector States of Charmonium",
],
]
embeddings = model.encode(sentences)
clustering_model = sklearn.cluster.MiniBatchKMeans(n_clusters=2)
clustering_model.fit(embeddings)
cluster_assignment = clustering_model.labels_
print(cluster_assignment)
| false | 0 | 834 | 0 | 834 | 834 |
||
129502332
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # for displaying images
import cv2
import pywt  # wavelets
from pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis
from scipy import fftpack
import os
import random
plt.rcParams["figure.figsize"] = [16, 16]
plt.rcParams.update({"font.size": 18})
# read the image
ph = cv2.imread("/kaggle/input/wavelet/katia", 0)
# Cropping an image
ph = ph[132:3168, 500:3536]
ph.shape
# Resize to [512; 512]
width = 512
height = 512
points = (width, height)
ph = cv2.resize(ph, points, interpolation=cv2.INTER_LINEAR)
plt.imshow(ph, cmap="gray")
plt.show()
ph
# Wavelet decomposition, level 1
# https://pywavelets.readthedocs.io/en/latest/ref/2d-dwt-and-idwt.html
# https://pywavelets.readthedocs.io/en/latest/
titles = ["Approximation", "Horizontal detail", "Vertical detail", "Diagonal detail"]
original = ph
coeffs2 = pywt.dwt2(original, "haar")
LL, (LH, HL, HH) = coeffs2
fig = plt.figure(figsize=(15, 15))
for i, a in enumerate([LL, LH, HL, HH]):
ax = fig.add_subplot(2, 2, i + 1)
ax.imshow(a, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(titles[i], fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
# Task 1. Perform a 4-level wavelet decomposition of the image. Show the detected object edges. Remove the high-frequency components and reconstruct the image. Compare the result with classical low-pass filtering of the image.
original = ph
coeffs2 = pywt.wavedec2(original, "haar", mode="symmetric", level=4)
LL4, (LH4, HL4, HH4), (LH3, HL3, HH3), (LH2, HL2, HH2), (LH1, HL1, HH1) = coeffs2
# Here (LH1, HL1, HH1) == (LH, HL, HH) from the level-1 decomposition.
# Let's look at the decomposition elements at different levels. Object edges are visible at each level.
titles = [
"Approximation L1",
"Horizontal detail L2",
"Vertical detail L3",
"Diagonal detail L4",
]
fig = plt.figure(figsize=(15, 15))
for i, a in enumerate([LL, LH2, HL3, HH4]):
ax = fig.add_subplot(2, 2, i + 1)
ax.imshow(a, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(titles[i], fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
# Remove the high-frequency components:
LL4, (LH4, HL4, HH4), (LH3, HL3, HH3), (LH2, HL2, HH2), (LH1, HL1, HH1) = coeffs2
HH1 = np.zeros_like(HH1)
HH2 = np.zeros_like(HH2)
# HH3 = np.zeros_like(HH3);
coeffs2_filt = LL4, (LH4, HL4, HH4), (LH3, HL3, HH3), (LH2, HL2, HH2), (LH1, HL1, HH1)
rec = pywt.waverec2(coeffs2_filt, "haar", mode="symmetric", axes=(-2, -1))
plt.imshow(rec, cmap="gray")
plt.show()
abs(ph).max()
# Compare with classical low-pass filtering:
# https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_fft_image_denoise.html
im_fft = fftpack.fft2(ph)
def plot_spectrum(im_fft):
from matplotlib.colors import LogNorm
# A logarithmic colormap
plt.imshow(np.abs(im_fft), norm=LogNorm(vmin=5))
plt.colorbar()
plt.figure()
plot_spectrum(im_fft)
plt.title("Fourier transform")
keep_fraction = 0.25
im_fft2 = im_fft.copy()
r, c = im_fft2.shape
im_fft2[int(r * keep_fraction) : int(r * (1 - keep_fraction))] = 0
im_fft2[:, int(c * keep_fraction) : int(c * (1 - keep_fraction))] = 0
plt.figure()
plot_spectrum(im_fft2)
plt.title("Filtered Spectrum")
im_new = fftpack.ifft2(im_fft2).real
plt.figure()
plt.imshow(im_new, plt.cm.gray)
plt.title("Reconstructed Image")
# No visible difference
# Task 2. Using a random number generator, add white Gaussian noise to the photo. Decompose the noisy image and filter it by applying different threshold levels. Reconstruct the image and plot the RMSE (root-mean-square error) of the reconstructed image as a function of the threshold level. Present the best visual filtering result. Carry out this experiment for three noise levels (low, medium, high).
# on adding noise:
# https://www.kaggle.com/code/chanduanilkumar/adding-and-removing-image-noise-in-python/notebook
# https://www.geeksforgeeks.org/add-a-salt-and-pepper-noise-to-an-image-with-python/
# on image compression with wavelets:
# https://www.youtube.com/watch?v=eJLF9HeZA8I&list=PLMrJAkhIeNNT_Xh3Oy0Y4LTj0Oxo8GqsC&index=36
# add Gaussian noise
def noisy_gauss(ph, lev, show):
gauss_noise = np.zeros_like(ph)
cv2.randn(gauss_noise, 128, lev)
gauss_noise = (gauss_noise * 0.5).astype(np.uint8)
ph_gn = cv2.add(ph, gauss_noise)
if show == 1:
plt.imshow(ph_gn, cmap="gray")
plt.show()
return ph_gn
# add salt & pepper noise (impulse noise)
# lev - fraction of noisy pixels
def noisy_sp(img, lev, show):
    img = img.copy()  # work on a copy so the original image is not modified in place
    row, col = img.shape
# salt
number_of_pixels = int(lev * row * col / 2)
for i in range(number_of_pixels):
y_coord = random.randint(0, row - 1)
x_coord = random.randint(0, col - 1)
img[y_coord][x_coord] = 255
# pepper
for i in range(number_of_pixels):
y_coord = random.randint(0, row - 1)
x_coord = random.randint(0, col - 1)
img[y_coord][x_coord] = 0
if show == 1:
plt.imshow(img, cmap="gray")
plt.show()
return img
## noise filtering using the wavelet transform
def wavelet_denoise(ph, ph_nz, keep_set, show):
n = 4
w = "db1"
B = ph_nz
coeffs = pywt.wavedec2(B, wavelet=w, level=n)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
Csort = np.sort(np.abs(coeff_arr.reshape(-1)))
MSE = np.zeros_like(keep_set)
(sz1, sz2) = np.shape(B)
ph_rec = np.zeros((sz1, sz2, len(keep_set)))
for i, keep in enumerate(keep_set):
thresh = Csort[int(np.floor((1 - keep) * len(Csort)))]
ind = np.abs(coeff_arr) > thresh
Cfilt = coeff_arr * ind # Threshold small indices
coeffs_filt = pywt.array_to_coeffs(
Cfilt, coeff_slices, output_format="wavedec2"
)
ph_rec[:, :, i] = pywt.waverec2(coeffs_filt, wavelet=w)
MSE[i] = np.linalg.norm(ph - ph_rec[:, :, i], ord="fro")
if show == 1:
plt.figure()
plt.imshow(ph_rec[:, :, i].astype("uint8"), cmap="gray")
plt.axis("off")
plt.title("keep = " + str(keep))
return MSE, ph_rec
ph_gn = noisy_gauss(ph, lev=5, show=0)
ph_sp = noisy_sp(ph, lev=0.05, show=1)
keep_set = np.linspace(0.01, 0.5, 5)
MSE, ph_rec = wavelet_denoise(ph, ph_sp, keep_set, show=1)
plt.figure()
plt.plot(keep_set, MSE, linestyle="--", marker="o", color="b")
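# A sketch of the full Task 2 experiment for three Gaussian noise levels (low, medium, high).
# The sigma values and the threshold grid below are arbitrary choices, not taken from the assignment.
plt.figure()
keep_set = np.linspace(0.01, 0.5, 10)
for sigma in (5, 20, 60):
    ph_noisy = noisy_gauss(ph, lev=sigma, show=0)
    MSE, _ = wavelet_denoise(ph, ph_noisy, keep_set, show=0)
    plt.plot(keep_set, MSE, linestyle="--", marker="o", label="sigma = " + str(sigma))
plt.xlabel("fraction of wavelet coefficients kept")
plt.ylabel("reconstruction error")
plt.legend()
plt.show()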
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/502/129502332.ipynb
| null | null |
[{"Id": 129502332, "ScriptId": 37475387, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12817197, "CreationDate": "05/14/2023 10:55:12", "VersionNumber": 4.0, "Title": "wavelet_transform", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 218.0, "LinesInsertedFromPrevious": 32.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 186.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # for displaying images
import cv2
import pywt  # wavelets
from pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis
from scipy import fftpack
import os
import random
plt.rcParams["figure.figsize"] = [16, 16]
plt.rcParams.update({"font.size": 18})
# read the image
ph = cv2.imread("/kaggle/input/wavelet/katia", 0)
# Cropping an image
ph = ph[132:3168, 500:3536]
ph.shape
# Resize to [512; 512]
width = 512
height = 512
points = (width, height)
ph = cv2.resize(ph, points, interpolation=cv2.INTER_LINEAR)
plt.imshow(ph, cmap="gray")
plt.show()
ph
# Wavelet decomposition, level 1
# https://pywavelets.readthedocs.io/en/latest/ref/2d-dwt-and-idwt.html
# https://pywavelets.readthedocs.io/en/latest/
titles = ["Approximation", "Horizontal detail", "Vertical detail", "Diagonal detail"]
original = ph
coeffs2 = pywt.dwt2(original, "haar")
LL, (LH, HL, HH) = coeffs2
fig = plt.figure(figsize=(15, 15))
for i, a in enumerate([LL, LH, HL, HH]):
ax = fig.add_subplot(2, 2, i + 1)
ax.imshow(a, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(titles[i], fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
# Task 1. Perform a 4-level wavelet decomposition of the image. Show the detected object edges. Remove the high-frequency components and reconstruct the image. Compare the result with classical low-pass filtering of the image.
original = ph
coeffs2 = pywt.wavedec2(original, "haar", mode="symmetric", level=4)
LL4, (LH4, HL4, HH4), (LH3, HL3, HH3), (LH2, HL2, HH2), (LH1, HL1, HH1) = coeffs2
# Here (LH1, HL1, HH1) == (LH, HL, HH) from the level-1 decomposition.
# Let's look at the decomposition elements at different levels. Object edges are visible at each level.
titles = [
"Approximation L1",
"Horizontal detail L2",
"Vertical detail L3",
"Diagonal detail L4",
]
fig = plt.figure(figsize=(15, 15))
for i, a in enumerate([LL, LH2, HL3, HH4]):
ax = fig.add_subplot(2, 2, i + 1)
ax.imshow(a, interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(titles[i], fontsize=10)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
# Remove the high-frequency components:
LL4, (LH4, HL4, HH4), (LH3, HL3, HH3), (LH2, HL2, HH2), (LH1, HL1, HH1) = coeffs2
HH1 = np.zeros_like(HH1)
HH2 = np.zeros_like(HH2)
# HH3 = np.zeros_like(HH3);
coeffs2_filt = LL4, (LH4, HL4, HH4), (LH3, HL3, HH3), (LH2, HL2, HH2), (LH1, HL1, HH1)
rec = pywt.waverec2(coeffs2_filt, "haar", mode="symmetric", axes=(-2, -1))
plt.imshow(rec, cmap="gray")
plt.show()
abs(ph).max()
# Compare with classical low-pass filtering:
# https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_fft_image_denoise.html
im_fft = fftpack.fft2(ph)
def plot_spectrum(im_fft):
from matplotlib.colors import LogNorm
# A logarithmic colormap
plt.imshow(np.abs(im_fft), norm=LogNorm(vmin=5))
plt.colorbar()
plt.figure()
plot_spectrum(im_fft)
plt.title("Fourier transform")
keep_fraction = 0.25
im_fft2 = im_fft.copy()
r, c = im_fft2.shape
im_fft2[int(r * keep_fraction) : int(r * (1 - keep_fraction))] = 0
im_fft2[:, int(c * keep_fraction) : int(c * (1 - keep_fraction))] = 0
plt.figure()
plot_spectrum(im_fft2)
plt.title("Filtered Spectrum")
im_new = fftpack.ifft2(im_fft2).real
plt.figure()
plt.imshow(im_new, plt.cm.gray)
plt.title("Reconstructed Image")
# No visible difference
# Task 2. Using a random number generator, add white Gaussian noise to the photo. Decompose the noisy image and filter it by applying different threshold levels. Reconstruct the image and plot the RMSE (root-mean-square error) of the reconstructed image as a function of the threshold level. Present the best visual filtering result. Carry out this experiment for three noise levels (low, medium, high).
# on adding noise:
# https://www.kaggle.com/code/chanduanilkumar/adding-and-removing-image-noise-in-python/notebook
# https://www.geeksforgeeks.org/add-a-salt-and-pepper-noise-to-an-image-with-python/
# on image compression with wavelets:
# https://www.youtube.com/watch?v=eJLF9HeZA8I&list=PLMrJAkhIeNNT_Xh3Oy0Y4LTj0Oxo8GqsC&index=36
# add Gaussian noise
def noisy_gauss(ph, lev, show):
gauss_noise = np.zeros_like(ph)
cv2.randn(gauss_noise, 128, lev)
gauss_noise = (gauss_noise * 0.5).astype(np.uint8)
ph_gn = cv2.add(ph, gauss_noise)
if show == 1:
plt.imshow(ph_gn, cmap="gray")
plt.show()
return ph_gn
# add salt & pepper noise (impulse noise)
# lev - fraction of noisy pixels
def noisy_sp(img, lev, show):
    img = img.copy()  # work on a copy so the original image is not modified in place
    row, col = img.shape
# salt
number_of_pixels = int(lev * row * col / 2)
for i in range(number_of_pixels):
y_coord = random.randint(0, row - 1)
x_coord = random.randint(0, col - 1)
img[y_coord][x_coord] = 255
# pepper
for i in range(number_of_pixels):
y_coord = random.randint(0, row - 1)
x_coord = random.randint(0, col - 1)
img[y_coord][x_coord] = 0
if show == 1:
plt.imshow(img, cmap="gray")
plt.show()
return img
## noise filtering using the wavelet transform
def wavelet_denoise(ph, ph_nz, keep_set, show):
n = 4
w = "db1"
B = ph_nz
coeffs = pywt.wavedec2(B, wavelet=w, level=n)
coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)
Csort = np.sort(np.abs(coeff_arr.reshape(-1)))
MSE = np.zeros_like(keep_set)
(sz1, sz2) = np.shape(B)
ph_rec = np.zeros((sz1, sz2, len(keep_set)))
for i, keep in enumerate(keep_set):
thresh = Csort[int(np.floor((1 - keep) * len(Csort)))]
ind = np.abs(coeff_arr) > thresh
Cfilt = coeff_arr * ind # Threshold small indices
coeffs_filt = pywt.array_to_coeffs(
Cfilt, coeff_slices, output_format="wavedec2"
)
ph_rec[:, :, i] = pywt.waverec2(coeffs_filt, wavelet=w)
MSE[i] = np.linalg.norm(ph - ph_rec[:, :, i], ord="fro")
if show == 1:
plt.figure()
plt.imshow(ph_rec[:, :, i].astype("uint8"), cmap="gray")
plt.axis("off")
plt.title("keep = " + str(keep))
return MSE, ph_rec
ph_gn = noisy_gauss(ph, lev=5, show=0)
ph_sp = noisy_sp(ph, lev=0.05, show=1)
keep_set = np.linspace(0.01, 0.5, 5)
MSE, ph_rec = wavelet_denoise(ph, ph_sp, keep_set, show=1)
plt.figure()
plt.plot(keep_set, MSE, linestyle="--", marker="o", color="b")
| false | 0 | 2,681 | 0 | 2,681 | 2,681 |
||
129575842
|
<jupyter_start><jupyter_text>COVID-19 Dataset
## Context
Coronavirus disease (COVID-19) is an infectious disease caused by a newly discovered coronavirus. Most people infected with COVID-19 virus will experience mild to moderate respiratory illness and recover without requiring special treatment. Older people, and those with underlying medical problems like cardiovascular disease, diabetes, chronic respiratory disease, and cancer are more likely to develop serious illness.
During the entire course of the pandemic, one of the main problems that healthcare providers have faced is the shortage of medical resources and a proper plan to efficiently distribute them. In these tough times, being able to predict what kind of resource an individual might require at the time of being tested positive or even before that will be of immense help to the authorities as they would be able to procure and arrange for the resources necessary to save the life of that patient.
The main goal of this project is to build a machine learning model that, given a Covid-19 patient's current symptom, status, and medical history, will predict whether the patient is in high risk or not.
## content
The dataset was provided by the Mexican government [(link)](https://datos.gob.mx/busca/dataset/informacion-referente-a-casos-covid-19-en-mexico). This dataset contains an enormous number of anonymized patient-related information including pre-conditions. The raw dataset consists of 21 unique features and 1,048,576 unique patients. **In the Boolean features, 1 means "yes" and 2 means "no". values as 97 and 99 are missing data**.
- sex: 1 for female and 2 for male.
- age: of the patient.
- classification: covid test findings. Values 1-3 mean that the patient was diagnosed with covid in different
degrees. 4 or higher means that the patient is not a carrier of covid or that the test is inconclusive.
- patient type: type of care the patient received in the unit. 1 for returned home and 2 for hospitalization.
- pneumonia: whether the patient already have air sacs inflammation or not.
- pregnancy: whether the patient is pregnant or not.
- diabetes: whether the patient has diabetes or not.
- copd: Indicates whether the patient has Chronic obstructive pulmonary disease or not.
- asthma: whether the patient has asthma or not.
- inmsupr: whether the patient is immunosuppressed or not.
- hypertension: whether the patient has hypertension or not.
- cardiovascular: whether the patient has heart or blood vessels related disease.
- renal chronic: whether the patient has chronic renal disease or not.
- other disease: whether the patient has other disease or not.
- obesity: whether the patient is obese or not.
- tobacco: whether the patient is a tobacco user.
- usmr: Indicates whether the patient treated medical units of the first, second or third level.
- medical unit: type of institution of the National Health System that provided the care.
- intubed: whether the patient was connected to the ventilator.
- icu: Indicates whether the patient had been admitted to an Intensive Care Unit.
- date died: If the patient died indicate the date of death, and 9999-99-99 otherwise.
Kaggle dataset identifier: covid19-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/covid19-dataset/Covid Data.csv")
df.head()
df.loc[df.DATE_DIED != "9999-99-99", "DATE_DIED"] = 1
df.loc[df.DATE_DIED == "9999-99-99", "DATE_DIED"] = 2
df = df.replace([97, 99], 0)
df.head()
n_target = np.array([0, 0, 0, 0, 0, 0, 0, 0])
for i in range(1, 8):
n_target[i] = len(df[df["CLASIFFICATION_FINAL"] == i])
n_all = np.sum(n_target)
for i in range(1, 8):
print("target{0}の割合: {1}".format(i, n_target[i] / n_all))
print("-----------------")
print(df["CLASIFFICATION_FINAL"].value_counts())
age_range = [-float("inf"), 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, float("inf")]
df["AGE"] = pd.cut(df["AGE"], age_range, labels=False)
sns.catplot(x="AGE", y="CLASIFFICATION_FINAL", data=df, kind="bar")
sns.catplot(x="AGE", data=df, kind="count")
columns = [
"USMER",
"MEDICAL_UNIT",
"SEX",
"PATIENT_TYPE",
"DATE_DIED",
"INTUBED",
"PNEUMONIA",
"PREGNANT",
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
"ICU",
]
for col in columns:
print(col, "= 1")
print("mean:", df[df[col] == 1]["CLASIFFICATION_FINAL"].mean())
print(col, "= 2")
print("mean:", df[df[col] == 2]["CLASIFFICATION_FINAL"].mean())
print("--------------------")
for col in columns:
print(col, "= 1")
print("mean:", df[df[col] == 1]["CLASIFFICATION_FINAL"].count())
print(col, "= 2")
print("mean:", df[df[col] == 2]["CLASIFFICATION_FINAL"].count())
print("--------------------")
df_pos = df[df.CLASIFFICATION_FINAL < 4]
df_neg = df[df.CLASIFFICATION_FINAL >= 4]
sns.catplot(x="AGE", y="CLASIFFICATION_FINAL", data=df_pos, kind="bar")
sns.catplot(x="AGE", data=df_pos, kind="count")
sns.catplot(x="AGE", y="CLASIFFICATION_FINAL", data=df_neg, kind="bar")
sns.catplot(x="AGE", data=df_neg, kind="count")
for col in columns:
print(col, "= 1")
print("mean:", df_pos[df_pos[col] == 1]["CLASIFFICATION_FINAL"].mean())
print(col, "= 2")
print("mean:", df_pos[df_pos[col] == 2]["CLASIFFICATION_FINAL"].mean())
print("--------------------")
for col in columns:
print(col, "= 1")
print("mean:", df_neg[df_neg[col] == 1]["CLASIFFICATION_FINAL"].mean())
print(col, "= 2")
print("mean:", df_neg[df_neg[col] == 2]["CLASIFFICATION_FINAL"].mean())
print("--------------------")
for col in columns:
sns.catplot(x="CLASIFFICATION_FINAL", y=col, data=df, kind="bar")
colormap = plt.cm.coolwarm
plt.figure(figsize=(21, 18))
plt.title("Pearson Correlation of Features", y=1.05, size=15)
sns.heatmap(
df.astype(float).corr(),
linewidths=0.1,
vmax=1.0,
square=True,
cmap=colormap,
linecolor="white",
annot=True,
)
health_facts = [
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
]
df2 = df[health_facts].replace(2, 0)
df["HEALTH"] = df2.sum(axis=1)
df["IN_UNIT"] = (df["PATIENT_TYPE"] == 1) | (df["INTUBED"] == 1) | (df["ICU"] == 1)
df = df.replace({True: 1, False: 2})
df.head()
from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.preprocessing import *
from sklearn.metrics import *
import lightgbm as lgb
import optuna.integration.lightgbm as opt_lgb
y = df["CLASIFFICATION_FINAL"] - 1
X = df.drop(
[
"PATIENT_TYPE",
"INTUBED",
"ICU",
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
"CLASIFFICATION_FINAL",
],
axis=1,
)
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_trainval, y_trainval, random_state=42
)
lgb_train = opt_lgb.Dataset(X_train, y_train)
lgb_val = opt_lgb.Dataset(X_val, y_val)
params = {
"objective": "multiclass",
"metric": "multi_error",
"verbosity": -1,
"zero_as_missing": True,
"random_state": 1,
"num_class": 7,
}
num_round = 100
model = opt_lgb.train(
params,
lgb_train,
num_boost_round=num_round,
valid_names=["train", "valid"],
valid_sets=[lgb_train, lgb_val],
verbose_eval=-1,
)
val_pred = model.predict(X_val)
print("valid accuracy: {:.4f}".format(accuracy_score(y_val, val_pred)))
val_score = log_loss(y_val, val_pred)
print("valid logloss: {:.4f}".format(val_score))
test_pred = model.predict(X_test)
print("test accuracy: {:.4f}".format(accuracy_score(y_test, test_pred)))
score = log_loss(y_test, test_pred)
print("test logloss: {:.4f}".format(score))
df["CLASIFFICATION_FINAL"] = df["CLASIFFICATION_FINAL"].replace([1, 2, 3], 0)
df["CLASIFFICATION_FINAL"] = df["CLASIFFICATION_FINAL"].replace([4, 5, 6, 7], 1)
df.head()
from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.preprocessing import *
from sklearn.metrics import *
import lightgbm as lgb
import optuna.integration.lightgbm as opt_lgb
y = df["CLASIFFICATION_FINAL"]
X = df.drop(
[
"PATIENT_TYPE",
"INTUBED",
"ICU",
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
"CLASIFFICATION_FINAL",
],
axis=1,
)
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_trainval, y_trainval, random_state=42
)
lgb_train = opt_lgb.Dataset(X_train, y_train)
lgb_val = opt_lgb.Dataset(X_val, y_val)
params = {
"objective": "binary",
"metric": "binary_error",
"verbosity": -1,
"zero_as_missing": True,
"random_state": 1,
}
num_round = 100
model = opt_lgb.train(
params,
lgb_train,
num_boost_round=num_round,
valid_names=["train", "valid"],
valid_sets=[lgb_train, lgb_val],
verbose_eval=-1,
)
val_pred = model.predict(X_val)
print("binary valid accuracy: {:.4f}".format(accuracy_score(y_val, val_pred)))
val_score = log_loss(y_val, val_pred)
print("binary valid logloss: {:.4f}".format(val_score))
test_pred = model.predict(X_test)
print("binary test accuracy: {:.4f}".format(accuracy_score(y_test, test_pred)))
score = log_loss(y_test, test_pred)
print("binary test logloss: {:.4f}".format(score))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/575/129575842.ipynb
|
covid19-dataset
|
meirnizri
|
[{"Id": 129575842, "ScriptId": 38352779, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10726664, "CreationDate": "05/15/2023 01:34:40", "VersionNumber": 11.0, "Title": "Covid-19", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 204.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 200.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185779299, "KernelVersionId": 129575842, "SourceDatasetVersionId": 4504149}]
|
[{"Id": 4504149, "DatasetId": 2633044, "DatasourceVersionId": 4564560, "CreatorUserId": 6637313, "LicenseName": "CC0: Public Domain", "CreationDate": "11/13/2022 15:47:17", "VersionNumber": 1.0, "Title": "COVID-19 Dataset", "Slug": "covid19-dataset", "Subtitle": "COVID-19 patient's symptoms, status, and medical history.", "Description": "## Context\nCoronavirus disease (COVID-19) is an infectious disease caused by a newly discovered coronavirus. Most people infected with COVID-19 virus will experience mild to moderate respiratory illness and recover without requiring special treatment. Older people, and those with underlying medical problems like cardiovascular disease, diabetes, chronic respiratory disease, and cancer are more likely to develop serious illness.\nDuring the entire course of the pandemic, one of the main problems that healthcare providers have faced is the shortage of medical resources and a proper plan to efficiently distribute them. In these tough times, being able to predict what kind of resource an individual might require at the time of being tested positive or even before that will be of immense help to the authorities as they would be able to procure and arrange for the resources necessary to save the life of that patient.\n\nThe main goal of this project is to build a machine learning model that, given a Covid-19 patient's current symptom, status, and medical history, will predict whether the patient is in high risk or not. \n\n## content\nThe dataset was provided by the Mexican government [(link)](https://datos.gob.mx/busca/dataset/informacion-referente-a-casos-covid-19-en-mexico). This dataset contains an enormous number of anonymized patient-related information including pre-conditions. The raw dataset consists of 21 unique features and 1,048,576 unique patients. **In the Boolean features, 1 means \"yes\" and 2 means \"no\". values as 97 and 99 are missing data**.\n\n-\tsex: 1 for female and 2 for male.\n-\tage: of the patient.\n-\tclassification: covid test findings. Values 1-3 mean that the patient was diagnosed with covid in different\n degrees. 4 or higher means that the patient is not a carrier of covid or that the test is inconclusive.\n-\tpatient type: type of care the patient received in the unit. 
1 for returned home and 2 for hospitalization.\n-\tpneumonia: whether the patient already have air sacs inflammation or not.\n-\tpregnancy: whether the patient is pregnant or not.\n-\tdiabetes: whether the patient has diabetes or not.\n-\tcopd: Indicates whether the patient has Chronic obstructive pulmonary disease or not.\n-\tasthma: whether the patient has asthma or not.\n-\tinmsupr: whether the patient is immunosuppressed or not.\n-\thypertension: whether the patient has hypertension or not.\n-\tcardiovascular: whether the patient has heart or blood vessels related disease.\n-\trenal chronic: whether the patient has chronic renal disease or not.\n-\tother disease: whether the patient has other disease or not.\n-\tobesity: whether the patient is obese or not.\n-\ttobacco: whether the patient is a tobacco user.\n-\tusmr: Indicates whether the patient treated medical units of the first, second or third level.\n-\tmedical unit: type of institution of the National Health System that provided the care.\n-\tintubed: whether the patient was connected to the ventilator.\n-\ticu: Indicates whether the patient had been admitted to an Intensive Care Unit.\n-\tdate died: If the patient died indicate the date of death, and 9999-99-99 otherwise.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2633044, "CreatorUserId": 6637313, "OwnerUserId": 6637313.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4504149.0, "CurrentDatasourceVersionId": 4564560.0, "ForumId": 2664214, "Type": 2, "CreationDate": "11/13/2022 15:47:17", "LastActivityDate": "11/13/2022", "TotalViews": 186306, "TotalDownloads": 31424, "TotalVotes": 641, "TotalKernels": 84}]
|
[{"Id": 6637313, "UserName": "meirnizri", "DisplayName": "Meir Nizri", "RegisterDate": "02/01/2021", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/covid19-dataset/Covid Data.csv")
df.head()
df.loc[df.DATE_DIED != "9999-99-99", "DATE_DIED"] = 1
df.loc[df.DATE_DIED == "9999-99-99", "DATE_DIED"] = 2
df = df.replace([97, 99], 0)
df.head()
n_target = np.array([0, 0, 0, 0, 0, 0, 0, 0])
for i in range(1, 8):
n_target[i] = len(df[df["CLASIFFICATION_FINAL"] == i])
n_all = np.sum(n_target)
for i in range(1, 8):
print("target{0}の割合: {1}".format(i, n_target[i] / n_all))
print("-----------------")
print(df["CLASIFFICATION_FINAL"].value_counts())
age_range = [-float("inf"), 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, float("inf")]
df["AGE"] = pd.cut(df["AGE"], age_range, labels=False)
sns.catplot(x="AGE", y="CLASIFFICATION_FINAL", data=df, kind="bar")
sns.catplot(x="AGE", data=df, kind="count")
columns = [
"USMER",
"MEDICAL_UNIT",
"SEX",
"PATIENT_TYPE",
"DATE_DIED",
"INTUBED",
"PNEUMONIA",
"PREGNANT",
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
"ICU",
]
for col in columns:
print(col, "= 1")
print("mean:", df[df[col] == 1]["CLASIFFICATION_FINAL"].mean())
print(col, "= 2")
print("mean:", df[df[col] == 2]["CLASIFFICATION_FINAL"].mean())
print("--------------------")
for col in columns:
print(col, "= 1")
print("mean:", df[df[col] == 1]["CLASIFFICATION_FINAL"].count())
print(col, "= 2")
print("mean:", df[df[col] == 2]["CLASIFFICATION_FINAL"].count())
print("--------------------")
df_pos = df[df.CLASIFFICATION_FINAL < 4]
df_neg = df[df.CLASIFFICATION_FINAL >= 4]
sns.catplot(x="AGE", y="CLASIFFICATION_FINAL", data=df_pos, kind="bar")
sns.catplot(x="AGE", data=df_pos, kind="count")
sns.catplot(x="AGE", y="CLASIFFICATION_FINAL", data=df_neg, kind="bar")
sns.catplot(x="AGE", data=df_neg, kind="count")
for col in columns:
print(col, "= 1")
print("mean:", df_pos[df_pos[col] == 1]["CLASIFFICATION_FINAL"].mean())
print(col, "= 2")
print("mean:", df_pos[df_pos[col] == 2]["CLASIFFICATION_FINAL"].mean())
print("--------------------")
for col in columns:
print(col, "= 1")
print("mean:", df_neg[df_neg[col] == 1]["CLASIFFICATION_FINAL"].mean())
print(col, "= 2")
print("mean:", df_neg[df_neg[col] == 2]["CLASIFFICATION_FINAL"].mean())
print("--------------------")
for col in columns:
sns.catplot(x="CLASIFFICATION_FINAL", y=col, data=df, kind="bar")
colormap = plt.cm.coolwarm
plt.figure(figsize=(21, 18))
plt.title("Pearson Correlation of Features", y=1.05, size=15)
sns.heatmap(
df.astype(float).corr(),
linewidths=0.1,
vmax=1.0,
square=True,
cmap=colormap,
linecolor="white",
annot=True,
)
health_facts = [
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
]
df2 = df[health_facts].replace(2, 0)
df["HEALTH"] = df2.sum(axis=1)
df["IN_UNIT"] = (df["PATIENT_TYPE"] == 1) | (df["INTUBED"] == 1) | (df["ICU"] == 1)
df = df.replace({True: 1, False: 2})
df.head()
from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.preprocessing import *
from sklearn.metrics import *
import lightgbm as lgb
import optuna.integration.lightgbm as opt_lgb
y = df["CLASIFFICATION_FINAL"] - 1
X = df.drop(
[
"PATIENT_TYPE",
"INTUBED",
"ICU",
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
"CLASIFFICATION_FINAL",
],
axis=1,
)
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_trainval, y_trainval, random_state=42
)
lgb_train = opt_lgb.Dataset(X_train, y_train)
lgb_val = opt_lgb.Dataset(X_val, y_val)
params = {
"objective": "multiclass",
"metric": "multi_error",
"verbosity": -1,
"zero_as_missing": True,
"random_state": 1,
"num_class": 7,
}
num_round = 100
model = opt_lgb.train(
params,
lgb_train,
num_boost_round=num_round,
valid_names=["train", "valid"],
valid_sets=[lgb_train, lgb_val],
verbose_eval=-1,
)
val_pred = model.predict(X_val)
print("valid accuracy: {:.4f}".format(accuracy_score(y_val, val_pred)))
val_score = log_loss(y_val, val_pred)
print("valid logloss: {:.4f}".format(val_score))
test_pred = model.predict(X_test)
print("test accuracy: {:.4f}".format(accuracy_score(y_test, test_pred)))
score = log_loss(y_test, test_pred)
print("test logloss: {:.4f}".format(score))
df["CLASIFFICATION_FINAL"] = df["CLASIFFICATION_FINAL"].replace([1, 2, 3], 0)
df["CLASIFFICATION_FINAL"] = df["CLASIFFICATION_FINAL"].replace([4, 5, 6, 7], 1)
df.head()
from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.preprocessing import *
from sklearn.metrics import *
import lightgbm as lgb
import optuna.integration.lightgbm as opt_lgb
y = df["CLASIFFICATION_FINAL"]
X = df.drop(
[
"PATIENT_TYPE",
"INTUBED",
"ICU",
"DIABETES",
"COPD",
"ASTHMA",
"INMSUPR",
"HIPERTENSION",
"OTHER_DISEASE",
"CARDIOVASCULAR",
"OBESITY",
"RENAL_CHRONIC",
"TOBACCO",
"CLASIFFICATION_FINAL",
],
axis=1,
)
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_trainval, y_trainval, random_state=42
)
lgb_train = opt_lgb.Dataset(X_train, y_train)
lgb_val = opt_lgb.Dataset(X_val, y_val)
params = {
"objective": "binary",
"metric": "binary_error",
"verbosity": -1,
"zero_as_missing": True,
"random_state": 1,
}
num_round = 100
model = opt_lgb.train(
params,
lgb_train,
num_boost_round=num_round,
valid_names=["train", "valid"],
valid_sets=[lgb_train, lgb_val],
verbose_eval=-1,
)
val_pred = model.predict(X_val)
print("binary valid accuracy: {:.4f}".format(accuracy_score(y_val, val_pred)))
val_score = log_loss(y_val, val_pred)
print("binary valid logloss: {:.4f}".format(val_score))
test_pred = model.predict(X_test)
print("binary test accuracy: {:.4f}".format(accuracy_score(y_test, test_pred)))
score = log_loss(y_test, test_pred)
print("binary test logloss: {:.4f}".format(score))
| false | 1 | 2,665 | 0 | 3,515 | 2,665 |
||
129855427
|
<jupyter_start><jupyter_text>Predicting Critical Heat Flux
### Context
This dataset was prepared for the journal article entitled "On the prediction of critical heat flux using a physics-informed machine learning-aided framework" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article.
Kaggle dataset identifier: predicting-heat-flux
<jupyter_script>#
# Table of Contents
# [1. Notebook Versions](#1)
# [2. Loading Libraries](#2)
# [3. Reading Data Files](#3)
# [4. Data Exploration](#4)
# [5. Baseline Modeling 1.0](#5)
# # Notebook Versions
# 1. Version 1 (05/15/2023)
# * EDA
# * Baseline modeling 1.0
#
# 2. Version 2 (05/16/2023)
# * Baseline modeling 1.0 updated.
#
# 3. Version 3 (05/16/2023)
# * Baseline modeling 1.0 updated
#
# # Loading Libraries
import pandas as pd
pd.set_option("display.max_columns", 100)
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
plt.style.use("ggplot")
import seaborn as sns
import plotly.express as px
from sklearn.tree import DecisionTreeRegressor, plot_tree
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge, RidgeCV, Lasso, LassoCV
from sklearn.model_selection import (
KFold,
StratifiedKFold,
train_test_split,
GridSearchCV,
RepeatedKFold,
RepeatedStratifiedKFold,
)
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.inspection import PartialDependenceDisplay
from sklearn.ensemble import (
RandomForestRegressor,
HistGradientBoostingRegressor,
GradientBoostingRegressor,
ExtraTreesRegressor,
)
from sklearn.svm import SVR
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
#
# # Reading Data Files
data = pd.read_csv("../input/playground-series-s3e15/data.csv")
original = pd.read_csv("../input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv")
submission = pd.read_csv("../input/playground-series-s3e15/sample_submission.csv")
print("The dimension of the synthetic dataset is:", data.shape)
print("The dimension of the orginal dataset is:", original.shape)
print("The dimension of the submission dataset is:", submission.shape)
data.info()
data.head()
data.describe()
# From the above table, we see that there are several missing values in the competition data.
100 * np.sum(data.isnull(), axis=0) / data.shape[0]
# From the above, we see that at least 14% of the data is missing in each of the features of the competition data (except `chf_exp [MW/m2]`).
original.head()
original.describe()
# >
# 💡 There are no missing values in the original dataset. However, there are several missing values in the competition data. Also, by a quick eye-ball comparison of the summary statistics of the train and test datasets, they seem to have similar distributions.
# # Data Exploration
# First, we start by visualizing the variable of interest.
fig, axes = plt.subplots(1, 2, figsize=(18, 8))
sns.kdeplot(
ax=axes[0], data=data, x="x_e_out [-]", fill=True, color="steelblue"
).set_title("Competition Dataset")
sns.kdeplot(
ax=axes[1], data=original, x="x_e_out [-]", fill=True, color="orange"
).set_title("Original Dataset")
# By a quick eye-ball test, the distribution of the variable of interest are approximately the same in the competition and original datasets. Next, we explore what features are related to the variable of interest.
corr_mat_data = data.drop(columns=["id", "author", "geometry"], axis=1).corr()
corr_mat_original = original.drop(columns=["id"], axis=1).corr()
data_mask = np.triu(np.ones_like(corr_mat_data, dtype=bool))
original_mask = np.triu(np.ones_like(corr_mat_original, dtype=bool))
cmap = sns.diverging_palette(100, 7, s=75, l=40, n=5, center="light", as_cmap=True)
fig, axes = plt.subplots(1, 2, figsize=(25, 10))
sns.heatmap(
corr_mat_data,
annot=True,
cmap=cmap,
fmt=".2f",
center=0,
annot_kws={"size": 12},
ax=axes[0],
mask=data_mask,
).set_title("Correlations Among Features (in Competition Dataset)")
sns.heatmap(
corr_mat_original,
annot=True,
cmap=cmap,
fmt=".2f",
center=0,
annot_kws={"size": 12},
ax=axes[1],
mask=original_mask,
).set_title("Correlations Among Features (in Original Dataset)")
# From the above, `length [mm]` and `chf_exp [MW/m2]` correlated the most with `x_e_out [-]`. Note that this is consiistent in the competition and orginal datasets. Next we check for duplicates.
print("There are ", data.shape[0], "observations in the competition dataset")
print(
"There are ",
data.drop(columns=["id", "author", "geometry"], axis=1).drop_duplicates().shape[0],
"unique observations in the competition dataset",
)
print("There are ", original.shape[0], "observations in the original dataset")
print(
"There are ",
original.drop(columns=["id"], axis=1).drop_duplicates().shape[0],
"unique observations in the original dataset",
)
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
sns.scatterplot(
ax=axes[0], data=data, x="length [mm]", y="x_e_out [-]", color="steelblue"
).set_title("Competition Dataset")
sns.scatterplot(
ax=axes[1], data=original, x="length [mm]", y="x_e_out [-]", color="orange"
).set_title("Original Dataset")
# From the above plots, there seems to be a positive relationship between `length [mm]` and `x_e_out [-]`.
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
sns.scatterplot(
ax=axes[0], data=data, x="chf_exp [MW/m2]", y="x_e_out [-]", color="steelblue"
).set_title("Competition Dataset")
sns.scatterplot(
ax=axes[1], data=original, x="chf_exp [MW/m2]", y="x_e_out [-]", color="orange"
).set_title("Original Dataset")
# From the left plot (competition dataset), the relationship between `chf_exp [MW/m2]` and `x_e_out [-]` is not clear. However, in the right plot (original dataset), the relationship between `chf_exp [MW/m2]` and `x_e_out [-]` is more clear (negative).
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
sns.scatterplot(
ax=axes[0], data=data, x="pressure [MPa]", y="x_e_out [-]", color="steelblue"
).set_title("Competition Dataset")
sns.scatterplot(
ax=axes[1], data=original, x="pressure [MPa]", y="x_e_out [-]", color="orange"
).set_title("Original Dataset")
# From the left plot (competition dataset), the relationship between `pressure [MPa]` and `x_e_out [-]` is not clear. However, in the right plot (original dataset), the relationship between `pressure [MPa]` and `x_e_out [-]` is a lite more clear (negative).
# # Baseline Modeling 1.0
X = data[
["pressure [MPa]", "mass_flux [kg/m2-s]", "chf_exp [MW/m2]", "length [mm]"]
].copy()
X["mass_flux_missing"] = np.where(X["mass_flux [kg/m2-s]"].isnull(), 1, 0)
X["pressure_missing"] = np.where(X["pressure [MPa]"].isnull(), 1, 0)
X["chf_missing"] = np.where(X["chf_exp [MW/m2]"].isnull(), 1, 0)
X["generated"] = 1
X_org = original[
["pressure [MPa]", "mass_flux [kg/m2-s]", "chf_exp [MW/m2]", "length [mm]"]
].copy()
X_org["mass_flux_missing"] = np.where(X_org["mass_flux [kg/m2-s]"].isnull(), 1, 0)
X_org["pressure_missing"] = np.where(X_org["pressure [MPa]"].isnull(), 1, 0)
X_org["chf_missing"] = np.where(X_org["chf_exp [MW/m2]"].isnull(), 1, 0)
X_org["generated"] = 0
Y = data["x_e_out [-]"]
Y_org = original["x_e_out [-]"]
X = pd.concat([X, X_org], axis=0).reset_index(drop=True)
Y = pd.concat([Y, Y_org], axis=0).reset_index(drop=True)
X.columns = [
"pressure",
"mass_flux",
"chf_exp",
"length",
"mass_flux_missing",
"pressure_missing",
"chf_missing",
"generated",
]
test = X[Y.isnull()]
X = X[~Y.isnull()]
Y = Y[~Y.isnull()]
hist_cv_scores, hist_preds = list(), list()
lgb_cv_scores, lgb_preds = list(), list()
xgb_cv_scores, xgb_preds = list(), list()
cat_cv_scores, cat_preds = list(), list()
ridge_cv_scores, ridge_preds = list(), list()
skf = RepeatedKFold(n_splits=10, n_repeats=1, random_state=42)
for i, (train_ix, test_ix) in enumerate(skf.split(X, Y)):
X_train, X_test = X.iloc[train_ix], X.iloc[test_ix]
Y_train, Y_test = Y.iloc[train_ix], Y.iloc[test_ix]
print("---------------------------------------------------------------")
##########################
## HistGradientBoosting ##
##########################
hist_md = HistGradientBoostingRegressor(
l2_regularization=0.01,
early_stopping=False,
learning_rate=0.01,
max_iter=1000,
max_depth=8,
max_bins=255,
min_samples_leaf=20,
max_leaf_nodes=50,
).fit(X_train, Y_train)
hist_pred_1 = hist_md.predict(X_test[X_test["generated"] == 1])
hist_pred_2 = hist_md.predict(test)
hist_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], hist_pred_1, squared=False
)
hist_cv_scores.append(hist_score_fold)
hist_preds.append(hist_pred_2)
print("Fold", i, "==> HistGradient oof RMSE is ==>", hist_score_fold)
##############
## LightGBM ##
##############
lgb_md = LGBMRegressor(
n_estimators=1000,
max_depth=5,
learning_rate=0.01,
num_leaves=50,
min_child_samples=20,
reg_alpha=0.01,
reg_lambda=0.01,
subsample=0.7,
colsample_bytree=0.8,
device="gpu",
).fit(X_train, Y_train)
lgb_pred_1 = lgb_md.predict(X_test[X_test["generated"] == 1])
lgb_pred_2 = lgb_md.predict(test)
lgb_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], lgb_pred_1, squared=False
)
lgb_cv_scores.append(lgb_score_fold)
lgb_preds.append(lgb_pred_2)
print("Fold", i, "==> LightGBM oof RMSE is ==>", lgb_score_fold)
#############
## XGBoost ##
#############
xgb_md = XGBRegressor(
tree_method="gpu_hist",
colsample_bytree=0.8,
gamma=0.01,
learning_rate=0.01,
max_depth=5,
min_child_weight=50,
n_estimators=1000,
subsample=0.7,
).fit(X_train, Y_train)
xgb_pred_1 = xgb_md.predict(X_test[X_test["generated"] == 1])
xgb_pred_2 = xgb_md.predict(test)
xgb_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], xgb_pred_1, squared=False
)
xgb_cv_scores.append(xgb_score_fold)
xgb_preds.append(xgb_pred_2)
print("Fold", i, "==> XGBoost oof RMSE is ==>", xgb_score_fold)
##############
## CatBoost ##
##############
cat_md = CatBoostRegressor(
loss_function="RMSE",
iterations=1000,
learning_rate=0.01,
depth=10,
random_strength=0.5,
bagging_temperature=0.7,
border_count=55,
l2_leaf_reg=5,
verbose=False,
task_type="GPU",
).fit(X_train, Y_train)
cat_pred_1 = cat_md.predict(X_test[X_test["generated"] == 1])
cat_pred_2 = cat_md.predict(test)
cat_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], cat_pred_1, squared=False
)
cat_cv_scores.append(cat_score_fold)
cat_preds.append(cat_pred_2)
print("Fold", i, "==> CatBoost oof RMSE is ==>", cat_score_fold)
##############
## Ensemble ##
##############
x = pd.DataFrame(
{"hist": hist_pred_1, "lgb": lgb_pred_1, "xgb": xgb_pred_1, "cat": cat_pred_1}
)
y = Y_test[X_test["generated"] == 1]
ridge_cv = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1], cv=5).fit(x, y)
alpha_cv = ridge_cv.alpha_
ridge_md = Ridge(alpha=alpha_cv).fit(x, y)
ridge_pred = ridge_md.predict(x)
x_test = pd.DataFrame(
{"hist": hist_pred_2, "lgb": lgb_pred_2, "xgb": xgb_pred_2, "cat": cat_pred_2}
)
ridge_test_pred = ridge_md.predict(x_test)
ridge_score = mean_squared_error(y, ridge_pred, squared=False)
ridge_cv_scores.append(ridge_score)
ridge_preds.append(ridge_test_pred)
print("Fold", i, "==> Ridge ensemble oof RMSE is ==>", ridge_score)
print("---------------------------------------------------------------")
hist_cv_score = np.mean(hist_cv_scores)
lgb_cv_score = np.mean(lgb_cv_scores)
xgb_cv_score = np.mean(xgb_cv_scores)
cat_cv_score = np.mean(cat_cv_scores)
ridge_cv_score = np.mean(ridge_cv_scores)
model_perf = pd.DataFrame(
{
"Model": ["HistGradient", "LightGBM", "XGBoost", "CatBoost", "Ridge-Ensemble"],
"cv-score": [
hist_cv_score,
lgb_cv_score,
xgb_cv_score,
cat_cv_score,
ridge_cv_score,
],
}
)
plt.figure(figsize=(8, 8))
ax = sns.barplot(y="Model", x="cv-score", data=model_perf)
ax.bar_label(ax.containers[0])
hist_preds_test = pd.DataFrame(hist_preds).apply(np.mean, axis=0)
lgb_preds_test = pd.DataFrame(lgb_preds).apply(np.mean, axis=0)
xgb_preds_test = pd.DataFrame(xgb_preds).apply(np.mean, axis=0)
cat_preds_test = pd.DataFrame(cat_preds).apply(np.mean, axis=0)
ridge_preds_test = pd.DataFrame(ridge_preds).apply(np.mean, axis=0)
submission["x_e_out [-]"] = hist_preds_test
submission.to_csv("Hist_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = lgb_preds_test
submission.to_csv("LightGBM_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = xgb_preds_test
submission.to_csv("XGBoost_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = cat_preds_test
submission.to_csv("CatBoost_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = ridge_preds_test
submission.to_csv("Ridge_Baseline_submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/855/129855427.ipynb
|
predicting-heat-flux
|
saurabhshahane
|
[{"Id": 129855427, "ScriptId": 38574651, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1822542, "CreationDate": "05/17/2023 02:04:03", "VersionNumber": 3.0, "Title": "PS-S3-Ep15 | EDA \ud83d\udcca | Modeling + Submission \ud83d\ude80", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 327.0, "LinesInsertedFromPrevious": 42.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 285.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 186248167, "KernelVersionId": 129855427, "SourceDatasetVersionId": 1921393}]
|
[{"Id": 1921393, "DatasetId": 1145869, "DatasourceVersionId": 1959907, "CreatorUserId": 2411256, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "02/08/2021 11:44:07", "VersionNumber": 1.0, "Title": "Predicting Critical Heat Flux", "Slug": "predicting-heat-flux", "Subtitle": "prediction of critical heat flux using Machine Learning", "Description": "### Context\n\nThis dataset was prepared for the journal article entitled \"On the prediction of critical heat flux using a physics-informed machine learning-aided framework\" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article. \n\n### Acknowledgements\n\nZhao, Xingang (2020), \u201cData for: On the prediction of critical heat flux using a physics-informed machine learning-aided framework\u201d, Mendeley Data, V1, doi: 10.17632/5p5h37tyv7.1", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1145869, "CreatorUserId": 2411256, "OwnerUserId": 2411256.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1921393.0, "CurrentDatasourceVersionId": 1959907.0, "ForumId": 1163376, "Type": 2, "CreationDate": "02/08/2021 11:44:07", "LastActivityDate": "02/08/2021", "TotalViews": 6889, "TotalDownloads": 589, "TotalVotes": 42, "TotalKernels": 78}]
|
[{"Id": 2411256, "UserName": "saurabhshahane", "DisplayName": "Saurabh Shahane", "RegisterDate": "10/26/2018", "PerformanceTier": 4}]
|
#
# Table of Contents
# [1. Notebook Versions](#1)
# [2. Loading Libraries](#2)
# [3. Reading Data Files](#3)
# [4. Data Exploration](#4)
# [5. Baseline Modeling 1.0](#5)
# # Notebook Versions
# 1. Version 1 (05/15/2023)
# * EDA
# * Baseline modeling 1.0
#
# 2. Version 2 (05/16/2023)
# * Baseline modeling 1.0 updated.
#
# 3. Version 3 (05/16/2023)
# * Baseline modeling 1.0 updated
#
# # Loading Libraries
import pandas as pd
pd.set_option("display.max_columns", 100)
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
plt.style.use("ggplot")
import seaborn as sns
import plotly.express as px
from sklearn.tree import DecisionTreeRegressor, plot_tree
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge, RidgeCV, Lasso, LassoCV
from sklearn.model_selection import (
KFold,
StratifiedKFold,
train_test_split,
GridSearchCV,
RepeatedKFold,
RepeatedStratifiedKFold,
)
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.inspection import PartialDependenceDisplay
from sklearn.ensemble import (
RandomForestRegressor,
HistGradientBoostingRegressor,
GradientBoostingRegressor,
ExtraTreesRegressor,
)
from sklearn.svm import SVR
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
#
# # Reading Data Files
data = pd.read_csv("../input/playground-series-s3e15/data.csv")
original = pd.read_csv("../input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv")
submission = pd.read_csv("../input/playground-series-s3e15/sample_submission.csv")
print("The dimension of the synthetic dataset is:", data.shape)
print("The dimension of the orginal dataset is:", original.shape)
print("The dimension of the submission dataset is:", submission.shape)
data.info()
data.head()
data.describe()
# From the above table, we see that there are several missing values in the competition data.
100 * np.sum(data.isnull(), axis=0) / data.shape[0]
# From the above, we see that at least 14% of the data is missing in each of the features of the competition data (except `chf_exp [MW/m2]`).
original.head()
original.describe()
# >
# 💡 There are no missing values in the original dataset. However, there are several missing values in the competition data. Also, by a quick eye-ball comparison of the summary statistics of the train and test datasets, they seem to have similar distributions.
# # Data Exploration
# First, we start by visualizing the variable of interest.
fig, axes = plt.subplots(1, 2, figsize=(18, 8))
sns.kdeplot(
ax=axes[0], data=data, x="x_e_out [-]", fill=True, color="steelblue"
).set_title("Competition Dataset")
sns.kdeplot(
ax=axes[1], data=original, x="x_e_out [-]", fill=True, color="orange"
).set_title("Original Dataset")
# By a quick eye-ball test, the distribution of the variable of interest are approximately the same in the competition and original datasets. Next, we explore what features are related to the variable of interest.
corr_mat_data = data.drop(columns=["id", "author", "geometry"], axis=1).corr()
corr_mat_original = original.drop(columns=["id"], axis=1).corr()
data_mask = np.triu(np.ones_like(corr_mat_data, dtype=bool))
original_mask = np.triu(np.ones_like(corr_mat_original, dtype=bool))
cmap = sns.diverging_palette(100, 7, s=75, l=40, n=5, center="light", as_cmap=True)
fig, axes = plt.subplots(1, 2, figsize=(25, 10))
sns.heatmap(
corr_mat_data,
annot=True,
cmap=cmap,
fmt=".2f",
center=0,
annot_kws={"size": 12},
ax=axes[0],
mask=data_mask,
).set_title("Correlations Among Features (in Competition Dataset)")
sns.heatmap(
corr_mat_original,
annot=True,
cmap=cmap,
fmt=".2f",
center=0,
annot_kws={"size": 12},
ax=axes[1],
mask=original_mask,
).set_title("Correlations Among Features (in Original Dataset)")
# From the above, `length [mm]` and `chf_exp [MW/m2]` correlated the most with `x_e_out [-]`. Note that this is consiistent in the competition and orginal datasets. Next we check for duplicates.
print("There are ", data.shape[0], "observations in the competition dataset")
print(
"There are ",
data.drop(columns=["id", "author", "geometry"], axis=1).drop_duplicates().shape[0],
"unique observations in the competition dataset",
)
print("There are ", original.shape[0], "observations in the original dataset")
print(
"There are ",
original.drop(columns=["id"], axis=1).drop_duplicates().shape[0],
"unique observations in the original dataset",
)
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
sns.scatterplot(
ax=axes[0], data=data, x="length [mm]", y="x_e_out [-]", color="steelblue"
).set_title("Competition Dataset")
sns.scatterplot(
ax=axes[1], data=original, x="length [mm]", y="x_e_out [-]", color="orange"
).set_title("Original Dataset")
# From the above plots, there seems to be a positive relationship between `length [mm]` and `x_e_out [-]`.
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
sns.scatterplot(
ax=axes[0], data=data, x="chf_exp [MW/m2]", y="x_e_out [-]", color="steelblue"
).set_title("Competition Dataset")
sns.scatterplot(
ax=axes[1], data=original, x="chf_exp [MW/m2]", y="x_e_out [-]", color="orange"
).set_title("Original Dataset")
# From the left plot (competition dataset), the relationship between `chf_exp [MW/m2]` and `x_e_out [-]` is not clear. However, in the right plot (original dataset), the relationship between `chf_exp [MW/m2]` and `x_e_out [-]` is more clear (negative).
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
sns.scatterplot(
ax=axes[0], data=data, x="pressure [MPa]", y="x_e_out [-]", color="steelblue"
).set_title("Competition Dataset")
sns.scatterplot(
ax=axes[1], data=original, x="pressure [MPa]", y="x_e_out [-]", color="orange"
).set_title("Original Dataset")
# From the left plot (competition dataset), the relationship between `pressure [MPa]` and `x_e_out [-]` is not clear. However, in the right plot (original dataset), the relationship between `pressure [MPa]` and `x_e_out [-]` is a lite more clear (negative).
# # Baseline Modeling 1.0
X = data[
["pressure [MPa]", "mass_flux [kg/m2-s]", "chf_exp [MW/m2]", "length [mm]"]
].copy()
X["mass_flux_missing"] = np.where(X["mass_flux [kg/m2-s]"].isnull(), 1, 0)
X["pressure_missing"] = np.where(X["pressure [MPa]"].isnull(), 1, 0)
X["chf_missing"] = np.where(X["chf_exp [MW/m2]"].isnull(), 1, 0)
X["generated"] = 1
X_org = original[
["pressure [MPa]", "mass_flux [kg/m2-s]", "chf_exp [MW/m2]", "length [mm]"]
].copy()
X_org["mass_flux_missing"] = np.where(X_org["mass_flux [kg/m2-s]"].isnull(), 1, 0)
X_org["pressure_missing"] = np.where(X_org["pressure [MPa]"].isnull(), 1, 0)
X_org["chf_missing"] = np.where(X_org["chf_exp [MW/m2]"].isnull(), 1, 0)
X_org["generated"] = 0
Y = data["x_e_out [-]"]
Y_org = original["x_e_out [-]"]
X = pd.concat([X, X_org], axis=0).reset_index(drop=True)
Y = pd.concat([Y, Y_org], axis=0).reset_index(drop=True)
X.columns = [
"pressure",
"mass_flux",
"chf_exp",
"length",
"mass_flux_missing",
"pressure_missing",
"chf_missing",
"generated",
]
test = X[Y.isnull()]
X = X[~Y.isnull()]
Y = Y[~Y.isnull()]
hist_cv_scores, hist_preds = list(), list()
lgb_cv_scores, lgb_preds = list(), list()
xgb_cv_scores, xgb_preds = list(), list()
cat_cv_scores, cat_preds = list(), list()
ridge_cv_scores, ridge_preds = list(), list()
skf = RepeatedKFold(n_splits=10, n_repeats=1, random_state=42)
for i, (train_ix, test_ix) in enumerate(skf.split(X, Y)):
X_train, X_test = X.iloc[train_ix], X.iloc[test_ix]
Y_train, Y_test = Y.iloc[train_ix], Y.iloc[test_ix]
print("---------------------------------------------------------------")
##########################
## HistGradientBoosting ##
##########################
hist_md = HistGradientBoostingRegressor(
l2_regularization=0.01,
early_stopping=False,
learning_rate=0.01,
max_iter=1000,
max_depth=8,
max_bins=255,
min_samples_leaf=20,
max_leaf_nodes=50,
).fit(X_train, Y_train)
hist_pred_1 = hist_md.predict(X_test[X_test["generated"] == 1])
hist_pred_2 = hist_md.predict(test)
hist_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], hist_pred_1, squared=False
)
hist_cv_scores.append(hist_score_fold)
hist_preds.append(hist_pred_2)
print("Fold", i, "==> HistGradient oof RMSE is ==>", hist_score_fold)
##############
## LightGBM ##
##############
lgb_md = LGBMRegressor(
n_estimators=1000,
max_depth=5,
learning_rate=0.01,
num_leaves=50,
min_child_samples=20,
reg_alpha=0.01,
reg_lambda=0.01,
subsample=0.7,
colsample_bytree=0.8,
device="gpu",
).fit(X_train, Y_train)
lgb_pred_1 = lgb_md.predict(X_test[X_test["generated"] == 1])
lgb_pred_2 = lgb_md.predict(test)
lgb_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], lgb_pred_1, squared=False
)
lgb_cv_scores.append(lgb_score_fold)
lgb_preds.append(lgb_pred_2)
print("Fold", i, "==> LightGBM oof RMSE is ==>", lgb_score_fold)
#############
## XGBoost ##
#############
xgb_md = XGBRegressor(
tree_method="gpu_hist",
colsample_bytree=0.8,
gamma=0.01,
learning_rate=0.01,
max_depth=5,
min_child_weight=50,
n_estimators=1000,
subsample=0.7,
).fit(X_train, Y_train)
xgb_pred_1 = xgb_md.predict(X_test[X_test["generated"] == 1])
xgb_pred_2 = xgb_md.predict(test)
xgb_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], xgb_pred_1, squared=False
)
xgb_cv_scores.append(xgb_score_fold)
xgb_preds.append(xgb_pred_2)
print("Fold", i, "==> XGBoost oof RMSE is ==>", xgb_score_fold)
##############
## CatBoost ##
##############
cat_md = CatBoostRegressor(
loss_function="RMSE",
iterations=1000,
learning_rate=0.01,
depth=10,
random_strength=0.5,
bagging_temperature=0.7,
border_count=55,
l2_leaf_reg=5,
verbose=False,
task_type="GPU",
).fit(X_train, Y_train)
cat_pred_1 = cat_md.predict(X_test[X_test["generated"] == 1])
cat_pred_2 = cat_md.predict(test)
cat_score_fold = mean_squared_error(
Y_test[X_test["generated"] == 1], cat_pred_1, squared=False
)
cat_cv_scores.append(cat_score_fold)
cat_preds.append(cat_pred_2)
print("Fold", i, "==> CatBoost oof RMSE is ==>", cat_score_fold)
##############
## Ensemble ##
##############
x = pd.DataFrame(
{"hist": hist_pred_1, "lgb": lgb_pred_1, "xgb": xgb_pred_1, "cat": cat_pred_1}
)
y = Y_test[X_test["generated"] == 1]
ridge_cv = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1], cv=5).fit(x, y)
alpha_cv = ridge_cv.alpha_
ridge_md = Ridge(alpha=alpha_cv).fit(x, y)
ridge_pred = ridge_md.predict(x)
x_test = pd.DataFrame(
{"hist": hist_pred_2, "lgb": lgb_pred_2, "xgb": xgb_pred_2, "cat": cat_pred_2}
)
ridge_test_pred = ridge_md.predict(x_test)
ridge_score = mean_squared_error(y, ridge_pred, squared=False)
ridge_cv_scores.append(ridge_score)
ridge_preds.append(ridge_test_pred)
print("Fold", i, "==> Ridge ensemble oof RMSE is ==>", ridge_score)
print("---------------------------------------------------------------")
hist_cv_score = np.mean(hist_cv_scores)
lgb_cv_score = np.mean(lgb_cv_scores)
xgb_cv_score = np.mean(xgb_cv_scores)
cat_cv_score = np.mean(cat_cv_scores)
ridge_cv_score = np.mean(ridge_cv_scores)
model_perf = pd.DataFrame(
{
"Model": ["HistGradient", "LightGBM", "XGBoost", "CatBoost", "Ridge-Ensemble"],
"cv-score": [
hist_cv_score,
lgb_cv_score,
xgb_cv_score,
cat_cv_score,
ridge_cv_score,
],
}
)
plt.figure(figsize=(8, 8))
ax = sns.barplot(y="Model", x="cv-score", data=model_perf)
ax.bar_label(ax.containers[0])
hist_preds_test = pd.DataFrame(hist_preds).apply(np.mean, axis=0)
lgb_preds_test = pd.DataFrame(lgb_preds).apply(np.mean, axis=0)
xgb_preds_test = pd.DataFrame(xgb_preds).apply(np.mean, axis=0)
cat_preds_test = pd.DataFrame(cat_preds).apply(np.mean, axis=0)
ridge_preds_test = pd.DataFrame(ridge_preds).apply(np.mean, axis=0)
submission["x_e_out [-]"] = hist_preds_test
submission.to_csv("Hist_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = lgb_preds_test
submission.to_csv("LightGBM_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = xgb_preds_test
submission.to_csv("XGBoost_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = cat_preds_test
submission.to_csv("CatBoost_Baseline_submission.csv", index=False)
submission["x_e_out [-]"] = ridge_preds_test
submission.to_csv("Ridge_Baseline_submission.csv", index=False)
| false | 3 | 4,539 | 3 | 4,655 | 4,539 |
||
129855288
|
<jupyter_start><jupyter_text>Loan Default Prediction
This is a synthetic dataset created using actual data from a financial institution. The data has been modified to remove identifiable features, and the numbers have been transformed so that they cannot be linked back to the original source (the financial institution).
This dataset is intended for academic use by beginners who want to practice financial analytics on a simple financial dataset.
Kaggle dataset identifier: loan-default-prediction
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Data Preparation
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
# Load the dataset
df = pd.read_csv("/kaggle/input/loan-default-prediction/Default_Fin.csv")
# Handle Missing Values
imputer = SimpleImputer(strategy="mean") # Use mean imputation
df["Bank Balance"] = imputer.fit_transform(df[["Bank Balance"]])
df["Annual Salary"] = imputer.fit_transform(df[["Annual Salary"]])
# Handle Outliers (using manual winsorization)
def winsorize_column(column, lower_limit, upper_limit):
    # Clip values below/above the given percentile limits (manual winsorization)
    lower, upper = np.percentile(column, [lower_limit, upper_limit])
    return column.clip(lower=lower, upper=upper)
df["Bank Balance"] = winsorize_column(df["Bank Balance"], 5, 95)
df["Annual Salary"] = winsorize_column(df["Annual Salary"], 5, 95)
# Format Conversion (if needed)
# If any categorical variables need conversion, use appropriate encoding methods
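# Hedged illustration of the note above: the columns in this file are already numeric
# (the "Employed" flag is 0/1), so the column name below is hypothetical and the guard
# makes this a no-op for this dataset.
if "Employment Type" in df.columns:  # hypothetical categorical column
    df = pd.get_dummies(df, columns=["Employment Type"], drop_first=True)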
# Splitting Data
X = df[["Employed", "Bank Balance", "Annual Salary"]] # Features
y = df["Defaulted?"] # Target variable
# Scaling or Normalization
# Note: the models further below are fit on the unscaled X; X_scaled is kept for
# reference (a DataFrame version of it is sketched just after this step)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
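# Hedged sketch: StandardScaler returns a plain NumPy array, so wrapping it back into a
# DataFrame keeps the column names. This scaled copy is not used by the tree-based
# models below, but a scale-sensitive model such as LogisticRegression could be fit on
# it instead of the raw X.
X_scaled_df = pd.DataFrame(X_scaled, columns=X.columns, index=X.index)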
# Rename the column
df = df.rename(columns={"Defaulted?": "Defaulted"})
import matplotlib.pyplot as plt
import seaborn as sns
# Examine the dataset
print(df.head()) # Display the first few rows of the dataset
print(
df.info()
) # Get information about the dataset, including data types and missing values
print(df.describe()) # Statistical summary of numerical features
# # Data Exploration
# Explore the distribution of the target variable (Defaulted)
sns.countplot(x="Defaulted", data=df)
plt.title("Distribution of Defaulted")
plt.show()
# Explore the relationship between features and the target variable
sns.boxplot(x="Defaulted", y="Bank Balance", data=df)
plt.title("Bank Balance vs. Defaulted")
plt.show()
sns.boxplot(x="Defaulted", y="Annual Salary", data=df)
plt.title("Annual Salary vs. Defaulted")
plt.show()
sns.violinplot(x="Defaulted", y="Employed", data=df)
plt.title("Employed vs. Defaulted")
plt.show()
# Correlation heatmap of numerical features
numerical_features = ["Bank Balance", "Annual Salary"]
corr_matrix = df[numerical_features].corr()
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.title("Correlation Heatmap of Numerical Features")
plt.show()
# # Feature Engineering
# Feature Engineering: ratio of bank balance to annual salary. Note that this column is
# added to df only and is not part of the feature matrix X built earlier; a hedged
# sketch of wiring it in follows the verification below.
df["Debt-to-Income Ratio"] = df["Bank Balance"] / df["Annual Salary"]
# Verify the new feature
print(df.head())
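# Hedged sketch (not used by the models below): if the engineered ratio were fed into
# the feature matrix, a division-by-zero guard for zero salaries and an extended copy
# of X could look like this; "debt_to_income" is an illustrative column name.
safe_ratio = df["Bank Balance"] / df["Annual Salary"].replace(0, np.nan)
X_with_ratio = X.assign(debt_to_income=safe_ratio.fillna(0))
print(X_with_ratio.head())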
# # Model Training and Selection
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
)
# Split the Data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Instantiate and Train Different Models
models = {
"Logistic Regression": LogisticRegression(),
"Decision Tree": DecisionTreeClassifier(),
"Random Forest": RandomForestClassifier(),
"Gradient Boosting": GradientBoostingClassifier(),
}
results = {}
for model_name, model in models.items():
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# Evaluate the Model
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
roc_auc = roc_auc_score(y_test, y_pred)
results[model_name] = {
"Accuracy": accuracy,
"Precision": precision,
"Recall": recall,
"F1 Score": f1,
"ROC AUC Score": roc_auc,
}
# Print the Results
for model_name, metrics in results.items():
print(f"Metrics for {model_name}:")
for metric_name, value in metrics.items():
print(f"{metric_name}: {value}")
print()
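# Hedged follow-up: defaults are rare in this data, so accuracy alone can look strong
# even for weak models. A confusion matrix for one fitted estimator from the dict above
# (Gradient Boosting, chosen purely as an example) makes the per-class behaviour explicit.
from sklearn.metrics import confusion_matrix
print("Gradient Boosting confusion matrix:")
print(confusion_matrix(y_test, models["Gradient Boosting"].predict(X_test)))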
from sklearn.naive_bayes import GaussianNB
# Instantiate and Train the Model
model = GaussianNB()
model.fit(X_train, y_train)
# Make Predictions
y_pred = model.predict(X_test)
# Evaluate the Model
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
roc_auc = roc_auc_score(y_test, y_pred)
# Print the Evaluation Metrics
print("Accuracy:", accuracy)
print("Precision:", precision)
print("Recall:", recall)
print("F1 Score:", f1)
print("ROC AUC Score:", roc_auc)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/855/129855288.ipynb
|
loan-default-prediction
|
kmldas
|
[{"Id": 129855288, "ScriptId": 38621547, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4940277, "CreationDate": "05/17/2023 02:02:00", "VersionNumber": 2.0, "Title": "Loan Default - 96% Accuracy", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 171.0, "LinesInsertedFromPrevious": 24.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 147.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 12}]
|
[{"Id": 186247895, "KernelVersionId": 129855288, "SourceDatasetVersionId": 2092236}]
|
[{"Id": 2092236, "DatasetId": 1254539, "DatasourceVersionId": 2132707, "CreatorUserId": 2781854, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/06/2021 15:03:41", "VersionNumber": 2.0, "Title": "Loan Default Prediction", "Slug": "loan-default-prediction", "Subtitle": "Beginners data set for financial analytics", "Description": "This is a synthetic dataset created using actual data from a financial institution. The data has been modified to remove identifiable features and the numbers transformed to ensure they do not link to original source (financial institution). \n\n\nThis is intended to be used for academic purposes for beginners who want to practice financial analytics from a simple financial dataset", "VersionNotes": "v2", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1254539, "CreatorUserId": 2781854, "OwnerUserId": 2781854.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2092236.0, "CurrentDatasourceVersionId": 2132707.0, "ForumId": 1272906, "Type": 2, "CreationDate": "04/06/2021 14:46:59", "LastActivityDate": "04/06/2021", "TotalViews": 56465, "TotalDownloads": 6869, "TotalVotes": 89, "TotalKernels": 47}]
|
[{"Id": 2781854, "UserName": "kmldas", "DisplayName": "Kamal Das", "RegisterDate": "02/05/2019", "PerformanceTier": 4}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Data Preparation
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
# Load the dataset
df = pd.read_csv("/kaggle/input/loan-default-prediction/Default_Fin.csv")
# Handle Missing Values
imputer = SimpleImputer(strategy="mean") # Use mean imputation
df["Bank Balance"] = imputer.fit_transform(df[["Bank Balance"]])
df["Annual Salary"] = imputer.fit_transform(df[["Annual Salary"]])
# Handle Outliers (using manual winsorization)
def winsorize_column(column, lower_limit, upper_limit):
p = np.percentile(column, [lower_limit, upper_limit])
column[column < p[0]] = p[0]
column[column > p[1]] = p[1]
return column
df["Bank Balance"] = winsorize_column(df["Bank Balance"], 5, 95)
df["Annual Salary"] = winsorize_column(df["Annual Salary"], 5, 95)
# Format Conversion (if needed)
# If any categorical variables need conversion, use appropriate encoding methods
# Splitting Data
X = df[["Employed", "Bank Balance", "Annual Salary"]] # Features
y = df["Defaulted?"] # Target variable
# Scaling or Normalization
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Rename the column
df = df.rename(columns={"Defaulted?": "Defaulted"})
import matplotlib.pyplot as plt
import seaborn as sns
# Examine the dataset
print(df.head()) # Display the first few rows of the dataset
print(
df.info()
) # Get information about the dataset, including data types and missing values
print(df.describe()) # Statistical summary of numerical features
# # Data Exploration
# Explore the distribution of the target variable (Defaulted)
sns.countplot(x="Defaulted", data=df)
plt.title("Distribution of Defaulted")
plt.show()
# Explore the relationship between features and the target variable
sns.boxplot(x="Defaulted", y="Bank Balance", data=df)
plt.title("Bank Balance vs. Defaulted")
plt.show()
sns.boxplot(x="Defaulted", y="Annual Salary", data=df)
plt.title("Annual Salary vs. Defaulted")
plt.show()
sns.violinplot(x="Defaulted", y="Employed", data=df)
plt.title("Employed vs. Defaulted")
plt.show()
# Correlation heatmap of numerical features
numerical_features = ["Bank Balance", "Annual Salary"]
corr_matrix = df[numerical_features].corr()
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.title("Correlation Heatmap of Numerical Features")
plt.show()
# # Feature Engineering
# Feature Engineering
df["Debt-to-Income Ratio"] = df["Bank Balance"] / df["Annual Salary"]
# Verify the new feature
print(df.head())
# # Model Training and Selection
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
)
# Split the Data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Instantiate and Train Different Models
models = {
"Logistic Regression": LogisticRegression(),
"Decision Tree": DecisionTreeClassifier(),
"Random Forest": RandomForestClassifier(),
"Gradient Boosting": GradientBoostingClassifier(),
}
results = {}
for model_name, model in models.items():
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# Evaluate the Model
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
roc_auc = roc_auc_score(y_test, y_pred)
results[model_name] = {
"Accuracy": accuracy,
"Precision": precision,
"Recall": recall,
"F1 Score": f1,
"ROC AUC Score": roc_auc,
}
# Print the Results
for model_name, metrics in results.items():
print(f"Metrics for {model_name}:")
for metric_name, value in metrics.items():
print(f"{metric_name}: {value}")
print()
from sklearn.naive_bayes import GaussianNB
# Instantiate and Train the Model
model = GaussianNB()
model.fit(X_train, y_train)
# Make Predictions
y_pred = model.predict(X_test)
# Evaluate the Model
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
roc_auc = roc_auc_score(y_test, y_pred)
# Print the Evaluation Metrics
print("Accuracy:", accuracy)
print("Precision:", precision)
print("Recall:", recall)
print("F1 Score:", f1)
print("ROC AUC Score:", roc_auc)
| false | 1 | 1,557 | 12 | 1,647 | 1,557 |
||
129822076
|
# ## K Nearest Neighbour Classifier
# K-nearest neighbors (KNN) is a simple, supervised machine learning algorithm that can be used for both classification and regression problems. It works by finding the k most similar instances to a new instance, and then assigning the label of the majority of those instances to the new instance.
# In this post, I will walk you through the steps of implementing KNN in Python using the code below; a tiny from-scratch sketch of the "majority vote among the k closest points" idea follows the imports.
# ## First, we need to import the necessary libraries:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
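# Before using scikit-learn, here is a minimal from-scratch sketch of KNN classification (my own illustration, not part of the library): compute distances to all training points, take the k nearest, and vote. The toy data below is made up purely for demonstration.
def knn_predict_one(X_train, y_train, query, k=5):
    # Euclidean distance from the query point to every training point
    distances = np.linalg.norm(X_train - query, axis=1)
    # indices of the k nearest neighbours
    nearest = np.argsort(distances)[:k]
    # majority vote over their labels
    labels, counts = np.unique(y_train[nearest], return_counts=True)
    return labels[np.argmax(counts)]
# toy usage with two obvious clusters
X_demo = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
y_demo = np.array([0, 0, 1, 1])
print(knn_predict_one(X_demo, y_demo, np.array([4.8, 5.0]), k=3))  # -> 1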
# ## Next, we create a classification dataset:
#
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=1000, # 1000 observations
n_features=3, # 3 total features
    n_redundant=1,  # creates a redundant feature (it adds no new information; it is derived from the existing features)
n_classes=2, # binary target/label
random_state=999,
)
# **The X variable contains the features of the dataset, and the y variable contains the labels.**
#
X
# ## We can now split the data into a training set and a test set:
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
# The training set will be used to train the KNN model, and the test set will be used to evaluate the model's performance.
# ## Now, we can create a KNN classifier and Train:
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=5, algorithm="auto")
classifier.fit(X_train, y_train)
# The n_neighbors parameter specifies the number of neighbors to use.
# ## We can now make predictions with the KNN model:
y_pred = classifier.predict(X_test)
# ### Finally, we can evaluate the model's performance on the test set:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
print(confusion_matrix(y_pred, y_test))
print(accuracy_score(y_pred, y_test))
print(classification_report(y_pred, y_test))
# **The output of the confusion_matrix function is a table showing how many instances were classified correctly and how many were misclassified. The accuracy score is the fraction of instances that were correctly classified.
# The F1 score is the harmonic mean of precision and recall. In this case, the KNN model has an accuracy of 0.906, a precision of 0.93, a recall of 0.89, and an F1 score of 0.91, which means the model does a very good job of predicting the correct class for the test instances.**
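# As a quick sanity check on how the F1 score combines precision and recall, the harmonic mean can be computed directly; the numbers below are the rounded values quoted above, so the result is only approximate.
precision_quoted, recall_quoted = 0.93, 0.89
f1_manual = 2 * precision_quoted * recall_quoted / (precision_quoted + recall_quoted)
print(round(f1_manual, 3))  # ~0.91, matching the classification report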
# ## Perform a GridSearchCV to find the best parameters
## Task: tune n_neighbors with GridSearchCV
from sklearn.model_selection import GridSearchCV
kclassifier1 = KNeighborsClassifier(algorithm="auto")
param_grid = {
"n_neighbors": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
}
# Create an instance of GridSearchCV
grid_search = GridSearchCV(estimator=kclassifier1, param_grid=param_grid, cv=5)
# Fit the grid search to the data
grid_search.fit(X_train, y_train)
grid_search.best_params_
# ## Make predictions and Evaluate the performance of the model with the best parameters:
y_pred = grid_search.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
print(confusion_matrix(y_pred, y_test))
print(accuracy_score(y_pred, y_test))
print(classification_report(y_pred, y_test))
# **The best parameters for the KNN model are n_neighbors=9. The model has an accuracy of 0.912, a precision of 0.92, a recall of 0.91, and an f1-score of 0.91. This means that the model is very good at predicting the correct class for the test instances.**
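# Beyond best_params_, the fitted grid search also exposes the mean cross-validated score for every value of k, which shows how flat or sharp the optimum is. A small inspection sketch using the grid_search object fitted above:
cv_results = pd.DataFrame(grid_search.cv_results_)
print(
    cv_results[["param_n_neighbors", "mean_test_score", "std_test_score"]].sort_values(
        "mean_test_score", ascending=False
    )
)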
# ## K Nearest Neighbour Regression
## K Nearest Neighbour Regression
from sklearn.datasets import make_regression
X, y = make_regression(n_samples=1000, n_features=2, noise=10, random_state=42)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
from sklearn.neighbors import KNeighborsRegressor
regressor = KNeighborsRegressor(n_neighbors=6, algorithm="auto")
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
print(r2_score(y_test, y_pred))
print(mean_absolute_error(y_test, y_pred))
print(mean_squared_error(y_test, y_pred))
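# The same grid-search idea carries over to the regressor. A minimal sketch (scored with R-squared, the default for scikit-learn regressors) that tunes k for KNeighborsRegressor on the regression split above:
from sklearn.model_selection import GridSearchCV
reg_grid = GridSearchCV(
    KNeighborsRegressor(algorithm="auto"),
    param_grid={"n_neighbors": list(range(1, 11))},
    cv=5,
)
reg_grid.fit(X_train, y_train)
print(reg_grid.best_params_, reg_grid.best_score_)
print(r2_score(y_test, reg_grid.predict(X_test)))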
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/822/129822076.ipynb
| null | null |
[{"Id": 129822076, "ScriptId": 38564325, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14732841, "CreationDate": "05/16/2023 18:05:03", "VersionNumber": 1.0, "Title": "KNN Classifier & Regression implementation", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 114.0, "LinesInsertedFromPrevious": 114.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
| null | null | null | null |
# ## K Nearest Neighbour Classifier
# K-nearest neighbors (KNN) is a simple, supervised machine learning algorithm that can be used for both classification and regression problems. It works by finding the k most similar instances to a new instance, and then assigning the label of the majority of those instances to the new instance.
# In this post, I will walk you through the steps of implementing KNN in Python, using the code below.
# ## First, we need to import the necessary libraries:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# ## Next, we create a classification dataset:
#
from sklearn.datasets import make_classification
X, y = make_classification(
n_samples=1000, # 1000 observations
n_features=3, # 3 total features
    n_redundant=1,  # creates a redundant feature (it adds no new information; it is derived from the existing features)
n_classes=2, # binary target/label
random_state=999,
)
# **The X variable contains the features of the dataset, and the y variable contains the labels.**
#
X
# ## We can now split the data into a training set and a test set:
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
# The training set will be used to train the KNN model, and the test set will be used to evaluate the model's performance.
# ## Now, we can create a KNN classifier and Train:
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=5, algorithm="auto")
classifier.fit(X_train, y_train)
# The n_neighbors parameter specifies the number of neighbors to use.
# ## We can now make predictions with the KNN model:
y_pred = classifier.predict(X_test)
# ### Finally, we can evaluate the model's performance on the test set:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
print(confusion_matrix(y_pred, y_test))
print(accuracy_score(y_pred, y_test))
print(classification_report(y_pred, y_test))
# **The output of the confusion_matrix function is a table showing how many instances were classified correctly and how many were misclassified. The accuracy score is the fraction of instances that were correctly classified.
# The F1 score is the harmonic mean of precision and recall. In this case, the KNN model has an accuracy of 0.906, a precision of 0.93, a recall of 0.89, and an F1 score of 0.91, which means the model does a very good job of predicting the correct class for the test instances.**
# ## Perform a GridSearchCV to find the best parameters
## Task: tune n_neighbors with GridSearchCV
from sklearn.model_selection import GridSearchCV
kclassifier1 = KNeighborsClassifier(algorithm="auto")
param_grid = {
"n_neighbors": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
}
# Create an instance of GridSearchCV
grid_search = GridSearchCV(estimator=kclassifier1, param_grid=param_grid, cv=5)
# Fit the grid search to the data
grid_search.fit(X_train, y_train)
grid_search.best_params_
# ## Make predictions and Evaluate the performance of the model with the best parameters:
y_pred = grid_search.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
print(confusion_matrix(y_pred, y_test))
print(accuracy_score(y_pred, y_test))
print(classification_report(y_pred, y_test))
# **The best parameters for the KNN model are n_neighbors=9. The model has an accuracy of 0.912, a precision of 0.92, a recall of 0.91, and an f1-score of 0.91. This means that the model is very good at predicting the correct class for the test instances.**
# ## K Nearest Neighbour Regression
## K Nearest Neighbour Regression
from sklearn.datasets import make_regression
X, y = make_regression(n_samples=1000, n_features=2, noise=10, random_state=42)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
from sklearn.neighbors import KNeighborsRegressor
regressor = KNeighborsRegressor(n_neighbors=6, algorithm="auto")
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
print(r2_score(y_test, y_pred))
print(mean_absolute_error(y_test, y_pred))
print(mean_squared_error(y_test, y_pred))
| false | 0 | 1,290 | 3 | 1,290 | 1,290 |
||
129822348
|
#
# CS256 Team Project, 2023 Spring
# Team 1
# Anjana Priyatham Tatavarthi, Daniel Quintana, Lifan Zeng, Lohith Nagaraja, Mrunal Deepak Zambre, Priya Harika Yerapothu, Srinivas Rao Chavan
# #### Essential info
# - Machine learning algorithms used: LSTM, ConvLSTM, and a 3D CNN
# - Bonus part(s): ??? and by whom (if any)
# - Any other things ???
# ### Who did what
# | What | Description | By |
# | ----------- | ----------- | ----------- |
# | Data preparation| Examine the data and perform data cleansing | John Smith and Mary Joe|
# | item#2 | what was done in item#2 | by whom |
# | more items | what was done in more items | ... |
# | ... | ... | ... |
# ### Link to your online discussion forum for this project: [discussion link](http://???)
# The following pair of gesture datasets are assigned to
# (a) Warn, Turn left and turn right: Team #4
# b) Warn, Turn and circle : Team #3
# c) Warn, Stop and no : Team #2
# d) Warn, Hello and abort : Team #1
# ## We are working on classification of the Warn, Hello, and Abort gestures
# Please make sure
# - Provide clear and explicit explanation on the process and results in your work
# - No ad hoc manual methodology should be used in any task below.
# - Provide generic solution that is not specific/valid to your data file only
# - Comments, in Markdown cells, should be added wherever appropriate
# - Always use charts, figures, or other visual formats to present your results
# - Copy and rename this file as teamXfinalproject.ipynb in your submission
# - Your submitted Colab can be run from the beginning to the end without any error ( you may opt for a different Colab file to showcase your bonus parts).
# - Make sure all your Colab and data files are viewable to all SJSU account holders.
# ## Project Result Outline
# - [EDA and Data Preparation (selected milestone 1 content)](#EDA)
# - [Shift the origin of the data to the center of the body](#Shifttheorigin)
# - [Evaluating polar angle, angular velocity, and angular acceleration of from (x,y)](#angle-data)
# - [Store/write the polar angle velocity and polar angle acceleration data in the 3rd and the 4th component of each dataset.](#storing-angle-data)
# - [Classification with LSTM (selected milestone 2 content)](#LSTM-classification)
# - [Data preparation for LSTM by Keras](#Keras-data)
# - [Split data to train(80%) and test(20%)](#Train-test-split)
# - [LSTM model performance with different configurations](#lstm-model)
# - [LSTM model performance with different datasets](#differnt-datasets)
# - [Reasons for the varying performance due to different model configurations or datasets](#performance-reasons)
# - [classification of multiple gesture sequences(milestone 3)](#sequence)
# - [Creation of Concatenated videos with two gestures in a sequence](#videos)
# - [Creation of joint coordinate profiles of concatenated gestures](#joints)
# - [Modeling](#model)
# - [Discussion of your results using classification report](#classification_report)
# - [Explain your results](#explain)
#
#
# - [Bonus item B1](#bonus-B1)
# - [9 gestures with 2-gesture sequence recognition](#9-2-gestures)
# - [Bonus item B2](#bonus-B2)
# - [9 gestures with 3-gesture sequence recognition](#9-3-gestures)
# - [Additional related results not shown above](#additional_results)
# **Load packages:**
import os
# Everyone must put the copied folder in the exact path shown below so you collaborate and get graded easily.
# **No project score will be given for not following this folder configuration**
# (Our grader is *not responsible for figuring out your own* preferred Google *folder* configuration)
# **Only Keras or scikit_learn should be used in this project**
root_path = "/kaggle/input/cs-256/CS256Project/data"
(os.path.exists(root_path)) # Checking if the data paths indeed exist and are valid.
# - Import libraries and models needed for this work
from numpy import mean
from numpy import std
from numpy import dstack
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LSTM
from keras.utils.np_utils import to_categorical
from matplotlib import pyplot
import math
from tensorflow.keras.preprocessing.sequence import pad_sequences
import csv
#
# ## EDA and Data Preparation (selected milestone 1 content)
# ---
import colorsys
import os
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import numpy as np
# You may use [this chrome app](https://script.google.com/macros/s/AKfycbxbGNGajrxv-HbX2sVY2OTu7yj9VvxlOMOeQblZFuq7rYm7uyo/exec) to copy the CS256Project folder that I shared with you to your own drive.
# Everyone must put the copied folder in the exact path shown below so you collaborate and get graded easily.
# **No project score will be given for not following this folder configuration**
# (Our grader is *not responsible for figuring out your own* preferred Google *folder* configuration)
# Basically, you simply need to use the chrome app above to copy the entire CS256Project folder to your Google doc root folder
gestures_data_path = "/kaggle/input/cs-256/CS256Project/data/"
gestures_2d_path = os.path.join(gestures_data_path, "gestures_basic_d2")
(
os.path.exists(gestures_data_path),
os.path.exists(gestures_2d_path),
) # Checking if the data paths indeed exist and are valid.
# Sample Datapoint Properties
class_label = "abort"
video_id = "1_0"
n_frames = 24
# ## Exploring data
# Extract a sample data and see what is in it
sample_2d_data_path = os.path.join(
gestures_2d_path, class_label, f"{class_label}_{video_id}.mp4.npz"
)
data = np.load(sample_2d_data_path, allow_pickle=True)
# What does the data contain?
data.files
data["segments"]
data["keypoints"].shape
data["keypoints"][0][1]
data["boxes"].shape
data["boxes"][0][1][0]
data["metadata"]
# ### keypoints is where the (x,y) coordinate values are stored
coords = data["keypoints"]
coords[0, 1].shape
# ### There are 17 labeled joints as generated from a research algorithm based on the human body image.
# **The joints are indexed as follows** (a small helper mapping based on this list is sketched right after it).
# 0 - Nose
# 1 - left eye
# 2 - Right eye
# 3 - left ear
# 4 - Right ear
# 5 - left shoulder
# 6 - Right shoulder
# 7 - left elbow
# 8 - Right elbow
# 9 - left wrist
# 10 - Right wrist
# 11 - left hip
# 12 - Right hip
# 13 - left knee
# 14 - Right knee
# 15 - left Ankle
# 16 - Right Ankle
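# To avoid hard-coding magic joint indices later on, the list above can be captured in a name-to-index mapping. This is just a convenience sketch based on the indexing documented here; it is not used elsewhere in this notebook.
KEYPOINT_NAMES = [
    "nose", "left_eye", "right_eye", "left_ear", "right_ear",
    "left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
    "left_wrist", "right_wrist", "left_hip", "right_hip",
    "left_knee", "right_knee", "left_ankle", "right_ankle",
]
KEYPOINT_INDEX = {name: i for i, name in enumerate(KEYPOINT_NAMES)}
print(KEYPOINT_INDEX["right_ankle"])  # 16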
# ### The four stored values of one joint (index 16, the right ankle) in the first frame of the given sample
coords[0][1][0][:, 16]
# The first two components are (x,y) coordinate values
# ### Let's take a transpose so we can access the (x,y) coordinates more easily
# [0][1][0] because... [frame_id][always 1][0 to remove extra dimension]
# transpose(1, 0) to make it shape (17, 4) so first index is keypoint index
coords_0 = coords[0][1][0].transpose(1, 0)
coords_0.shape, coords_0
## Load the keypoints for every sample of a given gesture class (the required joints are selected later)
def get_data(class_label):
data = []
data_path = os.path.join(gestures_2d_path, class_label)
for filename in os.listdir(data_path):
# get path of each file
file_path = os.path.join(data_path, filename)
# load the numpy array from the input file
l = np.load(file_path, allow_pickle=True)
sample = []
for i, frame in enumerate(l["keypoints"]):
sample.append(frame[1][0])
data.append(sample)
return data
hello_data = get_data("hello")
abort_data = get_data("abort")
warn_data = get_data("warn")
print(hello_data[0][0])
len(hello_data)
len(abort_data)
len(warn_data)
hello_data[0][0].transpose(1, 0)
def transpose_keypoints(data):
keypoints = []
for frames in data:
new_frames = []
for frame in frames:
transposed_frame = frame.transpose()
new_frames.append(np.array(transposed_frame))
keypoints.append(np.array(new_frames))
return np.array(keypoints)
hello_data = transpose_keypoints(hello_data)
abort_data = transpose_keypoints(abort_data)
warn_data = transpose_keypoints(warn_data)
hello_data[0][0]
#
# ## Shift the origin to the nose, recalculate the new (x,y) coordinates for all datasets, and overwrite the original (x,y) values
print(hello_data[0][0])
def shift_origin(keypoints):
for frames in keypoints:
for frame in frames:
coordinates = frame
x_origin, y_origin = coordinates[0][0], coordinates[0][1]
for i in range(len(coordinates)):
coordinates[i][0] -= x_origin
coordinates[i][1] -= y_origin
shift_origin(hello_data)
shift_origin(abort_data)
shift_origin(warn_data)
hello_data[0][0]
#
# # Calculate the polar angle velocity and the polar angle acceleration based on the transformed (x,y) from above
def update_polar_angles(keypoints):
for frames in keypoints:
for frame in frames:
coordinates = frame
for i in range(1, len(coordinates)):
if coordinates[i][0] != 0:
coordinates[i][2] = math.atan(coordinates[i][1] / coordinates[i][0])
def update_polar_velocity(keypoints):
    # Angular velocity = change in polar angle between consecutive frames,
    # written into the 4th component (index 3) of every joint except the nose.
    for frames in keypoints:
        for i in range(len(frames)):
            for j in range(1, len(frames[i])):
                if i == 0:
                    # the first frame has no predecessor, so its velocity is 0
                    frames[i][j][3] = 0
                else:
                    frames[i][j][3] = frames[i][j][2] - frames[i - 1][j][2]
update_polar_angles(hello_data)
update_polar_angles(abort_data)
update_polar_angles(warn_data)
update_polar_velocity(hello_data)
update_polar_velocity(abort_data)
update_polar_velocity(warn_data)
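# As a cross-check on the loop-based angle computation above, the polar angle can also be obtained in a vectorized way. This is my own sketch, not part of the project template; it uses np.arctan2, which handles x = 0 and quadrants differently from math.atan(y/x), so small differences are expected.
def polar_angles_vectorized(frames):
    # frames: array of shape (n_frames, 17, 4) with (x, y, ...) per joint
    frames = np.asarray(frames)
    return np.arctan2(frames[:, :, 1], frames[:, :, 0])
# e.g. angles for the first "hello" sample (all 17 joints are still present at this point)
print(polar_angles_vectorized(hello_data[0]).shape)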
def select_required_gestures(data):
new_data = []
for sample in data:
new_sample = []
for frame in sample:
            # keep only the 6 upper-body joints: shoulders (5, 6), elbows (7, 8), wrists (9, 10)
            new_frame = frame[5:11]
new_sample.append(new_frame)
new_data.append(new_sample)
return new_data
hello_data = select_required_gestures(hello_data)
abort_data = select_required_gestures(abort_data)
warn_data = select_required_gestures(warn_data)
print(max([len(hello) for hello in hello_data]))
print(max([len(abort) for abort in abort_data]))
print(max([len(warn) for warn in warn_data]))
#
# # Classification with LSTM (selected milestone 2 content)
# Padding the data to fix the unequal number of frames across samples of the dataset
def pad_data(data):
# max length set to 49 since the longest video has 49 frames
max_length = 49
# using pad_sequences() function to pad 0s to each sample such that all samples have the same number of frames
return pad_sequences(data, maxlen=max_length, padding="post", dtype="float32")
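# A tiny illustration (toy numbers, not gesture data) of what pad_sequences with padding="post" does to ragged inputs:
toy = [[[1.0, 1.0]], [[2.0, 2.0], [3.0, 3.0]]]  # two "videos" with 1 and 2 frames
print(pad_sequences(toy, maxlen=3, padding="post", dtype="float32"))
# -> shape (2, 3, 2); shorter sequences are filled with trailing zero frames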
# combining the hello, abort and warn samples and padding the data
X_values = pad_data(np.array(list(hello_data) + list(abort_data) + list(warn_data)))
# integer class labels: 0 = hello, 1 = abort, 2 = warn
target_values = np.array(
[0] * len(hello_data) + [1] * len(abort_data) + [2] * len(warn_data)
)
# lstm model
import numpy as np
from numpy import mean
from numpy import std
from numpy import dstack
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense, Masking
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LSTM
from keras.utils.np_utils import to_categorical
# from keras.utils import np_utils
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import pad_sequences
# split the data into train and test
X_train_all, X_test_all, y_train, y_test = train_test_split(
X_values, target_values, test_size=0.2, random_state=42, stratify=target_values
)
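# Because stratify=target_values was used, the class proportions should be (almost) identical in both splits; a quick check (label order: 0 = hello, 1 = abort, 2 = warn):
print("train distribution:", np.bincount(y_train))
print("test distribution:", np.bincount(y_test))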
# Function used to extract polar angles, polar velocity and polar acceleration separately.
def extract_data(data, index, index2=None):
res = []
for sample in data:
new_sample = []
for frame in sample:
polar_angles = []
for joint in frame:
polar_angles.append(joint[index])
if index2 is not None:
for joint in frame:
polar_angles.append(joint[index2])
new_sample.append(polar_angles)
res.append(new_sample)
return np.array(res)
# Calling extract_data() to fetch only polar angles for testing with LSTM.
X_train = extract_data(X_train_all, 2)
X_test = extract_data(X_test_all, 2)
# fit and evaluate a model
def evaluate_model(
trainX,
trainy,
testX,
testy,
neurons=50,
num_layers=1,
dropout=0.5,
input_shape=(49, 6),
):
verbose, epochs, batch_size = 0, 15, 32
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], 3
model = Sequential()
# Masking is done to ignore padded data.
model.add(Masking(mask_value=0.0, input_shape=input_shape))
if num_layers == 1:
model.add(LSTM(neurons, input_shape=(n_timesteps, n_features)))
else:
for i in range(num_layers):
if i == 0:
# First layer requires input shape
model.add(
LSTM(
neurons,
input_shape=(n_timesteps, n_features),
return_sequences=True,
)
)
model.add(Dense(neurons, activation="relu")) # z
elif i == num_layers - 1:
# Last layer does not return sequences
model.add(LSTM(neurons))
model.add(Dense(neurons, activation="relu")) # z
else:
# Intermediate layers return sequences
model.add(LSTM(neurons, return_sequences=True))
model.add(Dense(neurons, activation="relu")) # z
model.add(Dropout(dropout))
model.add(Dense(neurons, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
# fit network
print(model.summary())
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
# evaluate model
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
return accuracy
# summarize scores
from sklearn.metrics import classification_report
def summarize_results(scores):
best_params, best_score = max(scores.items(), key=lambda x: x[1])
score_list = list(scores.values())
mean_acc = mean(score_list)
std_acc = std(score_list)
print(f"Best Parameters: {best_params}, Best Score: {best_score}")
print("Accuracy: %.3f%% (+/-%.3f)" % (mean_acc, std_acc))
# run an experiment
def run_experiment(input_shape=(49, 6)):
# repeat experiment
scores = {}
neurons_ls = [50, 100, 150, 200]
dropout_ls = [0.2, 0.5, 0.7]
layers_ls = [1, 2, 3, 4]
for neurons in neurons_ls:
for layers in layers_ls:
for rate in dropout_ls:
score = evaluate_model(
X_train, y_train, X_test, y_test, neurons, layers, rate, input_shape
)
score = score * 100.0
print(
"> with LSTM (%d neurons, %d layers, %.1f dropout): %.3f"
% (neurons, layers, rate, score)
)
params = f"{layers}_{neurons}_{rate}"
scores[params] = score
# summarize results
summarize_results(scores)
return scores
# run the experiment
exp1_scores = run_experiment()
# function to document accuracies obtained with different combinations of parameters
import csv
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/exp1-scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in exp1_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
#
# ## Store the velocity and acceleration in the 3rd and 4th component of the dataset
# Change the position of velocity from 4th component to 3rd component
def shift_velocity(data):
for sample in data:
for i in range(len(sample)):
for j in range(len(sample[i])):
if i == 0:
# setting the polar velocity of the first frame to 0
sample[i][j][2] = 0
else:
                    # move the polar velocity from the 4th component (index 3) into the 3rd component (index 2)
sample[i][j][2] = sample[i][j][3]
sample[i][j][3] = 0
# Calculating polar acceleration and storing it in 4th component
def update_polar_acceleration(data):
for sample in data:
for i in range(1, len(sample)):
            for j in range(len(sample[i])):
                # set acceleration as the difference between the velocities of the current and previous frame
                sample[i][j][3] = sample[i][j][2] - sample[i - 1][j][2]
shift_velocity(X_train_all)
shift_velocity(X_test_all)
update_polar_acceleration(X_train_all)
update_polar_acceleration(X_test_all)
# Calling extract_data() to fetch only polar velocity for testing with LSTM.
X_train_polar_velocity = extract_data(X_train_all, 2)
X_test_polar_velocity = extract_data(X_test_all, 2)
X_train = X_train_polar_velocity
X_test = X_test_polar_velocity
# run the experiment
exp2_scores = run_experiment()
import csv
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/exp2-scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in exp2_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
# Calling extract_data() to fetch only polar acceleration for testing with LSTM.
X_train_polar_acceleration = extract_data(X_train_all, 3)
X_test_polar_acceleration = extract_data(X_test_all, 3)
X_train = X_train_polar_acceleration
X_test = X_test_polar_acceleration
# run the experiment
exp3_scores = run_experiment()
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/exp3-scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in exp3_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
#
# ## Data preparation for LSTM by Keras
#
# ## Split data to train(80%) and test(20%)
#
# ## LSTM model performance with different configurations
#
# ## LSTM model performance with different datasets
#
# ## Reasons for the varying performance due to different model configurations or datasets
#
# # classification of multiple gesture sequences(milestone 3)
# ## Creation of Concatenated videos with two gestures in a sequence
# ### All possible ordered cases out of your assigned 3 gestures must be created and saved in shared folders with matching names
# ---
gestures_video_path = os.path.join(gestures_data_path, "gestures_visualized")
os.path.exists(
"./drive/My Drive/CS256Project/data/gestures_visualized/combined_hello_abort"
)
def get_data(class_label):
data = {
"boxes": [],
"segments": [],
"keypoints": [],
"metadata": [],
"sample_number": [],
}
data_path = os.path.join(gestures_2d_path, class_label)
for filename in os.listdir(data_path):
file_path = os.path.join(data_path, filename)
l = np.load(file_path, allow_pickle=True)
for file_ in l.files:
data.get(file_, []).append(l[file_])
data.get("sample_number", []).append(filename.split("_", 1)[1].split(".")[0])
return data
#
# ## Creation of joint coordinate profiles of concatenated gestures
# ---
def create_concatenated_numpy_arrays(gesture1_label, gesture2_label):
gesture1_path = os.path.join(gestures_2d_path, gesture1_label)
gesture2_path = os.path.join(gestures_2d_path, gesture2_label)
combined_gestures_path = os.path.join(
"/kaggle/working/", f"combined1_{gesture1_label}_{gesture2_label}"
)
if not os.path.exists(combined_gestures_path):
os.mkdir(combined_gestures_path)
for file1 in os.listdir(gesture1_path):
file1_path = os.path.join(gesture1_path, file1)
np_arr1 = np.load(file1_path, allow_pickle=True)["keypoints"]
sample_num_g1 = file1.split("_", 1)[1].split(".", 1)[0]
for file2 in os.listdir(gesture2_path):
file2_path = os.path.join(gesture2_path, file2)
np_arr2 = np.load(file2_path, allow_pickle=True)["keypoints"]
print(np_arr1.shape, np_arr2.shape)
sample_num_g2 = file2.split("_", 1)[1].split(".", 1)[0]
final = np.concatenate((np_arr1, np_arr2), axis=0)
combined_file_name = f"{combined_gestures_path}/{gesture1_label}_{sample_num_g1}_{gesture2_label}_{sample_num_g2}.mp4.npz"
np.savez(combined_file_name, final)
create_concatenated_numpy_arrays("abort", "hello")
create_concatenated_numpy_arrays("hello", "abort")
create_concatenated_numpy_arrays("hello", "warn")
create_concatenated_numpy_arrays("warn", "hello")
create_concatenated_numpy_arrays("warn", "abort")
create_concatenated_numpy_arrays("abort", "warn")
type(hello_data)
new_hello_data = pad_data(hello_data)
new_warn_data = pad_data(warn_data)
new_abort_data = pad_data(abort_data)
def create_concatenated_numpy_arrays_for_code(gesture1_label, gesture2_label):
# gesture1_path = os.path.join(gestures_2d_path, gesture1_label)
# gesture2_path = os.path.join(gestures_2d_path, gesture2_label)
# combined_gestures_path = os.path.join(gestures_data_path, f"combined_gesture_arrays/combined_{gesture1_label}_{gesture2_label}")
# if not os.path.exists(combined_gestures_path):
# os.mkdir(combined_gestures_path)
gestures = {"hello": hello_data, "warn": warn_data, "abort": abort_data}
gesture1_data = gestures[gesture1_label]
gesture2_data = gestures[gesture2_label]
new_concatenated_data = []
for sample1 in gesture1_data:
for sample2 in gesture2_data:
new_sample = np.concatenate((sample1, sample2), axis=0)
new_concatenated_data.append(new_sample)
print(len(new_concatenated_data))
return new_concatenated_data
hello_abort_data = create_concatenated_numpy_arrays_for_code("hello", "abort")
abort_hello_data = create_concatenated_numpy_arrays_for_code("abort", "hello")
hello_warn_data = create_concatenated_numpy_arrays_for_code("hello", "warn")
warn_hello_data = create_concatenated_numpy_arrays_for_code("warn", "hello")
abort_warn_data = create_concatenated_numpy_arrays_for_code("abort", "warn")
warn_abort_data = create_concatenated_numpy_arrays_for_code("warn", "abort")
#
# ## Modeling
# ---
# #### CONV LSTM MODEL
X_train_convlstm = extract_data(X_train_all, 2, 3)
X_test_convlstm = extract_data(X_test_all, 2, 3)
X_train_convlstm.shape, X_test_convlstm.shape
from keras.models import Sequential
from keras.layers import ConvLSTM2D, Conv2D, MaxPooling2D, Flatten, Dense
def evaluate_convlstm_model(
trainX,
trainy,
testX,
testy,
neurons=50,
num_layers=1,
dropout=0.5,
input_shape=(1, 1, 49, 12),
):
verbose, epochs, batch_size = 0, 30, 32
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], 3
trainX = np.expand_dims(trainX, axis=1)
trainX = np.expand_dims(trainX, axis=1)
testX = np.expand_dims(testX, axis=1)
testX = np.expand_dims(testX, axis=1)
model = Sequential()
if num_layers == 1:
model.add(
ConvLSTM2D(
filters=64,
input_shape=(1, 1, 49, 12),
kernel_size=(2, 2),
padding="same",
return_sequences=True,
)
)
model.add(Dropout(rate=dropout))
else:
for i in range(num_layers):
if i == 0:
model.add(
ConvLSTM2D(
filters=64,
input_shape=(1, 1, 49, 12),
kernel_size=(2, 2),
padding="same",
return_sequences=True,
)
)
model.add(Dropout(rate=dropout))
else:
model.add(
ConvLSTM2D(
filters=64,
kernel_size=(2, 2),
padding="same",
return_sequences=True,
)
)
model.add(Dropout(rate=dropout))
model.add(Flatten())
model.add(Dense(units=neurons, activation="relu"))
model.add(Dense(units=3, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["sparse_categorical_accuracy"],
)
print(model.summary())
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
return accuracy, model
from sklearn.metrics import classification_report
def summarize_results_lstm(scores):
print("Average Accuracy: %.3f%% (+/-%.3f)" % (mean(scores), std(scores)))
print("Best Accuracy: %.3f%%" % (max(scores)))
def run_experiment_convlstm(input_shape=(49, 6)):
# repeat experiment
scores = {}
neurons_ls = [50, 75, 100]
dropout_ls = [0.2, 0.5, 0.7]
layers_ls = [1, 2, 3]
best_model = None
best_accuracy = 0
for neurons in neurons_ls:
for layers in layers_ls:
for rate in dropout_ls:
score, model = evaluate_convlstm_model(
X_train_convlstm,
y_train,
X_test_convlstm,
y_test,
neurons,
layers,
rate,
)
score = score * 100.0
print(
"> with ConvLSTM (%d neurons, %d layers, %.1f dropout): %.3f"
% (neurons, layers, rate, score)
)
params = f"{layers}_{neurons}_{rate}"
scores[params] = score
if score > best_accuracy:
best_accuracy = score
best_model = model
# summarize results
summarize_results(scores)
return scores, best_model
convexp_scores, best_model = run_experiment_convlstm()
best_model.save(
"/content/drive/MyDrive/CS256Project/Milestone3-results/best_convlstm_model_exp4.h5"
)
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/best_convlstm_model_exp4_scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in convexp_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
#
# ## Discussion of your results using classification report
# ---
from keras.models import load_model
new_acc, model_test2 = evaluate_convlstm_model(
X_train_convlstm, y_train, X_test_convlstm, y_test, 75, 1, 0.5
)
model_test2.save(
"/content/drive/MyDrive/CS256Project/Milestone3-results/model_test_2.h5"
)
new_model_test = load_model(
"/content/drive/MyDrive/CS256Project/Milestone3-results/new_model_test.h5"
)
test_conv_model = load_model(
"/content/drive/MyDrive/CS256Project/Milestone3-results/best_convlstm_model_exp3.h5"
)
testdata = np.expand_dims(pad_data(np.array(list(abort_data))), axis=1)
testdata.shape
# test_data = extract_data(X_test_convlstm, 2, 3)
test_data = np.expand_dims(X_test_convlstm, axis=1)
test_data = np.expand_dims(test_data, axis=1)
test_data.shape
x = np.expand_dims(test_data[0], axis=2)
x.shape
preds = test_conv_model.predict(test_data)
correct_preds = 0
test_labels = []
for pred in preds:
test_labels.append(np.argmax(pred))
if np.argmax(pred) == 0:
correct_preds += 1
correct_preds
test_labels
preds[0]
new_preds = new_model_test.predict(test_data)
new_preds[0]
from sklearn.metrics import confusion_matrix, accuracy_score
import seaborn as sns
# Generate confusion matrix
cm = confusion_matrix(y_test, test_labels)
print(accuracy_score(y_test, test_labels))
# Plot confusion matrix
sns.heatmap(cm, annot=True, cmap="Blues")
plt.xlabel("Predicted Labels")
plt.ylabel("True Labels")
plt.show()
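# To complement the confusion matrix, the per-class precision/recall/F1 promised by this section's heading can be printed directly (label order follows target_values: 0 = hello, 1 = abort, 2 = warn):
print(
    classification_report(
        y_test, test_labels, target_names=["hello", "abort", "warn"], zero_division=0
    )
)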
new_hello_abort_data = np.array(list(hello_abort_data))
print(new_hello_abort_data.shape)
new_abort_hello_data = np.array(list(abort_hello_data))
print(new_abort_hello_data.shape)
test_data_all = extract_data(hello_abort_data, 2, 3)
# print(test_data_all.shape)
# test_data_all = np.expand_dims(test_data_all, axis=1)
# test_data_all = np.expand_dims(test_data_all, axis=1)
test_data_all.shape
average_framLen = sum([len(x) for x in test_data_all]) // len(test_data_all)
average_framLen / 2
hello_data[0][0]
abort_data[0][0]
warn_data[0][0]
test_data_all_2 = extract_data(new_abort_hello_data, 2, 3)
len(test_data_all_2[0][0])
# Function used to extract polar angles, polar velocity and polar acceleration separately.
def extract_test_data(data, index, index2=None):
new_sample = []
for frame in data:
polar_angles = []
for joint in frame:
polar_angles.append(joint[index])
if index2 is not None:
for joint in frame:
polar_angles.append(joint[index2])
new_sample.append(polar_angles)
return np.array(new_sample)
# new_hello_abort_data[0][0:49].shape
testdataaa = new_hello_abort_data[0][0:49]
print(testdataaa.shape)
test_data = extract_test_data(testdataaa, 2, 3)
print(test_data.shape)
test_data = np.expand_dims(test_data, axis=0)
test_data = np.expand_dims(test_data, axis=1)
test_data.shape
X_train_3dcnn = extract_data(X_train_all, 2, 3)
X_test_3dcnn = extract_data(X_test_all, 2, 3)
X_train_3dcnn.shape, X_test_3dcnn.shape
X_train_3dcnn = np.expand_dims(X_train_3dcnn, axis=1)
X_train_3dcnn = np.expand_dims(X_train_3dcnn, axis=1)
X_test_3dcnn = np.expand_dims(X_test_3dcnn, axis=1)
X_test_3dcnn = np.expand_dims(X_test_3dcnn, axis=1)
X_train_3dcnn.shape, X_test_3dcnn.shape
from keras.models import Sequential
from keras.layers import Conv3D, MaxPooling3D, Flatten, Dense
from keras.regularizers import l1, l2
# Define the model
model = Sequential()
# Add the layers
model.add(
Conv3D(
64,
kernel_size=(1, 1, 2),
activation="relu",
input_shape=(1, 1, 49, 12),
kernel_regularizer=l2(0.05),
)
)
model.add(Dropout(rate=0.2))
model.add(MaxPooling3D(pool_size=(1, 1, 2)))
# model.add(Conv3D(64, kernel_size=(1, 1, 2), activation='relu', kernel_regularizer=l2(0.01)))
# model.add(MaxPooling3D(pool_size=(1, 1, 2)))
model.add(
Conv3D(64, kernel_size=(1, 1, 2), activation="relu", kernel_regularizer=l2(0.05))
)
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(3, activation="softmax"))
# Compile the model
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
# Print the model summary
model.summary()
model.fit(
X_train_3dcnn,
y_train,
batch_size=32,
epochs=100,
validation_data=(X_test_3dcnn, y_test),
)
probabilities = []
window1, window2 = hello_data[0], abort_data[0]
len(window1)
window1 = np.expand_dims(window1, axis=0)
window1 = np.expand_dims(window1, axis=1)
window1.shape
window2 = np.expand_dims(window2, axis=0)
window2 = np.expand_dims(window2, axis=1)
window2 = np.expand_dims(window2, axis=1)
window2 = np.pad(window2, ((0, 0), (0, 0), (0, 0), (0, 22), (0, 0)), mode="constant")
window1.shape, window2.shape
gesture1_pred, gesture2_pred = model.predict(window1, verbose=0), model.predict(
    window2, verbose=0
)
print(gesture1_pred, gesture2_pred)
import statistics
from collections import Counter
# probabilities = []
correct = 0
# print(len(test_data_all[1]))
for sample in test_data_all[20:30]:
for i in range(0, len(sample) - 29 + 1, 1):
print(i)
window = sample[i : i + 29]
window = np.expand_dims(window, axis=0)
window = np.expand_dims(window, axis=1)
window = np.expand_dims(window, axis=1)
window = np.pad(
window, ((0, 0), (0, 0), (0, 0), (0, 20), (0, 0)), mode="constant"
)
window_probs = model.predict(window, verbose=0)
probabilities.append(np.round(window_probs, decimals=3))
probabilities
##conv lstm
import statistics
from collections import Counter
# probabilities = []
correct = 0
# print(len(test_data_all[1]))
for sample in test_data_all[20:30]:
probabilities = []
probs = {0: 0, 1: 0, 2: []}
for i in range(0, len(sample) - 49 + 1, 1):
# for i in range(98, 49, -2):
# print(i, i-49)
window = sample[i : i + 49]
# print(window.shape)
window = np.expand_dims(window, axis=0)
window = np.expand_dims(window, axis=1)
window = np.expand_dims(window, axis=1)
# print(window.shape)
window_probs = new_model_test.predict(window, verbose=0)
# print(window_probs)
# Get the indices of the sorted array
# sorted_indices1 = np.argsort(window_probs[0])
# # print(sorted_indices1)
# # Get the indices of the top two maximum elements
# top_two_indices1 = sorted_indices1[-2:][::-1]
# # print(top_two_indices1)
probabilities.append(window_probs)
# print(probabilities)
# # Count the occurrence of each pair
# # count_pairs = Counter(map(tuple, probabilities))
# probs = {}
# for prob in probabilities:
# # print(prob)
# max_ele = np.max(prob)
# probs[max_ele] = np.argmax(prob)
# print(probs)
# Get the most common pair
# most_common_pair = count_pairs.most_common(1)[0][0]
# print(most_common_pair)
# Split the array into two halves
half1 = probabilities[: len(probabilities) // 2]
half2 = probabilities[len(probabilities) // 2 :]
col_max1 = np.amax(half1, axis=0)
col_max2 = np.amax(half2, axis=0)
first = np.argmax(col_max1)
second = np.argmax(col_max2)
print(col_max1, col_max2)
print(first, second)
# sliding window
# Define the sliding window parameters
window_size = 49
stride = 10
# Initialize a list to store the predicted probabilities for each window
probabilities = []
for sample in test_data_all:
    # Loop over each concatenated sample with the sliding window approach
    for i in range(0, len(sample) - window_size + 1, stride):
        # Extract the current window of data
        window = sample[i : i + window_size]
        # Reshape to the model's expected input of (batch, 1, 1, window_size, features)
        window = np.expand_dims(window, axis=0)
        window = np.expand_dims(window, axis=1)
        window = np.expand_dims(window, axis=1)
        # Make a prediction for the window and get the probabilities
        window_probs = model.predict(window, verbose=0)
        # Append the probabilities to the list
        probabilities.append(window_probs)
# Concatenate the list of probabilities into a single array
probabilities = np.concatenate(probabilities)
# Get the predicted classes for each window by taking the argmax of the probabilities
predicted_classes = np.argmax(probabilities, axis=1)
# Get the two gestures with maximum probability for each window
top_two_classes = np.argsort(probabilities, axis=1)[:, -2:]
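# A small follow-up sketch (my own addition) mapping the predicted class indices back to gesture names, so each window's top-two guess is human readable (0 = hello, 1 = abort, 2 = warn as defined when building target_values):
label_names = np.array(["hello", "abort", "warn"])
for window_idx, pair in enumerate(top_two_classes[:5]):
    # pair is ordered from 2nd-most to most probable class, so reverse it for display
    print(f"window {window_idx}: top guesses = {label_names[pair][::-1]}")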
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/822/129822348.ipynb
| null | null |
[{"Id": 129822348, "ScriptId": 38584606, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1643975, "CreationDate": "05/16/2023 18:07:30", "VersionNumber": 1.0, "Title": "Anjan-Topics-AI-Final-Project", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 1063.0, "LinesInsertedFromPrevious": 1063.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
#
# CS256 Team Project, 2023 Spring
# Team 1
# Anjana Priyatham Tatavarthi, Daniel Quintana, Lifan Zeng, Lohith Nagaraja, Mrunal Deepak Zambre, Priya Harika Yerapothu, Srinivas Rao Chavan
# #### Essential info
# - Machine learning algorithms used: LSTM, ConvLSTM, and a 3D CNN
# - Bonus part(s): ??? and by whom (if any)
# - Any other things ???
# ### Who did what
# | What | Description | By |
# | ----------- | ----------- | ----------- |
# | Data preparation| Examine the data and peroform data cleansing | John smith and Mary Joe|
# | item#2 | what was done in item#2 | by whom |
# | more items | what was done in more items | ... |
# | ... | ... | ... |
# ### Link to your online discussion forum for this project: [discussion link](http://???)
# The following pair of gesture datasets are assigned to
# (a) Warn, Turn left and turn right: Team #4
# b) Warn, Turn and circle : Team #3
# c) Warn, Stop and no : Team #2
# d) Warn, Hello and abort : Team #1
# ## We are working on classification of the Warn, Hello, and Abort gestures
# Please make sure
# - Provide clear and explicit explanation on the process and results in your work
# - No ad hoc manual methodology should be used in any task below.
# - Provide generic solution that is not specific/valid to your data file only
# - Comments, in Markdown cells, should be added wherever appropriate
# - Always use charts, figures, or other visual formats to present your results
# - Copy and rename this file as teamXfinalproject.ipynb in your submission
# - Your submitted Colab can be run from the beginning to the end without any error ( you may opt for a different Colab file to showcase your bonus parts).
# - Make sure all your Colab and data files are viewable to all SJSU account holders.
# ## Project Result Outline
# - [EDA and Data Preparation (selected milestone 1 content)](#EDA)
# - [Shift the origin of the data to the center of the body](#Shifttheorigin)
# - [Evaluating polar angle, angular velocity, and angular acceleration of from (x,y)](#angle-data)
# - [Store/write the polar angle velocity and polar angle acceleration data in the 3rd and the 4th component of each dataset.](#storing-angle-data)
# - [Classification with LSTM (selected milestone 2 content)](#LSTM-classification)
# - [Data preparation for LSTM by Keras](#Keras-data)
# - [Split data to train(80%) and test(20%)](#Train-test-split)
# - [LSTM model performance with different configurations](#lstm-model)
# - [LSTM model performance with different datasets](#differnt-datasets)
# - [Reasons for the varying performance due to different model configurations or datasets](#performance-reasons)
# - [classification of multiple gesture sequences(milestone 3)](#sequence)
# - [Creation of Concatenated videos with two gestures in a sequence](#videos)
# - [Creation of joint coordinate profiles of concatenated gestures](#joints)
# - [Modeling](#model)
# - [Discussion of your results using classification report](#classification_report)
# - [Explain your results](#explain)
#
#
# - [Bonus item B1](#bonus-B1)
# - [9 gestures with 2-gesture sequence recognition](#9-2-gestures)
# - [Bonus item B2](#bonus-B2)
# - [9 gestures with 3-gesture sequence recognition](#9-3-gestures)
# - [Additional related results not shown above](#additional_results)
# **Load packages:**
import os
# Everyone must put the copied folder in the exact path shown below so you collaborate and get graded easily.
# **No project score will be given for not following this folder configuration**
# (Our grader is *not responsible for figuring out your own* preferred Google *folder* configuration)
# **Only Keras or scikit_learn should be used in this project**
root_path = "/kaggle/input/cs-256/CS256Project/data"
(os.path.exists(root_path)) # Checking if the data paths indeed exist and are valid.
# - Import libraries and models needed for this work
from numpy import mean
from numpy import std
from numpy import dstack
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LSTM
from keras.utils.np_utils import to_categorical
from matplotlib import pyplot
import math
from tensorflow.keras.preprocessing.sequence import pad_sequences
import csv
#
# ## EDA and Data Preparation (selected milestone 1 content)
# ---
import colorsys
import os
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import numpy as np
# You may use [this chrome app](https://script.google.com/macros/s/AKfycbxbGNGajrxv-HbX2sVY2OTu7yj9VvxlOMOeQblZFuq7rYm7uyo/exec) to copy the CS256Project folder that I shared with you to your own drive.
# Everyone must put the copied folder in the exact path shown below so you collaborate and get graded easily.
# **No project score will be given for not following this folder configuration**
# (Our grader is *not responsible for figuring out your own* preferred Google *folder* configuration)
# Basically, you simply need to use the chrome app above to copy the entire CS256Project folder to your Google doc root folder
gestures_data_path = "/kaggle/input/cs-256/CS256Project/data/"
gestures_2d_path = os.path.join(gestures_data_path, "gestures_basic_d2")
(
os.path.exists(gestures_data_path),
os.path.exists(gestures_2d_path),
) # Checking if the data paths indeed exist and are valid.
# Sample Datapoint Properties
class_label = "abort"
video_id = "1_0"
n_frames = 24
# ## Exploring data
# Extract a sample data and see what is in it
sample_2d_data_path = os.path.join(
gestures_2d_path, class_label, f"{class_label}_{video_id}.mp4.npz"
)
data = np.load(sample_2d_data_path, allow_pickle=True)
# What does the data contain?
data.files
data["segments"]
data["keypoints"].shape
data["keypoints"][0][1]
data["boxes"].shape
data["boxes"][0][1][0]
data["metadata"]
# ### keypoints is where the (x,y) coordinate values are stored
coords = data["keypoints"]
coords[0, 1].shape
# ### There are 17 labeled joints as generated from a research algorithm based on the human body image.
# **The joint is indexed as follows**.
# 0 - Nose
# 1 - left eye
# 2 - Right eye
# 3 - left ear
# 4 - Right ear
# 5 - left shoulder
# 6 - Right shoulder
# 7 - left elbow
# 8 - Right elbow
# 9 - left wrist
# 10 - Right wrist
# 11 - left hip
# 12 - Right hip
# 13 - left knee
# 14 - Right knee
# 15 - left Ankle
# 16 - Right Ankle
# ### (x,y) coordinate of the first frame in the given sample
coords[0][1][0][:, 16]
# The first two components are (x,y) coordinate values
# ### Let's take a transpose so we can access the (x,y) coordinates more easily
# [0][1][0] because... [frame_id][always 1][0 to remove extra dimension]
# transpose(1, 0) to make it shape (17, 4) so first index is keypoint index
coords_0 = coords[0][1][0].transpose(1, 0)
coords_0.shape, coords_0
## Fetch only the required joints from the keypoints in the data
def get_data(class_label):
data = []
data_path = os.path.join(gestures_2d_path, class_label)
for filename in os.listdir(data_path):
# get path of each file
file_path = os.path.join(data_path, filename)
# load the numpy array from the input file
l = np.load(file_path, allow_pickle=True)
sample = []
for i, frame in enumerate(l["keypoints"]):
sample.append(frame[1][0])
data.append(sample)
return data
hello_data = get_data("hello")
abort_data = get_data("abort")
warn_data = get_data("warn")
print(hello_data[0][0])
len(hello_data)
len(abort_data)
len(warn_data)
hello_data[0][0].transpose(1, 0)
def transpose_keypoints(data):
keypoints = []
for frames in data:
new_frames = []
for frame in frames:
transposed_frame = frame.transpose()
new_frames.append(np.array(transposed_frame))
keypoints.append(np.array(new_frames))
return np.array(keypoints)
hello_data = transpose_keypoints(hello_data)
abort_data = transpose_keypoints(abort_data)
warn_data = transpose_keypoints(warn_data)
hello_data[0][0]
#
# ## Shift the origin to the nose and recalculate the new (x,y) coordinate for all datasets and
# # overwrite the original (x,y) values
print(hello_data[0][0])
def shift_origin(keypoints):
for frames in keypoints:
for frame in frames:
coordinates = frame
x_origin, y_origin = coordinates[0][0], coordinates[0][1]
for i in range(len(coordinates)):
coordinates[i][0] -= x_origin
coordinates[i][1] -= y_origin
shift_origin(hello_data)
shift_origin(abort_data)
shift_origin(warn_data)
hello_data[0][0]
#
# # Calculate the polar angle velocity and the polar angle acceleration based on the transformed (x,y) from above
def update_polar_angles(keypoints):
for frames in keypoints:
for frame in frames:
coordinates = frame
for i in range(1, len(coordinates)):
if coordinates[i][0] != 0:
coordinates[i][2] = math.atan(coordinates[i][1] / coordinates[i][0])
def update_polar_velocity(keypoints):
    # Angular velocity = change in polar angle between consecutive frames,
    # written into the 4th component (index 3) of every joint except the nose.
    for frames in keypoints:
        for i in range(len(frames)):
            for j in range(1, len(frames[i])):
                if i == 0:
                    # the first frame has no predecessor, so its velocity is 0
                    frames[i][j][3] = 0
                else:
                    frames[i][j][3] = frames[i][j][2] - frames[i - 1][j][2]
update_polar_angles(hello_data)
update_polar_angles(abort_data)
update_polar_angles(warn_data)
update_polar_velocity(hello_data)
update_polar_velocity(abort_data)
update_polar_velocity(warn_data)
def select_required_gestures(data):
new_data = []
for sample in data:
new_sample = []
for frame in sample:
new_frame = frame[5:11]
new_sample.append(new_frame)
new_data.append(new_sample)
return new_data
hello_data = select_required_gestures(hello_data)
abort_data = select_required_gestures(abort_data)
warn_data = select_required_gestures(warn_data)
print(max([len(hello) for hello in hello_data]))
print(max([len(abort) for abort in abort_data]))
print(max([len(warn) for warn in warn_data]))
#
# # Classification with LSTM (selected milestone 2 content)
# Padding the data to fix the unequal number of frames across samples of the dataset
def pad_data(data):
# max length set to 49 since the longest video has 49 frames
max_length = 49
# using pad_sequences() function to pad 0s to each sample such that all samples have the same number of frames
return pad_sequences(data, maxlen=max_length, padding="post", dtype="float32")
# combining both hello and abort samples and padding the data
X_values = pad_data(np.array(list(hello_data) + list(abort_data) + list(warn_data)))
# target value has 2 variables, isHello and isAbort. Target values for hello sample = [1,0] and target values for abort sample = [0,1]
target_values = np.array(
[0] * len(hello_data) + [1] * len(abort_data) + [2] * len(warn_data)
)
# lstm model
import numpy as np
from numpy import mean
from numpy import std
from numpy import dstack
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense, Masking
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LSTM
from keras.utils.np_utils import to_categorical
# from keras.utils import np_utils
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import pad_sequences
# split the data into train and test
X_train_all, X_test_all, y_train, y_test = train_test_split(
X_values, target_values, test_size=0.2, random_state=42, stratify=target_values
)
# Function used to extract polar angles, polar velocity and polar acceleration separately.
def extract_data(data, index, index2=None):
res = []
for sample in data:
new_sample = []
for frame in sample:
polar_angles = []
for joint in frame:
polar_angles.append(joint[index])
if index2 is not None:
for joint in frame:
polar_angles.append(joint[index2])
new_sample.append(polar_angles)
res.append(new_sample)
return np.array(res)
# Calling extract_data() to fetch only polar angles for testing with LSTM.
X_train = extract_data(X_train_all, 2)
X_test = extract_data(X_test_all, 2)
# fit and evaluate a model
def evaluate_model(
trainX,
trainy,
testX,
testy,
neurons=50,
num_layers=1,
dropout=0.5,
input_shape=(49, 6),
):
verbose, epochs, batch_size = 0, 15, 32
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], 3
model = Sequential()
# Masking is done to ignore padded data.
model.add(Masking(mask_value=0.0, input_shape=input_shape))
if num_layers == 1:
model.add(LSTM(neurons, input_shape=(n_timesteps, n_features)))
else:
for i in range(num_layers):
if i == 0:
# First layer requires input shape
model.add(
LSTM(
neurons,
input_shape=(n_timesteps, n_features),
return_sequences=True,
)
)
model.add(Dense(neurons, activation="relu")) # z
elif i == num_layers - 1:
# Last layer does not return sequences
model.add(LSTM(neurons))
model.add(Dense(neurons, activation="relu")) # z
else:
# Intermediate layers return sequences
model.add(LSTM(neurons, return_sequences=True))
model.add(Dense(neurons, activation="relu")) # z
model.add(Dropout(dropout))
model.add(Dense(neurons, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
# fit network
print(model.summary())
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
# evaluate model
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
return accuracy
# summarize scores
from sklearn.metrics import classification_report
def summarize_results(scores):
best_params, best_score = max(scores.items(), key=lambda x: x[1])
score_list = list(scores.values())
mean_acc = mean(score_list)
std_acc = std(score_list)
print(f"Best Parameters: {best_params}, Best Score: {best_score}")
print("Accuracy: %.3f%% (+/-%.3f)" % (mean_acc, std_acc))
# run an experiment
def run_experiment(input_shape=(49, 6)):
# repeat experiment
scores = {}
neurons_ls = [50, 100, 150, 200]
dropout_ls = [0.2, 0.5, 0.7]
layers_ls = [1, 2, 3, 4]
for neurons in neurons_ls:
for layers in layers_ls:
for rate in dropout_ls:
score = evaluate_model(
X_train, y_train, X_test, y_test, neurons, layers, rate, input_shape
)
score = score * 100.0
print(
"> with LSTM (%d neurons, %d layers, %.1f dropout): %.3f"
% (neurons, layers, rate, score)
)
params = f"{layers}_{neurons}_{rate}"
scores[params] = score
# summarize results
summarize_results(scores)
return scores
# run the experiment
exp1_scores = run_experiment()
# function to document accuracies obtained with different combinations of parameters
import csv
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/exp1-scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in exp1_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
#
# ## Store the velocity and acceleration in the 3rd and 4th component of the dataset
# Change the position of velocity from 4th component to 3rd component
def shift_velocity(data):
for sample in data:
for i in range(len(sample)):
for j in range(len(sample[i])):
if i == 0:
# setting the polar velocity of the first frame to 0
sample[i][j][2] = 0
else:
# moving polar velocity from third component to second
sample[i][j][2] = sample[i][j][3]
sample[i][j][3] = 0
# Calculating polar acceleration and storing it in 4th component
def update_polar_acceleration(data):
for sample in data:
for i in range(1, len(sample)):
for j in range(1, len(sample[i][0])):
# set acceleration value as the different between velocities of previous and current frame
sample[i][j][3] = sample[i][j][2] - sample[i - 1][j][2]
shift_velocity(X_train_all)
shift_velocity(X_test_all)
update_polar_acceleration(X_train_all)
update_polar_acceleration(X_test_all)
# Calling extract_data() to fetch only polar velocity for testing with LSTM.
X_train_polar_velocity = extract_data(X_train_all, 2)
X_test_polar_velocity = extract_data(X_test_all, 2)
X_train = X_train_polar_velocity
X_test = X_test_polar_velocity
# run the experiment
exp2_scores = run_experiment()
import csv
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/exp2-scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in exp2_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
# Calling extract_data() to fetch only polar acceleration for testing with LSTM.
X_train_polar_acceleration = extract_data(X_train_all, 3)
X_test_polar_acceleration = extract_data(X_test_all, 3)
X_train = X_train_polar_acceleration
X_test = X_test_polar_acceleration
# run the experiment
exp3_scores = run_experiment()
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/exp3-scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in exp3_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
#
# ## Data preparation for LSTM by Keras
#
# ## Split data to train(80%) and test(20%)
#
# ## LSTM model performance with different configurations
#
# ## LSTM model performance with different datasets
#
# ## Reasons for the varying performance due to different model configurations or datasets
#
# # classification of multiple gesture sequences(milestone 3)
# ## Creation of Concatenated videos with two gestures in a sequence
# ### All possible ordered cases out of your assigned 3 gestures must be created and saved in shared folders with matching names
# ---
gestures_video_path = os.path.join(gestures_data_path, "gestures_visualized")
os.path.exists(
"./drive/My Drive/CS256Project/data/gestures_visualized/combined_hello_abort"
)
def get_data(class_label):
data = {
"boxes": [],
"segments": [],
"keypoints": [],
"metadata": [],
"sample_number": [],
}
data_path = os.path.join(gestures_2d_path, class_label)
for filename in os.listdir(data_path):
file_path = os.path.join(data_path, filename)
l = np.load(file_path, allow_pickle=True)
for file_ in l.files:
data.get(file_, []).append(l[file_])
data.get("sample_number", []).append(filename.split("_", 1)[1].split(".")[0])
return data
#
# ## Creation of joint coordinate profiles of concatenated gestures
# ---
def create_concatenated_numpy_arrays(gesture1_label, gesture2_label):
gesture1_path = os.path.join(gestures_2d_path, gesture1_label)
gesture2_path = os.path.join(gestures_2d_path, gesture2_label)
combined_gestures_path = os.path.join(
"/kaggle/working/", f"combined1_{gesture1_label}_{gesture2_label}"
)
if not os.path.exists(combined_gestures_path):
os.mkdir(combined_gestures_path)
for file1 in os.listdir(gesture1_path):
file1_path = os.path.join(gesture1_path, file1)
np_arr1 = np.load(file1_path, allow_pickle=True)["keypoints"]
sample_num_g1 = file1.split("_", 1)[1].split(".", 1)[0]
for file2 in os.listdir(gesture2_path):
file2_path = os.path.join(gesture2_path, file2)
np_arr2 = np.load(file2_path, allow_pickle=True)["keypoints"]
print(np_arr1.shape, np_arr2.shape)
sample_num_g2 = file2.split("_", 1)[1].split(".", 1)[0]
final = np.concatenate((np_arr1, np_arr2), axis=0)
combined_file_name = f"{combined_gestures_path}/{gesture1_label}_{sample_num_g1}_{gesture2_label}_{sample_num_g2}.mp4.npz"
np.savez(combined_file_name, final)
create_concatenated_numpy_arrays("abort", "hello")
create_concatenated_numpy_arrays("hello", "abort")
create_concatenated_numpy_arrays("hello", "warn")
create_concatenated_numpy_arrays("warn", "hello")
create_concatenated_numpy_arrays("warn", "abort")
create_concatenated_numpy_arrays("abort", "warn")
type(hello_data)
new_hello_data = pad_data(hello_data)
new_warn_data = pad_data(warn_data)
new_abort_data = pad_data(abort_data)
def create_concatenated_numpy_arrays_for_code(gesture1_label, gesture2_label):
# gesture1_path = os.path.join(gestures_2d_path, gesture1_label)
# gesture2_path = os.path.join(gestures_2d_path, gesture2_label)
# combined_gestures_path = os.path.join(gestures_data_path, f"combined_gesture_arrays/combined_{gesture1_label}_{gesture2_label}")
# if not os.path.exists(combined_gestures_path):
# os.mkdir(combined_gestures_path)
gestures = {"hello": hello_data, "warn": warn_data, "abort": abort_data}
gesture1_data = gestures[gesture1_label]
gesture2_data = gestures[gesture2_label]
new_concatenated_data = []
for sample1 in gesture1_data:
for sample2 in gesture2_data:
new_sample = np.concatenate((sample1, sample2), axis=0)
new_concatenated_data.append(new_sample)
print(len(new_concatenated_data))
return new_concatenated_data
hello_abort_data = create_concatenated_numpy_arrays("hello", "abort")
abort_hello_data = create_concatenated_numpy_arrays_for_code("abort", "hello")
hello_warn_data = create_concatenated_numpy_arrays_for_code("hello", "warn")
warn_hello_data = create_concatenated_numpy_arrays_for_code("warn", "hello")
abort_warn_data = create_concatenated_numpy_arrays_for_code("abort", "warn")
warn_abort_data = create_concatenated_numpy_arrays_for_code("warn", "abort")
#
# ## Modeling
# ---
# #### CONV LSTM MODEL
X_train_convlstm = extract_data(X_train_all, 2, 3)
X_test_convlstm = extract_data(X_test_all, 2, 3)
X_train_convlstm.shape, X_test_convlstm.shape
from keras.models import Sequential
from keras.layers import ConvLSTM2D, Conv2D, MaxPooling2D, Flatten, Dense
def evaluate_convlstm_model(
trainX,
trainy,
testX,
testy,
neurons=50,
num_layers=1,
dropout=0.5,
input_shape=(1, 1, 49, 12),
):
verbose, epochs, batch_size = 0, 30, 32
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], 3
trainX = np.expand_dims(trainX, axis=1)
trainX = np.expand_dims(trainX, axis=1)
testX = np.expand_dims(testX, axis=1)
testX = np.expand_dims(testX, axis=1)
model = Sequential()
if num_layers == 1:
model.add(
ConvLSTM2D(
filters=64,
input_shape=(1, 1, 49, 12),
kernel_size=(2, 2),
padding="same",
return_sequences=True,
)
)
model.add(Dropout(rate=dropout))
else:
for i in range(num_layers):
if i == 0:
model.add(
ConvLSTM2D(
filters=64,
input_shape=(1, 1, 49, 12),
kernel_size=(2, 2),
padding="same",
return_sequences=True,
)
)
model.add(Dropout(rate=dropout))
else:
model.add(
ConvLSTM2D(
filters=64,
kernel_size=(2, 2),
padding="same",
return_sequences=True,
)
)
model.add(Dropout(rate=dropout))
model.add(Flatten())
model.add(Dense(units=neurons, activation="relu"))
model.add(Dense(units=3, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["sparse_categorical_accuracy"],
)
print(model.summary())
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
return accuracy, model
from sklearn.metrics import classification_report
def summarize_results_lstm(scores):
print("Average Accuracy: %.3f%% (+/-%.3f)" % (mean(scores), std(scores)))
print("Best Accuracy: %.3f%%" % (max(scores)))
def run_experiment_convlstm(input_shape=(49, 6)):
# repeat experiment
scores = {}
neurons_ls = [50, 75, 100]
dropout_ls = [0.2, 0.5, 0.7]
layers_ls = [1, 2, 3]
best_model = None
best_accuracy = 0
for neurons in neurons_ls:
for layers in layers_ls:
for rate in dropout_ls:
score, model = evaluate_convlstm_model(
X_train_convlstm,
y_train,
X_test_convlstm,
y_test,
neurons,
layers,
rate,
)
score = score * 100.0
print(
"> with ConvLSTM (%d neurons, %d layers, %.1f dropout): %.3f"
% (neurons, layers, rate, score)
)
params = f"{layers}_{neurons}_{rate}"
scores[params] = score
if score > best_accuracy:
best_accuracy = score
best_model = model
# summarize results
summarize_results(scores)
return scores, best_model
convexp_scores, best_model = run_experiment_convlstm()
best_model.save(
"/content/drive/MyDrive/CS256Project/Milestone3-results/best_convlstm_model_exp4.h5"
)
with open(
"/content/drive/MyDrive/CS256Project/Milestone3-results/best_convlstm_model_exp4_scores.csv",
"w",
newline="",
) as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["layers", "neurons", "dropout_rate", "score"])
for params, score in convexp_scores.items():
layers, neurons, dropout_rate = params.split("_")
writer.writerow([layers, neurons, dropout_rate, score])
#
# ## Discussion of your results using classification report
# ---
from keras.models import load_model
new_acc, model_test2 = evaluate_convlstm_model(
X_train_convlstm, y_train, X_test_convlstm, y_test, 75, 1, 0.5
)
model_test2.save(
"/content/drive/MyDrive/CS256Project/Milestone3-results/model_test_2.h5"
)
new_model_test = load_model(
"/content/drive/MyDrive/CS256Project/Milestone3-results/new_model_test.h5"
)
test_conv_model = load_model(
"/content/drive/MyDrive/CS256Project/Milestone3-results/best_convlstm_model_exp3.h5"
)
testdata = np.expand_dims(pad_data(np.array(list(abort_data))), axis=1)
testdata.shape
# test_data = extract_data(X_test_convlstm, 2, 3)
test_data = np.expand_dims(X_test_convlstm, axis=1)
test_data = np.expand_dims(test_data, axis=1)
test_data.shape
x = np.expand_dims(test_data[0], axis=2)
x.shape
preds = test_conv_model.predict(test_data)
correct_preds = 0
test_labels = []
for pred in new_preds:
test_labels.append(np.argmax(pred))
if np.argmax(pred) == 0:
correct_preds += 1
correct_preds
test_labels
preds[0]
new_preds = new_model_test.predict(test_data)
new_preds[0]
from sklearn.metrics import confusion_matrix, accuracy_score
import seaborn as sns
# Generate confusion matrix
cm = confusion_matrix(y_test, test_labels)
print(accuracy_score(y_test, test_labels))
# Plot confusion matrix
sns.heatmap(cm, annot=True, cmap="Blues")
plt.xlabel("Predicted Labels")
plt.ylabel("True Labels")
plt.show()
new_hello_abort_data = np.array(list(hello_abort_data))
print(new_hello_abort_data.shape)
new_abort_hello_data = np.array(list(abort_hello_data))
print(new_abort_hello_data.shape)
test_data_all = extract_data(hello_abort_data, 2, 3)
# print(test_data_all.shape)
# test_data_all = np.expand_dims(test_data_all, axis=1)
# test_data_all = np.expand_dims(test_data_all, axis=1)
test_data_all.shape
average_framLen = sum([len(x) for x in test_data_all]) // len(test_data_all)
average_framLen / 2
hello_data[0][0]
abort_data[0][0]
warn_data[0][0]
test_data_all_2 = extract_data(new_abort_hello_data, 2, 3)
len(test_data_all_2[0][0])
# Function used to extract polar angles, polar velocity and polar acceleration separately.
def extract_test_data(data, index, index2=None):
new_sample = []
for frame in data:
polar_angles = []
for joint in frame:
polar_angles.append(joint[index])
if index2 is not None:
for joint in frame:
polar_angles.append(joint[index2])
new_sample.append(polar_angles)
return np.array(new_sample)
# new_hello_abort_data[0][0:49].shape
testdataaa = new_hello_abort_data[0][0:49]
print(testdataaa.shape)
test_data = extract_test_data(testdataaa, 2, 3)
print(test_data.shape)
test_data = np.expand_dims(test_data, axis=0)
test_data = np.expand_dims(test_data, axis=1)
test_data.shape
X_train_3dcnn = extract_data(X_train_all, 2, 3)
X_test_3dcnn = extract_data(X_test_all, 2, 3)
X_train_3dcnn.shape, X_test_3dcnn.shape
X_train_3dcnn = np.expand_dims(X_train_3dcnn, axis=1)
X_train_3dcnn = np.expand_dims(X_train_3dcnn, axis=1)
X_test_3dcnn = np.expand_dims(X_test_3dcnn, axis=1)
X_test_3dcnn = np.expand_dims(X_test_3dcnn, axis=1)
X_train_3dcnn.shape, X_test_3dcnn.shape
from keras.models import Sequential
from keras.layers import Conv3D, MaxPooling3D, Flatten, Dense
from keras.regularizers import l1, l2
# Define the model
model = Sequential()
# Add the layers
model.add(
Conv3D(
64,
kernel_size=(1, 1, 2),
activation="relu",
input_shape=(1, 1, 49, 12),
kernel_regularizer=l2(0.05),
)
)
model.add(Dropout(rate=0.2))
model.add(MaxPooling3D(pool_size=(1, 1, 2)))
# model.add(Conv3D(64, kernel_size=(1, 1, 2), activation='relu', kernel_regularizer=l2(0.01)))
# model.add(MaxPooling3D(pool_size=(1, 1, 2)))
model.add(
Conv3D(64, kernel_size=(1, 1, 2), activation="relu", kernel_regularizer=l2(0.05))
)
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(3, activation="softmax"))
# Compile the model
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
# Print the model summary
model.summary()
model.fit(
X_train_3dcnn,
y_train,
batch_size=32,
epochs=100,
validation_data=(X_test_3dcnn, y_test),
)
probabilities = []
window1, window2 = hello_data[0], abort_data[0]
len(window1)
window1 = np.expand_dims(window1, axis=0)
window1 = np.expand_dims(window1, axis=1)
window1.shape
window2 = np.expand_dims(window2, axis=0)
window2 = np.expand_dims(window2, axis=1)
window2 = np.expand_dims(window2, axis=1)
window2 = np.pad(window2, ((0, 0), (0, 0), (0, 0), (0, 22), (0, 0)), mode="constant")
window1.shape, window2.shape
guesture1_pred, guesture2_pred = model.predict(window1, verbose=0), model.predict(
window1, verbose=0
)
print(guesture1_pred, guesture2_pred)
import statistics
from collections import Counter
# probabilities = []
correct = 0
# print(len(test_data_all[1]))
for sample in test_data_all[20:30]:
for i in range(0, len(sample) - 29 + 1, 1):
print(i)
window = sample[i : i + 29]
window = np.expand_dims(window, axis=0)
window = np.expand_dims(window, axis=1)
window = np.expand_dims(window, axis=1)
window = np.pad(
window, ((0, 0), (0, 0), (0, 0), (0, 20), (0, 0)), mode="constant"
)
window_probs = model.predict(window, verbose=0)
probabilities.append(np.round(window_probs, decimals=3))
probabilities
##conv lstm
import statistics
from collections import Counter
# probabilities = []
correct = 0
# print(len(test_data_all[1]))
for sample in test_data_all[20:30]:
probabilities = []
probs = {0: 0, 1: 0, 2: []}
for i in range(0, len(sample) - 49 + 1, 1):
# for i in range(98, 49, -2):
# print(i, i-49)
window = sample[i : i + 49]
# print(window.shape)
window = np.expand_dims(window, axis=0)
window = np.expand_dims(window, axis=1)
window = np.expand_dims(window, axis=1)
# print(window.shape)
window_probs = new_model_test.predict(window, verbose=0)
# print(window_probs)
# Get the indices of the sorted array
# sorted_indices1 = np.argsort(window_probs[0])
# # print(sorted_indices1)
# # Get the indices of the top two maximum elements
# top_two_indices1 = sorted_indices1[-2:][::-1]
# # print(top_two_indices1)
probabilities.append(window_probs)
# print(probabilities)
# # Count the occurrence of each pair
# # count_pairs = Counter(map(tuple, probabilities))
# probs = {}
# for prob in probabilities:
# # print(prob)
# max_ele = np.max(prob)
# probs[max_ele] = np.argmax(prob)
# print(probs)
# Get the most common pair
# most_common_pair = count_pairs.most_common(1)[0][0]
# print(most_common_pair)
# Split the array into two halves
half1 = probabilities[: len(probabilities) // 2]
half2 = probabilities[len(probabilities) // 2 :]
col_max1 = np.amax(half1, axis=0)
col_max2 = np.amax(half2, axis=0)
first = np.argmax(col_max1)
second = np.argmax(col_max2)
print(col_max1, col_max2)
print(first, second)
# sliding window
# Define the sliding window parameters
window_size = 49
stride = 10
# Initialize a list to store the predicted probabilities for each window
probabilities = []
for sample in test_data_all:
# Loop over the test data with the sliding window approach
for i in range(0, len(test_data) - window_size + 1, stride):
# Extract the current window of data
window = test_data[i : i + window_size]
# Make a prediction for the window and get the probabilities
window_probs = model.predict(window)
# Append the probabilities to the list
probabilities.append(window_probs)
# Concatenate the list of probabilities into a single array
probabilities = np.concatenate(probabilities)
# Get the predicted classes for each window by taking the argmax of the probabilities
predicted_classes = np.argmax(probabilities, axis=1)
# Get the two gestures with maximum probability for each window
top_two_classes = np.argsort(probabilities, axis=1)[:, -2:]
| false | 0 | 10,881 | 0 | 10,881 | 10,881 |
||
129822364
|
<jupyter_start><jupyter_text>Breast Cancer Dataset
The data set contains patient records from a 1984-1989 trial conducted by the German Breast Cancer Study Group (GBSG) of 720 patients with node positive breast cancer; it retains the 686 patients with complete data for the prognostic variables.
These data sets are used in the paper by Royston and Altman (2013). The Rotterdam data is used to create a fitted model, and the GBSG data for validation of the model. The paper gives references for the data source.
# Dataset Format
A data set with 686 observations and 11 variables.
| Columns | Description |
| --- | --- |
| pid | patient identifier |
| age | age, years |
| meno | menopausal status (0= premenopausal, 1= postmenopausal) |
| size | tumor size, mm |
| grade | tumor grade |
| nodes | number of positive lymph nodes |
| pgr | progesterone receptors (fmol/l) |
| er | estrogen receptors (fmol/l) |
| hormon | hormonal therapy, 0= no, 1= yes |
| rfstime | recurrence free survival time; days to first of recurrence, death or last follow-up |
| status | 0= alive without recurrence, 1= recurrence or death |
# References
Patrick Royston and Douglas Altman, External validation of a Cox prognostic model: principles and methods. BMC Medical Research Methodology 2013, 13:33
Kaggle dataset identifier: breast-cancer-dataset-used-royston-and-altman
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import necessary Library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
classification_report,
confusion_matrix,
ConfusionMatrixDisplay,
)
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("/kaggle/input/breast-cancer-dataset-used-royston-and-altman/gbsg.csv")
df.shape
df.head()
# ## It is necessary to understand the meaning of some important features before proceeding
# 1) Meno (menopausal status (0= premenopausal, 1= postmenopausal))
# -- It refers to a woman's hormonal status and whether she has reached menopause or not. Menopause is a natural process that occurs in women typically around the age of 45 to 55, marking the end of their reproductive years. Menopausal status is important in breast cancer because hormonal factors play a significant role in the development, progression, and treatment of the disease
#
# 2) Grade
# -- In breast cancer, "grade" refers to a measure of how abnormal the cancer cells look under a microscope compared to normal cells. The grade provides information about the aggressiveness or the likelihood of the cancer to grow and spread. It helps in determining the treatment approach and predicting the patient's prognosis
# 3) Nodes
# -- The number of positive lymph nodes indicates how far the cancer has spread and helps guide the selection of therapies such as surgery, radiation, chemotherapy, or targeted treatments. Fewer positive nodes is better
# 4) pgr (progesterone receptors (fmol/l))
# -- Progesterone receptors (PR) are proteins found on the surface of breast cancer cells. Their presence indicates that the cancer cells are responsive to the hormone progesterone, which is naturally produced in the body
# 5) er (estrogen receptors (fmol/l))
# -- Estrogen receptors (ER) help determine whether the cancer cells are sensitive to hormonal therapies
# 6) hormon (hormonal therapy, 0= no, 1= yes)
# -- The hormon feature records whether the patient received hormonal therapy as part of treatment: 1 means therapy was given, 0 means it was not
# 7) rfstime (recurrence free survival time)
# -- The rfstime feature is the recurrence-free survival time: the number of days from treatment to the first of recurrence, death, or last follow-up
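# As a quick sanity check on the categorical/binary features described above, their
# distributions can be inspected directly (a small illustrative sketch, not part of
# the original analysis; it assumes the column names given in the dataset description):
for col in ["meno", "grade", "hormon", "status"]:
    print(df[col].value_counts(), "\n")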
df.info()
# 1) The dataset contains only integer-typed columns
# 2) No missing values are present
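# The "no missing values" observation can be confirmed explicitly (a minimal check,
# not in the original notebook):
print(df.isnull().sum())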
df.describe()
# Drop irrelevant columns (row index and patient identifier) from the dataset
df.drop(columns=["Unnamed: 0", "pid"], inplace=True)
# Let's check box plot of each numerical column:
columns = df.columns.tolist()
fig, ax = plt.subplots(1, len(columns), figsize=(15, 5))
for i, col in enumerate(columns):
df.boxplot(column=col, ax=ax[i])
ax[i].set_title(col)
plt.tight_layout()
plt.show()
# Inference
# 1) The size, nodes, pgr, and er columns contain most of the outlier values
# 2) With only 686 rows, dropping outliers is not recommended because it would discard data and could hurt training
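# If the heavy-tailed columns ever need taming without dropping rows, one option
# (an illustrative sketch only, not applied in this analysis) is IQR-based capping:
def cap_iqr(series, k=1.5):
    # Clip values outside [Q1 - k*IQR, Q3 + k*IQR] to the nearest boundary
    q1, q3 = series.quantile(0.25), series.quantile(0.75)
    iqr = q3 - q1
    return series.clip(lower=q1 - k * iqr, upper=q3 + k * iqr)


# Example usage on a copy, so df itself stays unchanged:
# df_capped = df.copy()
# for col in ["size", "nodes", "pgr", "er"]:
#     df_capped[col] = cap_iqr(df_capped[col])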
# ## Univariate Analysis
plt.hist(df["age"], bins=20)
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.title("Histogram of Age")
plt.show()
plt.hist(df["meno"], bins=30)
plt.xlabel("Meno")
plt.ylabel("Frequency")
plt.title("Histogram of Meno")
plt.show()
plt.hist(df["size"], bins=30)
plt.xlabel("Cancer cell Size")
plt.ylabel("Frequency")
plt.title("Size of Cancer Cell")
plt.show()
plt.hist(df["grade"], bins=30)
plt.xlabel("Grade")
plt.ylabel("Frequency")
plt.title("Grade of Cancer Cell")
plt.show()
plt.hist(df["nodes"], bins=30)
plt.xlabel("Nodes")
plt.ylabel("Frequency")
plt.title("Number of Nodes")
plt.show()
plt.hist(df["pgr"], bins=30)
plt.xlabel("PGR")
plt.ylabel("Frequency")
plt.title("Projesteron receipter qty")
plt.show()
plt.hist(df["er"], bins=30)
plt.xlabel("ER")
plt.ylabel("Frequency")
plt.title("Estrogen receipter qty")
plt.show()
sns.heatmap(df.corr(), annot=True, cmap="coolwarm")
df.shape
# Features: columns 0-7 after the drops (age, meno, size, grade, nodes, pgr, er, hormon);
# target: column 9 (status). rfstime (column 8) is not used as a feature.
X = df.iloc[:, 0:8].values
y = df.iloc[:, [9]].values
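# A more explicit alternative (a sketch that assumes the column names listed in the
# dataset description) selects features and target by name, which is more robust to
# column reordering than positional iloc indexing; it is kept under separate variable
# names so the original X and y above are untouched:
feature_cols = ["age", "meno", "size", "grade", "nodes", "pgr", "er", "hormon"]
X_by_name = df[feature_cols].values
y_by_name = df["status"].values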
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=24
)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
mms = MinMaxScaler()
# Fit the scaler on the training data only, then apply the same fitted transform to
# the test data so that test-set statistics do not leak into preprocessing
X_train_scaled = mms.fit_transform(X_train)
X_test_scaled = mms.transform(X_test)
lr_model = LogisticRegression()
# y_train has shape (n, 1); ravel() flattens it to the 1-D target array sklearn expects
lr_model.fit(X_train_scaled, y_train.ravel())
y_preds_lr = lr_model.predict(X_test_scaled)
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
log_loss,
)
score = accuracy_score(y_test, y_preds_lr)
print(score)
report = classification_report(y_test, y_preds_lr)
print(report)
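# Several metrics imported above are never used. A minimal follow-up sketch (reusing
# the fitted lr_model and the held-out test split; nothing here comes from the original
# notebook) reports ROC AUC and visualizes the confusion matrix:
y_proba_lr = lr_model.predict_proba(X_test_scaled)[:, 1]
print("ROC AUC:", roc_auc_score(y_test.ravel(), y_proba_lr))
cm = confusion_matrix(y_test.ravel(), y_preds_lr)
ConfusionMatrixDisplay(confusion_matrix=cm).plot(cmap="Blues")
plt.show()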
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/822/129822364.ipynb
|
breast-cancer-dataset-used-royston-and-altman
|
utkarshx27
|
[{"Id": 129822364, "ScriptId": 38558219, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11219290, "CreationDate": "05/16/2023 18:07:39", "VersionNumber": 2.0, "Title": "Breast Cancer Prediction", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 159.0, "LinesInsertedFromPrevious": 87.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 72.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186200028, "KernelVersionId": 129822364, "SourceDatasetVersionId": 5642294}]
|
[{"Id": 5642294, "DatasetId": 3243155, "DatasourceVersionId": 5717613, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/09/2023 10:42:51", "VersionNumber": 1.0, "Title": "Breast Cancer Dataset", "Slug": "breast-cancer-dataset-used-royston-and-altman", "Subtitle": "Breast Cancer data sets used in Royston and Altman (2013)", "Description": "The data set contains patient records from a 1984-1989 trial conducted by the German Breast Cancer Study Group (GBSG) of 720 patients with node positive breast cancer; it retains the 686 patients with complete data for the prognostic variables.\nThese data sets are used in the paper by Royston and Altman(2013). The Rotterdam data is used to create a fitted model, and the GBSG data for validation of the model. The paper gives references for the data source.\n# Dataset Format\nA data set with 686 observations and 11 variables.\n\n| Columns | Description |\n| --- | --- |\n| pid | patient identifier |\n| age | age, years |\n| meno | menopausal status (0= premenopausal, 1= postmenopausal) |\n| size | tumor size, mm |\n| grade | tumor grade |\n| nodes | number of positive lymph nodes |\n| pgr | progesterone receptors (fmol/l) |\n| er | estrogen receptors (fmol/l) |\n| hormon | hormonal therapy, 0= no, 1= yes |\n| rfstime | recurrence free survival time; days to first of recurrence, death or last follow-up |\n| status | 0= alive without recurrence, 1= recurrence or death |\n\n# References\nPatrick Royston and Douglas Altman, External validation of a Cox prognostic model: principles and methods. BMC Medical Research Methodology 2013, 13:33", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3243155, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5642294.0, "CurrentDatasourceVersionId": 5717613.0, "ForumId": 3308429, "Type": 2, "CreationDate": "05/09/2023 10:42:51", "LastActivityDate": "05/09/2023", "TotalViews": 8106, "TotalDownloads": 1244, "TotalVotes": 41, "TotalKernels": 9}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
| false | 1 | 1,685 | 0 | 2,111 | 1,685 |