content (string, 73 to 1.12M chars) | license (string, 3 classes) | path (string, 9 to 197 chars) | repo_name (string, 7 to 106 chars) | chain_length (int64, 1 to 144)
---|---|---|---|---|
<jupyter_start><jupyter_text>https://stackoverflow.com/questions/37126108/how-to-read-data-into-tensorflow-batches-from-example-queue
https://stackoverflow.com/questions/39076388/tensorflow-deep-mnist-resource-exhausted-oom-when-allocating-tensor-with-shape
https://stackoverflow.com/questions/42495930/tensorflow-oom-on-gpu
<jupyter_code># Load the TensorBoard notebook extension
%load_ext tensorboard
!rm -rf ./checkpoints/
# Clear any logs from previous runs
!rm -rf ./logs/
import datetime
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
# Clear any checkpoints from previous runs
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(encoder=encoder,
decoder=decoder,
optimizer = optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
start_epoch = 0
# if ckpt_manager.latest_checkpoint:
# start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
# # restoring the latest checkpoint in checkpoint_path
# ckpt.restore(ckpt_manager.latest_checkpoint)
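# For reference: `train_step`, `val_step` and `num_steps` are defined earlier in the full notebook.
# A minimal sketch of a teacher-forcing train step for this encoder/decoder setup, kept commented
# out because it is an assumption based on the calls used below, not the author's exact code:
#
# loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
#
# def loss_function(real, pred):
#     mask = tf.math.logical_not(tf.math.equal(real, 0))  # ignore padded positions
#     loss_ = loss_object(real, pred)
#     loss_ *= tf.cast(mask, dtype=loss_.dtype)
#     return tf.reduce_mean(loss_)
#
# @tf.function
# def train_step(img_tensor, target):
#     loss = 0
#     hidden = decoder.reset_state(batch_size=target.shape[0])
#     dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * target.shape[0], 1)
#     with tf.GradientTape() as tape:
#         features = encoder(img_tensor)
#         for i in range(1, target.shape[1]):
#             predictions, hidden, _ = decoder(dec_input, features, hidden)
#             loss += loss_function(target[:, i], predictions)
#             dec_input = tf.expand_dims(target[:, i], 1)  # teacher forcing
#     total_loss = loss / int(target.shape[1])
#     trainable_variables = encoder.trainable_variables + decoder.trainable_variables
#     gradients = tape.gradient(loss, trainable_variables)
#     optimizer.apply_gradients(zip(gradients, trainable_variables))
#     return loss, total_loss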
# adding this in a separate cell because if you run the training cell
# many times, the loss_plot array will be reset
loss_plot = []
val_plot=[]
EPOCHS = 12
# start_epoch=0
# with strategy.scope():
for epoch in range(start_epoch, EPOCHS):
start = time.time()
total_loss = 0
dataset_train = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))
# Use map to load the numpy files in parallel
dataset_train = dataset_train.map(lambda item1, item2: tf.numpy_function(map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset_train = dataset_train.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset_train = dataset_train.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
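# `map_func` is defined earlier in the full notebook. A typical version (an assumption; it depends
# on how the image features were cached) loads pre-extracted .npy feature files, e.g.:
# def map_func(img_name, cap):
#     img_tensor = np.load(img_name.decode('utf-8') + '.npy')
#     return img_tensor, cap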
for (batch, (img_tensor, target)) in enumerate(dataset_train):
batch_loss, t_loss = train_step(img_tensor, target)
total_loss += t_loss
if batch % 100 == 0:
print ('Epoch {} Batch {} Loss {:.4f}'.format(
epoch + 1, batch, batch_loss.numpy() / int(target.shape[1])))
# storing the epoch end loss value to plot later
with train_summary_writer.as_default():
tf.summary.scalar('loss', total_loss, step=epoch)
# tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
loss_plot.append(total_loss / num_steps)
val_loss=0
for (batch, (img_tensor, target)) in enumerate(dataset_val):
batch_loss_val, t_loss_val = val_step(img_tensor, target)
val_loss += t_loss_val
with test_summary_writer.as_default():
tf.summary.scalar('loss', val_loss, step=epoch)
# tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
if epoch % 5 == 0:
ckpt_manager.save()
val_plot.append(val_loss / num_steps)
print ('Epoch {} Loss {:.6f}'.format(epoch + 1, total_loss/num_steps))
print ('Epoch {} Loss_val {:.6f}'.format(epoch + 1, val_loss/num_steps))
print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
%tensorboard --logdir logs/gradient_tape
plt.plot(loss_plot, label='Train Loss')
plt.plot(val_plot, label='Val Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.legend()
plt.savefig("loss_without_tensorboard.png")
plt.show()
[tokenizer.word_index['<start>']]
print(img_tensor.shape)
print(target.shape)
import PIL.Image
def plot_attention(image, result, attention_plot):
# img_name_to_save=image
temp_image = np.array(PIL.Image.open(image))
fig = plt.figure(figsize=(10, 10))
len_result = len(result)
for l in range(len_result):
temp_att = np.resize(attention_plot[l], (8, 8))
ax = fig.add_subplot(len_result//2, len_result//2, l+1)
ax.set_title(result[l])
img = ax.imshow(temp_image)
ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())
plt.tight_layout()
plt.savefig("figure_latest.png") # save as png
plt.show()
def evaluate(image):
# attention_plot = np.zeros((max_length, attention_features_shape))
attention_plot = np.zeros((max_length, 252))
# print('attention plot shape ',attention_plot.shape)
hidden = decoder.reset_state(batch_size=1)
# print("hidden shape", hidden.shape)
# dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * target.shape[0], 1)
temp_input = tf.expand_dims(load_image(image)[0], 0)
# print("temp input shape", temp_input.shape)
img_tensor_val = image_features_extract_model(temp_input)
# print("img tensor val shape", img_tensor_val.shape)
img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
# img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
# print("img tensor val shape after reshape", img_tensor_val.shape)
features = encoder(img_tensor_val)
# print("Encoder output features shape ", features.shape)
# dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
dec_input = tf.expand_dims([tokenizer.word_index['<start> ']], 0)
# print('shape dec input ', dec_input.shape)
result = []
for i in range(max_length):
predictions, hidden, attention_weights = decoder(dec_input, features, hidden)
# print('attention weights shape ',attention_weights.shape)
attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()
result.append(tokenizer.index_word[predicted_id])
if tokenizer.index_word[predicted_id] == '<end>':
return result, attention_plot
dec_input = tf.expand_dims([predicted_id], 0)
attention_plot = attention_plot[:len(result), :]
return result, attention_plot
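# Note: the loop above samples the next token with tf.random.categorical, so repeated calls can
# return different captions for the same image. A deterministic (greedy) alternative, an optional
# tweak that is not part of the original notebook, would replace the sampling line with:
# predicted_id = tf.argmax(predictions[0]).numpy()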
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# plt.savefig('sample.pdf')
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# captions on the validation set
rid = np.random.randint(0, len(img_name_val)) # from live session code on URL Shortener
image = img_name_val[rid]
real_caption = ' '.join([tokenizer.index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
<jupyter_output>Real Caption: <start> no acute cardiopulmonary abnormality <end>
Prediction Caption: date lobe focal airspace disease <end>
<jupyter_text>Taking images online
<jupyter_code>image_url='https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/X-ray_of_COPD_exacerbation_-_anteroposterior_view.jpg/300px-X-ray_of_COPD_exacerbation_-_anteroposterior_view.jpg'
print("Original Image")
Image(url='https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/X-ray_of_COPD_exacerbation_-_anteroposterior_view.jpg/300px-X-ray_of_COPD_exacerbation_-_anteroposterior_view.jpg')
# image_url = 'https://tensorflow.org/images/surf.jpg'
image_extension = image_url[-4:]
image_path = tf.keras.utils.get_file('image'+image_extension,
origin=image_url)
result, attention_plot = evaluate(image_path)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image_path, result, attention_plot)
# opening the image
# Image.open(image_path)
from IPython.display import Image
from IPython.core.display import HTML
print("Original Image")
Image(url= "https://upload.wikimedia.org/wikipedia/commons/thumb/8/81/Chest_radiograph_in_influensa_and_H_influenzae%2C_posteroanterior%2C_annotated.jpg/300px-Chest_radiograph_in_influensa_and_H_influenzae%2C_posteroanterior%2C_annotated.jpg")
image_url='https://upload.wikimedia.org/wikipedia/commons/thumb/8/81/Chest_radiograph_in_influensa_and_H_influenzae%2C_posteroanterior%2C_annotated.jpg/300px-Chest_radiograph_in_influensa_and_H_influenzae%2C_posteroanterior%2C_annotated.jpg'
image_extension = image_url[-4:]
image_path = tf.keras.utils.get_file('image'+image_extension,
origin=image_url)
result, attention_plot = evaluate(image_path)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image_path, result, attention_plot)
# opening the image
# Image.open(image_path)
import time
# while True:
print("This prints once a minute.")
time.sleep(60) # Delay for 1 minute (60 seconds).
## https://stackoverflow.com/questions/510348/how-can-i-make-a-time-delay-in-python
import time
i=0
while True:
print("Printing Something to keep notebook alive", i, "th time")
time.sleep(600) # Delay for 10 minutes (600 seconds).
i+=1
2-3<jupyter_output><empty_output>
|
no_license
|
/.ipynb_checkpoints/Model_trained_3-checkpoint.ipynb
|
aman-sawarn/Medical-Report-Generation-Using-X-Ray-Images
| 2 |
<jupyter_start><jupyter_text>#Building Machine Learning Pipelines:
##Data Analysis Phase
In this and the upcoming videos, we will focus on creating machine learning pipelines that cover the entire life cycle of a data science project. This will be especially useful for professionals who have not worked with large datasets.
#Project Name: House Prices: Advanced Regression Techniques
The main aim of this project is to predict the house price based on various features, which we will discuss as we go ahead.
#All the Lifecycle Steps in a Data Science Project
1. Data Analysis
2. Feature Engineering
3. Feature Selection
4. Model Building
5. Model Deployment<jupyter_code>import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv("/content/drive/MyDrive/kaggle/train.csv")
dataset.head()
dataset.shape<jupyter_output><empty_output><jupyter_text>#In Data Analysis We Will Find Out the Following
1. Missing Values
2. All the Numerical Variables
3. Distribution of the Numerical Variables
4. Categorical Variables
5. Cardinality of Categorical Variables
6. Outliers
7. Relationship between the independent and dependent feature (SalePrice)#**Missing Values**<jupyter_code>## Here we will check the percentage of NaN values present in each feature
## Step 1: make the list of features which have missing values
features_with_na=[features for features in dataset.columns if dataset[features].isnull().sum()>1]
## Step 2: print the feature name and the percentage of missing values
for feature in features_with_na:
print(feature, np.round(dataset[feature].isnull().mean()*100, 2), '% missing values')
for feature in features_with_na:
data = dataset.copy()
# let's make a variable that indicates 1 if the observation was missing or zero otherwise
data[feature] = np.where(data[feature].isnull() ,1,0)
# let's calculate the mean SalePrice where the information is missing or present
data.groupby(feature)['SalePrice'].median().plot.bar()
plt.title(feature)
plt.show()
<jupyter_output><empty_output><jupyter_text>Here the relation between the missing values and the dependent variable is clearly visible. So we need to replace these NaN values with something meaningful, which we will do in the Feature Engineering section.
From the above dataset, some of the features, like Id, are not required<jupyter_code>print("Id of Houses : {}".format(len(dataset.Id)))<jupyter_output>Id of Houses : 1460
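As a preview of that Feature Engineering step, here is a minimal sketch (an illustration only, not the author's final approach) of how the missing values flagged above could be filled; it works on a copy so the EDA below is unaffected:
data = dataset.copy()
for feature in features_with_na:
    if data[feature].dtype == 'O':
        data[feature] = data[feature].fillna('Missing')                 # label missing categories explicitly
    else:
        data[feature] = data[feature].fillna(data[feature].median())    # median-impute numerical features
print(data[features_with_na].isnull().sum().sum(), "missing values remain in the copy")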
<jupyter_text>#Numerical Variables<jupyter_code># list of numerical variables
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtype!='O']
print('Number of numerical variables: ', len(numerical_features))
# visualise the numerical variables
dataset[numerical_features].head()
# list of variables that contain year information
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
# let's explore the content of these year variables
for feature in year_feature:
print(feature, dataset[feature].unique())
## Let's analyze the temporal datetime variables
## We will check whether there is a relation between the year the house was sold and the sale price
dataset.groupby('YrSold')['SalePrice'].median().plot()
plt.xlabel('Year Sold')
plt.ylabel('Median House Price')
plt.title("House Price vs YearSold")
## Here we will compare the difference between All years feature with SalePrice
for feature in year_feature:
if feature!='YrSold':
data=dataset.copy()
## We will capture the difference between each year variable and the year the house was sold
data[feature]=data['YrSold']-data[feature]
plt.scatter(data[feature],data['SalePrice'])
plt.xlabel(feature)
plt.ylabel('SalePrice')
plt.show()
## Numerical variables are usually of 2 types
## 1. Continuous variables and 2. Discrete variables
discrete_feature=[feature for feature in numerical_features if len(dataset[feature].unique())<25 and feature not in year_feature+['Id']]
print("Discrete Variables Count: {}".format(len(discrete_feature)))
discrete_feature
dataset[discrete_feature].head()
## Let's find the relationship between them and SalePrice
for feature in discrete_feature:
data=dataset.copy()
data.groupby(feature)['SalePrice'].median().plot.bar()
plt.xlabel(feature)
plt.ylabel('SalePrice')
plt.title(feature)
plt.show()
## There is a visible relationship between these discrete variables and SalePrice
<jupyter_output><empty_output><jupyter_text>#Continuous Variable<jupyter_code>continuous_feature=[feature for feature in numerical_features if feature not in discrete_feature+year_feature+['Id']]
print("Continuous feature Count {}".format(len(continuous_feature)))
## Lets analyse the continuous values by creating histograms to understand the distribution
for feature in continuous_feature:
data=dataset.copy()
data[feature].hist(bins=25)
plt.xlabel(feature)
plt.ylabel("Count")
plt.title(feature)
plt.show()
## We will be using logarithmic transformation
for feature in continuous_feature:
data=dataset.copy()
if 0 in data[feature].unique():
pass
else:
data[feature]=np.log(data[feature])
data['SalePrice']=np.log(data['SalePrice'])
plt.scatter(data[feature],data['SalePrice'])
plt.xlabel(feature)
plt.ylabel('SalesPrice')
plt.title(feature)
plt.show()
for feature in continuous_feature:
data=dataset.copy()
if 0 in data[feature].unique():
pass
else:
data[feature]=np.log(data[feature])
data.boxplot(column=feature)
plt.ylabel(feature)
plt.title(feature)
plt.show()
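## Note: np.log(0) is -infinity, which is why features containing zeros are skipped in the two
## loops above. An alternative (not used in this notebook) is np.log1p, which maps 0 to 0 and so
## keeps every feature, e.g. data[feature]=np.log1p(data[feature])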
<jupyter_output><empty_output><jupyter_text>#Categorical Variables<jupyter_code>categorical_features=[feature for feature in dataset.columns if dataset[feature].dtypes=='O']
categorical_features
dataset[categorical_features].head()
for feature in categorical_features:
print('The feature is {} and number of categories are {}'.format(feature,len(dataset[feature].unique())))
# Find out the relationship between categorical variable and dependent feature SalesPrice
for feature in categorical_features:
data=dataset.copy()
data.groupby(feature)['SalePrice'].median().plot.bar()
plt.xlabel(feature)
plt.ylabel('SalePrice')
plt.title(feature)
plt.show()
<jupyter_output><empty_output>
|
no_license
|
/EDA_on_Advance_House_Price_Prediction.ipynb
|
damanpreet1234/House-Price-Prediction.
| 6 |
<jupyter_start><jupyter_text> DATASET<jupyter_code>import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('Dataset1A.csv')
df.head()<jupyter_output><empty_output><jupyter_text> Measures of Central Tendency<jupyter_code>stats = df[['video_id','views', 'likes', 'dislikes']]
stats.head()
mean_views = stats['views'].mean()
mean_likes = stats['likes'].mean()
mean_dislikes = stats['dislikes'].mean()
median_views = stats['views'].median()
median_likes = stats['likes'].median()
median_dislikes = stats['dislikes'].median()
mode_views = stats['views'].mode()[0]
mode_likes = stats['likes'].mode()[0]
mode_dislikes = stats['dislikes'].mode()[0]
print("Mean of \nViews: {}\nLikes: {}\nDislikes: {}".format(mean_views, mean_likes, mean_dislikes))
print()
print("Median of \nViews: {}\nLikes: {}\nDislikes: {}".format(median_views, median_likes, median_dislikes))
print()
print("Mode of \nViews: {}\nLikes: {}\nDislikes: {}".format(mode_views, mode_likes, mode_dislikes))<jupyter_output>Mean of
Views: 2360784.6382573447
Likes: 74266.7024347359
Dislikes: 3711.400888910596
Median of
Views: 681861.0
Likes: 18091.0
Dislikes: 3711.400888910596
Mode of
Views: 2078
Likes: 0
Dislikes: 0
<jupyter_text> Measures of Variation<jupyter_code>std_views = stats['views'].std(skipna = True)
std_likes = stats['likes'].std(skipna = True)
std_dislikes = stats['dislikes'].std(skipna = True)
print("Standard Deviation of \nViews: {}\nLikes: {}\nDislikes: {}".format(std_views, std_likes, std_dislikes))<jupyter_output>Standard Deviation of
Views: 7394113.75970394
Likes: 228885.33820949917
Dislikes: 29029.7059450018
<jupyter_text> Measures of Location <jupyter_code>loc_views = stats['views'].quantile([0.25,0.5,0.75])
loc_likes = stats['likes'].quantile([0.25,0.5,0.75])
loc_dislikes = stats['dislikes'].quantile([0.25,0.5,0.75])
print("Quartiles of \nViews:\n{}\nLikes:\n{}\nDislikes:\n{}".format(loc_views, loc_likes, loc_dislikes))
q1views, q3views = np.percentile(stats['views'], 25), np.percentile(stats['views'], 75)
q1likes, q3likes = np.percentile(stats['likes'], 25), np.percentile(stats['likes'], 75)
q1dislikes, q3dislikes = np.percentile(stats['dislikes'], 25), np.percentile(stats['dislikes'], 75)
iqr_views = q3views - q1views
iqr_likes = q3likes - q1likes
iqr_dislikes = q3dislikes - q1dislikes
print("IQR of \nViews: {}\nLikes: {}\nDislikes: {}".format(iqr_views, iqr_likes, iqr_dislikes))
outliers_val_views = 1.5 * iqr_views
outliers_val_likes = 1.5 * iqr_likes
outliers_val_dislikes = 1.5 * iqr_dislikes
high_view = q3views+outliers_val_views
low_view = q1views-outliers_val_views
high_likes = q3likes+outliers_val_likes
low_likes = q1likes-outliers_val_likes
high_dislikes = q3dislikes+outliers_val_dislikes
low_dislikes = q1dislikes-outliers_val_dislikes
print("Outliers for Views is higher than {} and lower than {}".format(high_view, low_view))
print("Outliers for Likes is higher than {} and lower than {}".format(high_likes, low_likes))
print("Outliers for Dislikes is higher than {} and lower than {}".format(high_dislikes, low_dislikes))<jupyter_output>Outliers for Views is higher than 4194399.0 and lower than -2128913.0
Outliers for Likes is higher than 130406.5 and lower than -69565.5
Outliers for Dislikes is higher than 4542.0 and lower than -2402.0
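For illustration (not part of the original notebook), these fences can be used to flag the outlier rows directly:
view_outliers = stats[(stats['views'] > high_view) | (stats['views'] < low_view)]
print("Number of view-count outliers:", len(view_outliers))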
<jupyter_text> Histogram Visualization<jupyter_code>stats.describe()
n = 40949
num_inter_views = (2*iqr_views)/n**(1/3)
num_inter_likes = (2*iqr_likes)/n**(1/3)
num_inter_dislikes = (2*iqr_dislikes)/n**(1/3)
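# (Freedman-Diaconis-style rule: num_inter_* above is the bin WIDTH, 2*IQR/n^(1/3);
# dividing each variable's range by that width below gives the number of bins.)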
range_views = stats['views'].max() - stats['views'].min()
bins_views = range_views / num_inter_views
range_likes = stats['likes'].max() - stats['likes'].min()
bins_likes = range_likes / num_inter_likes
range_dislikes = stats['dislikes'].max() - stats['dislikes'].min()
bins_dislikes = range_dislikes / num_inter_dislikes
sns.set(rc={'figure.figsize':(15,5)})
# histogram of views with custom bins
mydata = stats["views"]
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(mydata, weights=np.zeros_like(mydata) + 1. / mydata.size, bins = 500)
fig.suptitle('Views', fontsize=20)
ax.set_xlabel('Bins-Custom', size=20)
ax.set_ylabel('Relative Frequency Views')
ax.legend
plt.show()
# histogram of views with formula-based bins
mydata = stats["views"]
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(mydata, weights=np.zeros_like(mydata) + 1. / mydata.size, bins = round(bins_views), lw=0)
fig.suptitle('Views', fontsize=20)
ax.set_xlabel('Bins-Formula', size=20)
ax.set_ylabel('Relative Frequency Views')
ax.legend
plt.show()
# histogram of likes with custom bins
mydata = stats["likes"]
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(mydata, weights=np.zeros_like(mydata) + 1. / mydata.size, bins = 500)
fig.suptitle('Likes', fontsize=20)
ax.set_xlabel('Bins-Custom', size=20)
ax.set_ylabel('Relative Frequency Likes')
ax.legend
plt.show()
# histogram of likes with formula-based bins
mydata = stats["likes"]
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(mydata, weights=np.zeros_like(mydata) + 1. / mydata.size, bins = round(bins_likes), lw=0)
fig.suptitle('Likes', fontsize=20)
ax.set_xlabel('Bins-Formula', size=20)
ax.set_ylabel('Relative Frequency Likes')
ax.legend
plt.show()
mydata = stats["dislikes"]
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(mydata, weights=np.zeros_like(mydata) + 1. / mydata.size, bins = 500)
fig.suptitle('Dislikes', fontsize=20)
ax.set_xlabel('Bins-Custom', size=20)
ax.set_ylabel('Relative Frequency Dislikes')
ax.legend
plt.show()
mydata = stats["dislikes"]
fig = plt.figure()
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(mydata, weights=np.zeros_like(mydata) + 1. / mydata.size, bins = round(bins_dislikes), lw=0)
fig.suptitle('Dislikes', fontsize=20)
ax.set_xlabel('Bins-Formula', size=20)
ax.set_ylabel('Relative Frequency Dislikes')
ax.legend
plt.show()<jupyter_output><empty_output><jupyter_text> Box Plots<jupyter_code>sns.set(rc={'figure.figsize':(5,15)})
sns.boxplot(y=stats["views"]);
plt.show()
sns.boxplot(y=stats["likes"]);
plt.show()
sns.boxplot(y=stats["dislikes"]);
plt.show()<jupyter_output><empty_output><jupyter_text> Discussion Question A<jupyter_code>views = stats["views"]
val = views[views > 100000000].count()
n = views.size
prob_of_large_view = val / n
prob_of_large_view<jupyter_output><empty_output><jupyter_text> B: To get the favorite videos (the top 0.1%), we must take the 99.9th percentile<jupyter_code>res = np.percentile(stats['likes'], 99.9)
print("The minimum number of likes needed for a favorite video is: {}".format(res))<jupyter_output>The minimum number of likes needed for a favorite video is: 2836084.652000136
<jupyter_text> C<jupyter_code>ratio = mean_likes/mean_dislikes
ratio<jupyter_output><empty_output>
|
no_license
|
/Proyek1A.ipynb
|
Johansaputro/Probability-and-Statistics-with-Python
| 9 |
<jupyter_start><jupyter_text># Using Researcher With Tensorflow MNIST
`researcher` makes it easier for you to record, visualize, and reproduce the results of your data science experiments. Below we'll demonstrate how this can be done by using researcher to compare the results of different experiments on the tensorflow MNIST dataset (based on the [tensorflow MNIST tutorial](https://www.tensorflow.org/datasets/keras_example)). <jupyter_code>import tensorflow as tf
import tensorflow_datasets as tfds
import researcher as rs<jupyter_output><empty_output><jupyter_text>## Prepare the data
First we have to download the MNIST dataset and split it into train and test subsets.<jupyter_code>(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255., label
ds_train = ds_train.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>## Running Experiments
Now we're ready to start training. It's not good enough to just create a model and jump right in, though: good data science requires planning.
First we need to think about what statement we're trying to prove. For instance, perhaps we suspect that [ReLU](https://machinelearningmastery.com/rectified-linear-activation-function-for-deep-learning-neural-networks/) activation functions perform better on MNIST than [sigmoid](https://en.wikipedia.org/wiki/Sigmoid_function) functions.
Next we need to determine how we would prove this statement one way or the other. A simple, scientific approach would be to run a series of experiments where we train a neural network to recognize MNIST digits, keeping all variables exactly the same except the activation function, and record the final test loss each time. If one activation function has a much lower test loss, then we'll know that that activation function is better for MNIST (at least under these specific conditions).
The first step is to define an experiment helper function that will help us re-run exactly the same experiment while varying only the activation function:<jupyter_code>def run_experiment(params):
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
tf.keras.layers.Dense(128,activation=params["activation_function"]), # activation function is chosen from params
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(0.001),
metrics=['accuracy'],
)
history = model.fit(
ds_train,
epochs=5,
validation_data=ds_test,
)
return history<jupyter_output><empty_output><jupyter_text>This function takes a set of parameters, creates a neural network according to those parameters, trains it on MNIST, and finally returns the results of the training.
Let's try it out.<jupyter_code>param_queue = [
{"activation_function": "relu"},
{"activation_function": "sigmoid"},
]
results = []
for params in param_queue:
print("\nStarting Next Experiment")
results.append(run_experiment(params))<jupyter_output>
Starting Next Experiment
Epoch 1/5
469/469 [==============================] - 1s 2ms/step - loss: 0.3461 - accuracy: 0.9058 - val_loss: 0.1848 - val_accuracy: 0.9442
Epoch 2/5
469/469 [==============================] - 1s 1ms/step - loss: 0.1618 - accuracy: 0.9530 - val_loss: 0.1381 - val_accuracy: 0.9599
Epoch 3/5
469/469 [==============================] - 1s 2ms/step - loss: 0.1187 - accuracy: 0.9658 - val_loss: 0.1065 - val_accuracy: 0.9687
Epoch 4/5
469/469 [==============================] - 1s 1ms/step - loss: 0.0925 - accuracy: 0.9730 - val_loss: 0.0981 - val_accuracy: 0.9705
Epoch 5/5
469/469 [==============================] - 1s 2ms/step - loss: 0.0750 - accuracy: 0.9782 - val_loss: 0.0892 - val_accuracy: 0.9733
Starting Next Experiment
Epoch 1/5
469/469 [==============================] - 1s 2ms/step - loss: 0.5951 - accuracy: 0.8562 - val_loss: 0.3031 - val_accuracy: 0.9171
Epoch 2/5
469/469 [==============================] - 1s 2ms/step - loss: 0.2733 - accuracy: 0.9233 - v[...]<jupyter_text>## Saving Experiments with `researcher`
Now, just by eyeballing the printed output we can see that the ReLU function seems to perform better than the sigmoid function.
To get the most out of this experiment though, we need to record the results. If in 6 months a colleague or client asks us which activation to use for an MNIST classifier, we don't want to say "use ReLU, because if I remember correctly, it seemed a bit better than sigmoid". We want to be able to show them the actual data we gathered, so that they know **exactly** how much better, and according to which metrics. We also want to give them the tools to easily re-run the experiment for themselves if they have any doubts.
This is what researcher helps us do: for each experiment we'll save the parameters and the associated experiment outcome to an "experiment" JSON file, then in future we can easily re-analyse or re-create the results.<jupyter_code># create a directory for storing experiments
! mkdir experiments
experiment_dir = "experiments/"
for i in range(len(param_queue)):
params = param_queue[i]
collector = rs.ObservationCollector()
collector.add_tensorflow_history(0, results[i])
rs.record_experiment_with_collector(params, experiment_dir, collector)
! ls experiments<jupyter_output>no_title_91601d41e6ba315dd57bd9c2b1db378e.json
no_title_eebb49b9d1487396dd6c0e5271cc3083.json
<jupyter_text>`researcher`automatically associates each experiment with a hash. We can easily load them again and visualize their outcomes.<jupyter_code>experiments = rs.all_experiments(experiment_dir)
experiments
experiments[0].data
rs.final_compare(experiments, ["loss", "accuracy", "val_loss", "val_accuracy"])
rs.plot_training(experiments, ["loss", "val_loss"], figsize=(10, 10))<jupyter_output><empty_output><jupyter_text>## Adding More Experiments
If we want to do more research, we simply run and save more experiments. We can define a function to make this process easier.<jupyter_code>def run_and_save(params):
results = run_experiment(params)
collector = rs.ObservationCollector()
collector.add_tensorflow_history(0, results)
rs.record_experiment_with_collector(params, "./experiments/", collector)<jupyter_output><empty_output><jupyter_text>Now running and recording new experiments becomes very simple. We're also going to give our experiment parameters a `"title"` now, to help us identify that experiment in future.<jupyter_code>run_and_save({
"title": "tanh-activation",
"activation_function": "tanh"
})
experiments = rs.all_experiments("experiments/")  # reload so the new tanh run is included
rs.final_compare(experiments, ["loss", "accuracy", "val_loss", "val_accuracy"])
rs.plot_training(experiments, ["loss", "val_loss"], figsize=(10, 10))<jupyter_output><empty_output><jupyter_text>The tanh function seems to perform better than the sigmoid function, but worse than ReLU. Now that we have this data recorded, we can come back and re-confirm or re-assess these conclusions any time we like.
Since the parameters for each experiment are also recorded, we can easily re-run experiments later to confirm that the results haven't changed (for example, we might want to confirm that the results still hold on a newer version of Tensorflow), or on someone else's machine.<jupyter_code>experiments[-1].data<jupyter_output><empty_output>
|
permissive
|
/mnist_demo.ipynb
|
Lewington-pitsos/researcher
| 9 |
<jupyter_start><jupyter_text># --> Imports<jupyter_code>import tensorflow as tf
import numpy as np
# By default, eager mode is used. If we were in graph mode, this would raise an error.
# Eager mode is, however, much slower than graph mode.
a = np.array([1., 2.])
b = np.array([2., 5.])
tf.add(a, b)<jupyter_output><empty_output><jupyter_text># --> Graph model : AutoGraph<jupyter_code>@tf.function # This is a decorator to use graph mode
def add_fc(a,b):
return a+b
print(add_fc(a,b))
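# Quick, illustrative check of the eager-vs-graph speed claim above (numbers vary by machine,
# and for a single small op the gap can be modest):
import timeit
x_big = tf.random.uniform((1000, 1000))
eager_matmul = lambda: tf.matmul(x_big, x_big)
graph_matmul = tf.function(lambda: tf.matmul(x_big, x_big))
graph_matmul()  # trace once so compilation time is not measured
print("eager :", timeit.timeit(eager_matmul, number=100))
print("graph :", timeit.timeit(graph_matmul, number=100))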
def add_fc(a,b):
return tf.add(a,b)
print(tf.autograph.to_code(add_fc))<jupyter_output>def tf__add_fc(a, b):
with ag__.FunctionScope('add_fc', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:
do_return = False
retval_ = ag__.UndefinedReturnValue()
try:
do_return = True
retval_ = ag__.converted_call(ag__.ld(tf).add, (ag__.ld(a), ag__.ld(b)), None, fscope)
except:
do_return = False
raise
return fscope.ret(retval_, do_return)
<jupyter_text># --> Graph mode and eager mode with a keras model<jupyter_code>model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(256, activation="relu"))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Dense(10, activation="softmax"))
model_output = model.predict(np.zeros((1,30)))
print(model_output) # Gives a numpy array
model_output = model(np.zeros((1,30)))
print(model_output) # Gives a tensor
# This does not work once converted to a graph, because model.predict(x) returns a numpy array
# whereas it would have to be a tensor to work.
# The tensor object carries the path the values took and the operations applied to them...
# This is needed for the gradient!
@tf.function
def predict(x):
#return model.predict(x) # We must use model(x) and not model.predict(x)
return model(x) # because it returns a tensor object
model_output = predict(np.zeros((1,30)))
print(model_output)<jupyter_output>tf.Tensor([[0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1]], shape=(1, 10), dtype=float32)
|
permissive
|
/Notebooks/EagerOrGraphMode/main.ipynb
|
hanzopgp/TensorFlowTest
| 3 |
<jupyter_start><jupyter_text>## scipy
The SciPy library depends on NumPy, which provides convenient and fast N-dimensional array operations. SciPy is built to work with NumPy arrays and provides many user-friendly and efficient numerical routines, for example routines for numerical integration and optimization. Together they run on all popular operating systems, are quick to install, and are free. NumPy and SciPy are easy to use, yet powerful enough to be relied on by some of the world's leading scientists and engineers.
### Installation
pip install scipy
### Saving and reading files<jupyter_code>from scipy import io # import io
import numpy as np # import numpy and alias it as np
arr = np.array([1,2,3,4,5,6])
# save to a file
#io.savemat('name',{dict})
io.savemat('test.mat',{'arr1':arr})
loadArr=io.loadmat('test.mat')
# read a file
#io.loadmat('name')
data = io.loadmat('test.mat')
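# The loaded object is a dict keyed by the names used when saving; note that loadmat
# returns the array as 2-D, e.g. shape (1, 6) here (illustrative check):
print(data['arr1'])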
data<jupyter_output><empty_output><jupyter_text>### Analyzing random numbers with the statistical functions in scipy.stats
<jupyter_code>from scipy import stats
# stats provides functions for generating draws from continuous distributions
# Uniform distribution (uniform)
x=stats.uniform.rvs(size = 20)
# generate 20 random numbers uniformly distributed on [0,1]
# - Normal distribution (norm)
x=stats.norm.rvs(size = 20)
# generate 20 normally distributed random numbers
# - Beta distribution (beta)
x=stats.beta.rvs(size=20,a=3,b=4)
# generate 20 random numbers from a Beta distribution with a=3, b=4
# - Poisson distribution (poisson)
x=stats.poisson.rvs(0.6,loc=0,size = 20)
# generate 20 Poisson-distributed random numbers
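# For reproducible draws, a seed can be fixed (optional, not in the original), e.g.:
# np.random.seed(0)                               # global NumPy seed, or
# x = stats.norm.rvs(size=20, random_state=0)     # per-call seed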
<jupyter_output><empty_output><jupyter_text>### Computing the mean and standard deviation of the random numbers
stats.norm.fit: fits a normal distribution to the generated data and returns its mean and standard deviation<jupyter_code>import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
arr = stats.norm.rvs(size=900)
(mean,std) = stats.norm.fit(arr)
print('mean',mean) # mean
print('std',std) # standard deviation
<jupyter_output>mean -0.04212244953490511
std 1.0159301772450002
<jupyter_text>### Computing the skewness of the random numbers
1. Concept:
Skewness describes the degree of asymmetry of a probability distribution.
The test returns two values; the second is the p-value of the test (between 0 and 1), used to judge whether the data are consistent with a normal distribution
2. Use stats.skewtest() to test the skewness
<jupyter_code>(skewness,pvalue1) = stats.skewtest(arr)
print('skewness test statistic')
print(skewness)
print('skewtest p-value')
print(pvalue1)<jupyter_output>skewness test statistic
0.6737973436377096
skewtest p-value
0.5004401639058014
<jupyter_text>### Computing the kurtosis of the random numbers
1. Concept: kurtosis describes how peaked the probability distribution curve is
2. Use stats.kurtosistest() to test the kurtosis
3. A normal distribution has a kurtosis of 3, i.e. an excess kurtosis (excess_k) of 0
Platykurtic: flatter than the normal distribution, excess_k < 0
Leptokurtic: more peaked than the normal distribution, excess_k > 0
<jupyter_code>(Kurtosistest,pvalue2) = stats.kurtosistest(arr)
print('Kurtosistest',Kurtosistest) # kurtosis test statistic<jupyter_output>Kurtosistest 0.5315796817668517
<jupyter_text>### Normality test
1. The normality test likewise returns two values; the second is the p-value
2. Use stats.normaltest() for the test
In general, a p-value > 0.05 indicates the data are consistent with a normal distribution
<jupyter_code>print('pvalue2',pvalue2)
(Normltest,pvalue3) = stats.normaltest(arr)
print('Normltest',Normltest) # normality test statistic<jupyter_output>pvalue2 0.5950171402484943
Normltest 0.736579818360581
<jupyter_text>### Computing the value at a given percentile of the data
1. Use scoreatpercentile to compute the value at a given percentile
Format: scoreatpercentile(data, percent)
stats.scoreatpercentile(name_arr,percent)
2. Example: find the value at the 95th percentile
num = stats.scoreatpercentile(arr,95)
print(num)
<jupyter_code>print('pvalue3',pvalue3)
num = stats.scoreatpercentile(arr,95) # value at the given percentile
print('Value at the 95th percentile:') # value at the given percentile
print(num)<jupyter_output>pvalue3 0.6919165596464132
Value at the 95th percentile:
1.5941786592887324
<jupyter_text>### Finding the percentile corresponding to a given value
Use percentileofscore to compute the percentile corresponding to a given value
Format: percentileofscore(data, value)
Example: indexPercent = stats.percentileofscore(arr,1)
<jupyter_code>indexPercent = stats.percentileofscore(arr,1) # percentile at the given value
print ('Percentile at the value 1:') # percentile at the given value
print(indexPercent)
# display a histogram
plt.hist(arr) # plot the histogram
plt.show() # show the figure<jupyter_output><empty_output><jupyter_text>SciPy documentation: https://scipy.org/docs.html## statsmodels
This very simple case study is designed to get you up to speed with statsmodels quickly. Starting from raw data, we will show the steps needed to estimate a statistical model and to draw a diagnostic plot. We only use functions provided by statsmodels or by its pandas and patsy dependencies.<jupyter_code>import statsmodels.api as sm
import pandas
from patsy import dmatrices # patsy is used to describe statistical models and to build R-like design matrices from formulas
# df = sm.datasets.get_rdataset("Guerry", "HistData").data
df = pandas.read_csv('guerry.csv')
df.head()
df.info()
import numpy
# Note: there is one missing observation in the Region column. We eliminate it using the DataFrame methods provided by pandas:
df = df.replace(' ',numpy.NaN)
df = df.dropna()
df[-5:]<jupyter_output><empty_output><jupyter_text>### Design matrices
The first is a matrix of endogenous variables (i.e. dependent, response, regressand).
The second is a matrix of exogenous variables (i.e. independent variables, predictors, regressors).<jupyter_code># Use patsy's dmatrices function to create the design matrices
# y is an N x 1 column of data on per-capita lottery wagers (Lottery). X is N x 7 with an intercept, the Literacy and Wealth variables, and 4 region binary variables
y, X = dmatrices('Lottery ~ Literacy + Wealth + Region', data=df, return_type='dataframe')
y[:3]
X[:3]
# Model fitting and summary
mod = sm.OLS(y, X) # Describe model
res = mod.fit() # Fit model
print(res.summary()) # Summarize model
res.params
# Apply the Rainbow test for linearity
print(sm.stats.linear_rainbow(res)) # the first number is the F-statistic, the second is the p-value
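# Illustrative follow-up (not in the original): unpack the statistic and p-value explicitly;
# a p-value above 0.05 means the linearity assumption is not rejected at the 5% level.
fstat, pvalue = sm.stats.linear_rainbow(res)
print("Rainbow test: F = %.3f, p-value = %.3f" % (fstat, pvalue))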
sm.graphics.plot_partregress('Lottery', 'Wealth', ['Region', 'Literacy'],
data=df, obs_labels=False)
<jupyter_output>(0.847233997615691, 0.6997965543621644)
|
no_license
|
/第五周/补充资料/scipy&Statsmodels.ipynb
|
DataCastle2016/DataCastleVIP01
| 10 |
<jupyter_start><jupyter_text># NumpyNumpy is the core library for scientific computing in Python. It provides a high-performance multidimensional array object, and tools for working with these arrays. If you are already familiar with MATLAB, you might find this [tutorial](http://wiki.scipy.org/NumPy_for_Matlab_Users) useful to get started with Numpy.To use Numpy, we first need to import the `numpy` package:<jupyter_code>import numpy as np<jupyter_output><empty_output><jupyter_text>### ArraysA numpy array is a grid of values, all of the same type, and is indexed by a tuple of nonnegative integers. The number of dimensions is the rank of the array; the shape of an array is a tuple of integers giving the size of the array along each dimension.Convert python list to numpy array<jupyter_code>x = np.array([1, 2, 3]) # Create a rank 1 array
print(x, type(x), type(x[0]))<jupyter_output>[1 2 3] <class 'numpy.ndarray'> <class 'numpy.int64'>
<jupyter_text>Find out the size of a numpy array<jupyter_code>x = np.array([
[1, 2, 3],
[4, 5 ,6]
]) # Create a rank 2 array
# x has 2 rows, 3 columns
x.shape<jupyter_output><empty_output><jupyter_text>Access elements in numpy array<jupyter_code>x = np.array([1, 2, 3]) # Create a rank 1 array
print(x[0]) # Access first element
print(x[1:]) # Slice from first element
print(x[-2]) # Access second to last element
print(x[::-1]) # Reverse array<jupyter_output>1
[2 3]
2
[3 2 1]
<jupyter_text>Numpy also provides many functions to create arrays:<jupyter_code>a = np.zeros((2,2)) # Create an array of all zeros
print(a)
b = np.ones((3,4)) # Create an array of all ones
print(b)
c = np.full((2,2), 7) # Create a constant array
print(c)
d = np.eye(2) # Create a 2x2 identity matrix
print(d)
e = np.random.random((2,2)) # Create an array filled with random values
# Values are drawn from a Uniform distribution ~U(0,1)
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.random.html
print(e)
e = np.random.randn(2,2) # Create an array filled with random values
# Values are drawn from a Normal distribution ~N(0, 1)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.randn.html
print(e)<jupyter_output>[[-0.49970845 0.23424523]
[ 0.19701222 -0.97504503]]
<jupyter_text>### Array indexingNumpy offers several ways to index into arrays.Slicing: Similar to Python lists, numpy arrays can be sliced. Since arrays may be multidimensional, you must specify a slice for each dimension of the array:<jupyter_code>import numpy as np
# Create the following rank 2 array with shape (3, 4)
# [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
# Use slicing to pull out the subarray consisting of the first 2 rows
# and columns 1 and 2; b is the following array of shape (2, 2):
# [[2 3]
# [6 7]]
b = a[:2, 1:3]
print(b)<jupyter_output>[[2 3]
[6 7]]
<jupyter_text>A slice of an array is a view into the same data, so modifying it will modify the original array.<jupyter_code>print(a[0, 1])
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print(a[0, 1])<jupyter_output>2
77
<jupyter_text>You can also mix integer indexing with slice indexing. However, doing so will yield an array of lower rank than the original array. Note that this is quite different from the way that MATLAB handles array slicing:<jupyter_code># Create the following rank 2 array with shape (3, 4)
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(a)<jupyter_output>[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]]
<jupyter_text>Two ways of accessing the data in the middle row of the array.
Mixing integer indexing with slices yields an array of lower rank,
while using only slices yields an array of the same rank as the
original array:<jupyter_code>row_r1 = a[1, :] # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
row_r3 = a[[1], :] # Rank 2 view of the second row of a
print(row_r1, row_r1.shape)
print(row_r2, row_r2.shape)
print(row_r3, row_r3.shape)
# We can make the same distinction when accessing columns of an array:
col_r1 = a[:, 1]
col_r2 = a[:, 1:2]
print(col_r1, col_r1.shape)
print(col_r2, col_r2.shape)<jupyter_output>[ 2 6 10] (3,)
[[ 2]
[ 6]
[10]] (3, 1)
<jupyter_text>Boolean array indexing: Boolean array indexing lets you pick out arbitrary elements of an array. Frequently this type of indexing is used to select the elements of an array that satisfy some condition. Here is an example:<jupyter_code>import numpy as np
a = np.array([[1,2], [3, 4], [5, 6]])
bool_idx = (a > 2) # Find the elements of a that are bigger than 2;
# this returns a numpy array of Booleans of the same
# shape as a, where each slot of bool_idx tells
# whether that element of a is > 2.
print(bool_idx)
# We use boolean array indexing to construct a rank 1 array
# consisting of the elements of a corresponding to the True values
# of bool_idx
print(a[bool_idx])
# We can do all of the above in a single concise statement:
print(a[a > 2])<jupyter_output>[3 4 5 6]
[3 4 5 6]
<jupyter_text>For brevity we have left out a lot of details about numpy array indexing; if you want to know more you should read the [documentation](https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.indexing.html).### DatatypesEvery numpy array is a grid of elements of the same type. Numpy provides a large set of numeric datatypes that you can use to construct arrays. Numpy tries to guess a datatype when you create an array, but functions that construct arrays usually also include an optional argument to explicitly specify the datatype. Here is an example:<jupyter_code>x = np.array([1, 2]) # Let numpy choose the datatype
y = np.array([1.0, 2.0]) # Let numpy choose the datatype
z = np.array([1, 2], dtype=np.int64) # Force a particular datatype
print(x.dtype, y.dtype, z.dtype)<jupyter_output>int64 float64 int64
<jupyter_text>Specify the type of the array elements<jupyter_code>x = np.array([1, 2, 3], dtype=np.float)
print(x, type(x), type(x[0]))<jupyter_output>[1. 2. 3.] <class 'numpy.ndarray'> <class 'numpy.float64'>
<jupyter_text>You can read all about numpy datatypes in the [documentation](http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html).### Array mathBasic mathematical functions operate elementwise on arrays, and are available both as operator overloads and as functions in the numpy module:<jupyter_code>x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
# Elementwise sum; both produce a numpy array
print(x + y)
print(np.add(x, y))
# Elementwise difference; both produce a numpy array
print(x - y)
print(np.subtract(x, y))
# Elementwise product; both produce a numpy array
# Notice, unlike MATLAB, this is not matrix multiplication
print(x * y)
print(np.multiply(x, y))
# Elementwise square root; produces the array
# [[ 1. 1.41421356]
# [ 1.73205081 2. ]]
print(np.sqrt(x))<jupyter_output>[[1. 1.41421356]
[1.73205081 2. ]]
<jupyter_text>Note that unlike MATLAB, `*` is elementwise multiplication, not matrix multiplication. You can use the following for matrix multiplication<jupyter_code># Matrix multiplication
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
v = np.array([9,10])
w = np.array([11, 12])
print(x @ y)
print(np.matmul(x, y))
print(x.dot(y))
print(np.dot(x, y))
# Inner product of vectors; both produce 219
print(v @ w)
print(v.dot(w))
# Matrix / vector product; both produce the rank 1 array [29 67]
print(x @ v)
print(x.dot(v))
# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
# [43 50]]
print(x @ y)
print(x.dot(y))<jupyter_output>[[19 22]
[43 50]]
[[19 22]
[43 50]]
<jupyter_text>Numpy provides many useful functions for performing computations on arrays; one of the most useful is `sum`:<jupyter_code>x = np.array([[1,2],[3,4]])
print(x)
print(np.sum(x)) # Compute sum of all elements; prints "10"
print(np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]"
print(np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]"<jupyter_output>[[1 2]
[3 4]]
10
[4 6]
[3 7]
<jupyter_text>You can find the full list of mathematical functions provided by numpy in the [documentation](http://docs.scipy.org/doc/numpy/reference/routines.math.html).
Apart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data in arrays. The simplest example of this type of operation is transposing a matrix; to transpose a matrix, simply use the T attribute of an array object:<jupyter_code>print(x)
print(x.T)
v = np.array([[1,2,3]])
print(v)
print(v.T)<jupyter_output>[[1 2 3]]
[[1]
[2]
[3]]
<jupyter_text>### BroadcastingBroadcasting is a powerful mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array.
For example, suppose that we want to add a constant vector to each row of a matrix. We could do it like this:<jupyter_code># We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x) # Create an empty matrix with the same shape as x
# Add the vector v to each row of the matrix x with an explicit loop
for i in range(4):
y[i, :] = x[i, :] + v
print(y)<jupyter_output>[[ 2 2 4]
[ 5 5 7]
[ 8 8 10]
[11 11 13]]
<jupyter_text>This works; however when the matrix `x` is very large, computing an explicit loop in Python could be slow. Note that adding the vector v to each row of the matrix `x` is equivalent to forming a matrix `vv` by stacking multiple copies of `v` vertically, then performing elementwise summation of `x` and `vv`. We could implement this approach like this:<jupyter_code>vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other
print(vv) # Prints "[[1 0 1]
# [1 0 1]
# [1 0 1]
# [1 0 1]]"
y = x + vv # Add x and vv elementwise
print(y)<jupyter_output>[[ 2 2 4]
[ 5 5 7]
[ 8 8 10]
[11 11 13]]
<jupyter_text>Numpy broadcasting allows us to perform this computation without actually creating multiple copies of v. Consider this version, using broadcasting:<jupyter_code>import numpy as np
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v # Add v to each row of x using broadcasting
print(y)<jupyter_output>[[ 2 2 4]
[ 5 5 7]
[ 8 8 10]
[11 11 13]]
<jupyter_text>The line `y = x + v` works even though `x` has shape `(4, 3)` and `v` has shape `(3,)` due to broadcasting; this line works as if v actually had shape `(4, 3)`, where each row was a copy of `v`, and the sum was performed elementwise.
Broadcasting two arrays together follows these rules:
1. If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length.
2. The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension.
3. The arrays can be broadcast together if they are compatible in all dimensions.
4. After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays.
5. In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension
If this explanation does not make sense, try reading the explanation from the [documentation](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) or this [explanation](http://wiki.scipy.org/EricsBroadcastingDoc).
Functions that support broadcasting are known as universal functions. You can find the list of all universal functions in the [documentation](http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs).
Here are some applications of broadcasting:<jupyter_code># Compute outer product of vectors
v = np.array([1,2,3]) # v has shape (3,)
w = np.array([4,5]) # w has shape (2,)
# To compute an outer product, we first reshape v to be a column
# vector of shape (3, 1); we can then broadcast it against w to yield
# an output of shape (3, 2), which is the outer product of v and w:
print(v.reshape((3, 1)) * w)
# Add a vector to each row of a matrix
x = np.array([[1,2,3], [4,5,6]])
# x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3),
# giving the following matrix:
print(x + v)
# Add a vector to each column of a matrix
# x has shape (2, 3) and w has shape (2,).
# If we transpose x then it has shape (3, 2) and can be broadcast
# against w to yield a result of shape (3, 2); transposing this result
# yields the final result of shape (2, 3) which is the matrix x with
# the vector w added to each column. Gives the following matrix:
print((x.T + w).T)
# Another solution is to reshape w to be a row vector of shape (2, 1);
# we can then broadcast it directly against x to produce the same
# output.
print(x + w.reshape((2, 1)))
# Multiply a matrix by a constant:
# x has shape (2, 3). Numpy treats scalars as arrays of shape ();
# these can be broadcast together to shape (2, 3), producing the
# following array:
print(x * 2)<jupyter_output>[[ 2 4 6]
[ 8 10 12]]
<jupyter_text>Broadcasting makes it easy to write vectorized code. Like in MATLAB it is highly recommended to vectorize operations on Numpy arrays. It leads to great performance improvements.
Here's an example of the performance difference:<jupyter_code>def add_lists(a, b):
c = []
for x, y in zip(a, b):
c.append(x + y)
return c
def add_lists2(a, b):
return a + b
N = 100000
x = np.ones((N, 100))
y = np.arange(N).reshape(N, 1)
z1 = add_lists(x, y)
z2 = add_lists2(x, y)
print(np.all(z1 == z2))
%timeit add_lists(x, y)
%timeit add_lists2(x, y) # Vectorized operation is >7x faster
print(x.shape)
print(y.shape)<jupyter_output>(100000, 100)
(100000, 1)
<jupyter_text>## MatplotlibMatplotlib is a plotting library. In this section give a brief introduction to the `matplotlib.pyplot` module, which provides a plotting system similar to that of MATLAB.<jupyter_code>import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>By running this special iPython command, we will be displaying plots inline:<jupyter_code>%matplotlib inline<jupyter_output><empty_output><jupyter_text>### PlottingThe most important function in `matplotlib` is plot, which allows you to plot 2D data. Here is a simple example:<jupyter_code># Compute the x and y coordinates for points on a sine curve
x = np.arange(0, 3 * np.pi, 0.1)
y = np.sin(x)
# Plot the points using matplotlib
plt.plot(x, y)<jupyter_output><empty_output><jupyter_text>With just a little bit of extra work we can easily plot multiple lines at once, and add a title, legend, and axis labels:<jupyter_code>y_sin = np.sin(x)
y_cos = np.cos(x)
# Plot the points using matplotlib
plt.plot(x, y_sin)
plt.plot(x, y_cos)
plt.xlabel('x axis label')
plt.ylabel('y axis label')
plt.title('Sine and Cosine')
plt.legend(['Sine', 'Cosine'])<jupyter_output><empty_output><jupyter_text>### Subplots You can plot different things in the same figure using the subplot function. Here is an example:<jupyter_code># Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Set up a subplot grid that has height 2 and width 1,
# and set the first such subplot as active.
plt.subplot(2, 1, 1)
# Make the first plot
plt.plot(x, y_sin)
plt.title('Sine')
# Set the second subplot as active, and make the second plot.
plt.subplot(2, 1, 2)
plt.plot(x, y_cos)
plt.title('Cosine')
# Show the figure.
plt.show()<jupyter_output><empty_output><jupyter_text>You can read much more about the `subplot` function in the [documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.subplot).### Exercise 1Write a function to implement the sigmoid function which is given by the following formula.
$$s(z) = \frac{e^{z}}{1 + e^{z}}$$
The function should take as input a numpy array and return the sigmoid element-wise
Write the test code that prints the sigmoid of $z, z \in [-10, 10]$<jupyter_code>def sigmoid(z):
raise NotImplementedError
# Define z in [-10, 10]
# Calculate sigmoid(z)
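# One possible solution, for reference only (the exercise intentionally leaves this blank):
# def sigmoid(z):
#     return np.exp(z) / (1 + np.exp(z))   # equivalently 1 / (1 + np.exp(-z))
# z = np.linspace(-10, 10, 21)
# print(sigmoid(z))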
# print result<jupyter_output><empty_output><jupyter_text>### Exercise 2
Plot the sigmoid function in the interval $z \in [-10, 10]$<jupyter_code>def plot_sigmoid(z):
s = sigmoid(z)
raise NotImplementedError
# plot_sigmoid(z)<jupyter_output><empty_output><jupyter_text>### Exercise 3
Generate a $1D$ distribution of white noise, following a normal distribution ~$N(3,0.1)$
Plot the result in the interval $x \in [-1, 1]$
Function `generate_noise` should return two numpy arrays, the interval $x$ and the generated noise
Hint: Use np.linspace <jupyter_code>def generate_noise(nsamples, mu=3, sigma=.1, low=-1, high=1):
raise NotImplementedError
# x, n = generate_noise(...)
# plot n<jupyter_output><empty_output><jupyter_text>### Exercise 4
Create a function $f$ that is $sin(x)$ when $x \in [-\pi, \pi]$ and white noise when $x \in [\pi, 2\pi]$.
Plot the result.
Function `calculate_f` should return $x$ and $f(x)$, $x \in [-\pi, 2\pi]$
Hint: Use np.concatenate and np.linspace<jupyter_code>def calculate_f(nsamples):
raise NotImplementedError
# x, f = calculate_f(...)
# plot f(x)<jupyter_output><empty_output>
|
no_license
|
/Numpy_Intro.ipynb
|
meconom/DSP_LabSupport
| 30 |
<jupyter_start><jupyter_text># Keras tutorial - Emotion Detection in Images of Faces
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
#### Why are we using Keras?
* Keras was developed to enable deep learning engineers to build and experiment with different models very quickly.
* Keras is an even higher-level framework than TensorFlow and provides additional abstractions on top of it.
* Being able to go from idea to result with the least possible delay is key to finding good models.
* However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you would still implement in TensorFlow rather than in Keras.
* That being said, Keras will work fine for many common models. ## Updates
#### If you were working on the notebook before this update...
* The current notebook is version "v2a".
* You can find your original work saved in the notebook with the previous version name ("v2").
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#### List of updates
* Changed back-story of model to "emotion detection" from "happy house."
* Cleaned/organized wording of instructions and commentary.
* Added instructions on how to set `input_shape`
* Added explanation of "objects as functions" syntax.
* Clarified explanation of variable naming convention.
* Added hints for steps 1,2,3,4## Load packages
* In this exercise, you'll work on the "Emotion detection" model, which we'll explain below.
* Let's load the required packages.<jupyter_code>import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline<jupyter_output>Using TensorFlow backend.
<jupyter_text>**Note**: As you can see, we've imported a lot of functions from Keras. You can use them by calling them directly in your code. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
In other words, unlike TensorFlow, you don't have to create the graph and then make a separate `sess.run()` call to evaluate those variables.## 1 - Emotion Tracking
* A nearby community health clinic is helping the local residents monitor their mental health.
* As part of their study, they are asking volunteers to record their emotions throughout the day.
* To help the participants more easily track their emotions, you are asked to create an app that will classify their emotions based on some pictures that the volunteers will take of their facial expressions.
* As a proof-of-concept, you first train your model to detect if someone's emotion is classified as "happy" or "not happy."
To build and train this model, you have gathered pictures of some volunteers in a nearby neighborhood. The dataset is labeled.
Run the following code to normalize the dataset and learn about its shapes.<jupyter_code>X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))<jupyter_output>number of training examples = 600
number of test examples = 150
X_train shape: (600, 64, 64, 3)
Y_train shape: (600, 1)
X_test shape: (150, 64, 64, 3)
Y_test shape: (150, 1)
<jupyter_text>**Details of the "Face" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
"""
input_shape: The height, width and channels as a tuple.
Note that this does not include the 'batch' as a dimension.
If you have a batch like 'X_train',
then you can provide the input_shape using
X_train.shape[1:]
"""
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
return model
```#### Variable naming convention
* Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow.
* Instead of creating unique variable names for each step and each layer, such as
```
X = ...
Z1 = ...
A1 = ...
```
* Keras re-uses and overwrites the same variable at each step:
```
X = ...
X = ...
X = ...
```
* The exception is `X_input`, which we kept separate since it's needed later.#### Objects as functions
* Notice how there are two pairs of parentheses in each statement. For example:
```
X = ZeroPadding2D((3, 3))(X_input)
```
* The first is a constructor call which creates an object (ZeroPadding2D).
* In Python, objects can be called as functions. Search for 'python object as function' and you can read this blog post [Python Pandemonium](https://medium.com/python-pandemonium/function-as-objects-in-python-d5215e6d1b0d). See the section titled "Objects as functions."
* The single line is equivalent to this:
```
ZP = ZeroPadding2D((3, 3)) # ZP is an object that can be called as a function
X = ZP(X_input)
```**Exercise**: Implement a `HappyModel()`.
* This assignment is more open-ended than most.
* Start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model.
* Later, come back and try out other model architectures.
* For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish.
* You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: Be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.<jupyter_code># GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
"""
Implementation of the HappyModel.
Arguments:
input_shape -- shape of the images of the dataset
(height, width, channels) as a tuple.
Note that this does not include the 'batch' as a dimension.
If you have a batch like 'X_train',
then you can provide the input_shape using
X_train.shape[1:]
Returns:
model -- a Model() instance in Keras
"""
### START CODE HERE ###
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
return model<jupyter_output><empty_output><jupyter_text>You have now built a function to describe your model. To train and test this model, there are four steps in Keras (a compact sketch follows the list):
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
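A minimal sketch of how the four calls chain together (the epoch count and batch size below are placeholder values, not the ones you are expected to use in the graded cells that follow):
```python
happyModel = HappyModel(X_train.shape[1:])                                              # 1. create
happyModel.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])  # 2. compile
happyModel.fit(x=X_train, y=Y_train, epochs=10, batch_size=16)                          # 3. train
print(happyModel.evaluate(x=X_test, y=Y_test))                                          # 4. evaluate
```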
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).#### Step 1: create the model.
**Hint**:
The `input_shape` parameter is a tuple (height, width, channels). It excludes the batch number.
Try `X_train.shape[1:]` as the `input_shape`.<jupyter_code>### START CODE HERE ### (1 line)
happyModel = HappyModel(X_train.shape[1:])
### END CODE HERE ###<jupyter_output><empty_output><jupyter_text>#### Step 2: compile the model
**Hint**:
Optimizers you can try include `'adam'`, `'sgd'` or others. See the documentation for [optimizers](https://keras.io/optimizers/)
The "happiness detection" is a binary classification problem. The loss function that you can use is `'binary_cross_entropy'`. Note that `'categorical_cross_entropy'` won't work with your data set as its formatted, because the data is an array of 0 or 1 rather than two arrays (one for each category). Documentation for [losses](https://keras.io/losses/)<jupyter_code>### START CODE HERE ### (1 line)
happyModel.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
### END CODE HERE ###<jupyter_output><empty_output><jupyter_text>#### Step 3: train the model
**Hint**:
Use the `'X_train'`, `'Y_train'` variables. Use integers for the epochs and batch_size
**Note**: If you run `fit()` again, the `model` will continue to train with the parameters it has already learned instead of reinitializing them.<jupyter_code>### START CODE HERE ### (1 line)
happyModel.fit(x=X_train, y = Y_train, epochs=15, batch_size=20)
### END CODE HERE ###<jupyter_output>Epoch 1/15
600/600 [==============================] - 12s - loss: 0.1094 - acc: 0.9567
Epoch 2/15
600/600 [==============================] - 12s - loss: 0.0857 - acc: 0.9700
Epoch 3/15
600/600 [==============================] - 12s - loss: 0.0822 - acc: 0.9717
Epoch 4/15
600/600 [==============================] - 12s - loss: 0.0788 - acc: 0.9650
Epoch 5/15
600/600 [==============================] - 12s - loss: 0.0469 - acc: 0.9883
Epoch 6/15
600/600 [==============================] - 12s - loss: 0.0290 - acc: 0.9883
Epoch 7/15
600/600 [==============================] - 12s - loss: 0.0420 - acc: 0.9883
Epoch 8/15
600/600 [==============================] - 12s - loss: 0.0878 - acc: 0.9700
Epoch 9/15
600/600 [==============================] - 12s - loss: 0.0573 - acc: 0.9800
Epoch 10/15
600/600 [==============================] - 12s - loss: 0.0561 - acc: 0.9800
Epoch 11/15
600/600 [==============================] - 12s - loss: 0.0291 - acc: 0.9883 [...]<jupyter_text>#### Step 4: evaluate model
**Hint**:
Use the `'X_test'` and `'Y_test'` variables to evaluate the model's performance.<jupyter_code>### START CODE HERE ### (1 line)
preds = happyModel.evaluate(x = X_test, y = Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))<jupyter_output>150/150 [==============================] - 1s
Loss = 0.550353128115
Test Accuracy = 0.820000001589
<jupyter_text>#### Expected performance
If your `happyModel()` function worked, its accuracy should be better than random guessing (50% accuracy).
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer.#### Tips for improving your model
If you have not yet achieved a very good accuracy (>= 80%), here are some tips:
- Use blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example).
You can then flatten the volume and use a fully-connected layer.
- Use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find 'adam' works well.
- If you get memory issues, lower your batch_size (e.g. 12)
- Run more epochs until you see the train accuracy no longer improves.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. Normally, you'll want separate dev and test sets. The dev set is used for parameter tuning, and the test set is used once to estimate the model's performance in production.## 3 - Conclusion
Congratulations, you have created a proof of concept for "happiness detection"!## Key Points to remember
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures.
- Remember the four steps in Keras:
1. Create
2. Compile
3. Fit/Train
4. Evaluate/Test ## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if it can classify whether your expression is "happy" or "not happy". To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is not happy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try! <jupyter_code>### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))<jupyter_output>[[ 0.]]
<jupyter_text>## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.<jupyter_code>happyModel.summary()
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))<jupyter_output><empty_output>
| no_license | /Convolutional Neural Networks/week2/KerasTutorial/Keras_Tutorial_v2a.ipynb | ahmadsadeed/deep_learning_ai | 9 |
<jupyter_start><jupyter_text># Bank customer data analysis
The dataset contains records of 10,000 bank customers, prepared for predicting whether a customer will close their account.
The dataset was published on Kaggle: https://www.kaggle.com/santoshd3/bank-customers
#### Goal: explore the dataset for interesting relationships<jupyter_code># Load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the dataset
data = pd.read_csv('https://raw.githubusercontent.com/Patrick-H-dev/PH/master/Churn%20Modeling.csv')
data.head(5)<jupyter_output><empty_output><jupyter_text>Explanation of selected columns:
- Tenure - tenure in years at the current workplace
- IsActiveMember - whether the customer is active, i.e. takes part in the bank's additional programs or holds insurance, bonds, etc.
- Exited - whether the customer closed the account (1 - yes, 0 - no)<jupyter_code># Confirm that we have 10,000 unique customer IDs, not e.g. duplicated records of a smaller group of customers taken from different time periods
len(data['CustomerId'].unique())
# Drop columns not needed for the analysis: 'Surname' and 'RowNumber' (pandas numbers the rows by default anyway)
data.drop(['RowNumber','Surname'], axis=1, inplace=True)
data.head()<jupyter_output><empty_output><jupyter_text>### A first look at the data to build some intuition<jupyter_code># Where are the customers from? Which currency are we dealing with?
data['Geography'].unique()
# Basic statistics (we already know the currency is Euro)
dane_liczbowe = data[['CreditScore','Age','Tenure','Balance','EstimatedSalary']]
dane_liczbowe.describe()
# Above we see an average account balance of 76,485 euro.
# Check what the average is after excluding accounts with no funds:
data_without_cash = data[data['Balance'] > 0]
data_without_cash['Balance'].mean()<jupyter_output><empty_output><jupyter_text>The average balance excluding empty accounts is about 100k euro, almost 25k more than the average for the whole dataset.
We check whether there are any accounts with a negative balance / overdraft:<jupyter_code>data[data['Balance'] < 0]<jupyter_output><empty_output><jupyter_text>There are no accounts with a negative balance.### Value counts<jupyter_code>to_check = ['Geography', 'Gender','NumOfProducts','HasCrCard','IsActiveMember','Exited']
for col in to_check:
print(data[col].value_counts(),"\n")<jupyter_output>France 5014
Germany 2509
Spain 2477
Name: Geography, dtype: int64
Male 5457
Female 4543
Name: Gender, dtype: int64
1 5084
2 4590
3 266
4 60
Name: NumOfProducts, dtype: int64
1 7055
0 2945
Name: HasCrCard, dtype: int64
1 5151
0 4849
Name: IsActiveMember, dtype: int64
0 7963
1 2037
Name: Exited, dtype: int64
<jupyter_text>Comments:
- Half of the accounts are French, 1/4 German and 1/4 Spanish. We can be confident the amounts are in Euro
- The split between men and women is fairly even
- The vast majority of customers hold 1 or 2 products (besides the account, e.g. insurance, bonds or other products/services)
- Over 70% of the bank's customers have a credit card
- About half of the customers are "active" - they take part in the bank's additional programs or campaigns
- 20% of the customers in this dataset closed their account
### We continue exploring the dataset<jupyter_code>fig = plt.figure(figsize=(13,7))
ax1 = fig.add_subplot(2,3,1)
ax1.hist(data['CreditScore'])
ax1.set_title('CreditScore')
ax2 = fig.add_subplot(2,3,2)
ax2.hist(data['Age'], bins = 10)
ax2.set_title('Age')
ax3 = fig.add_subplot(2,3,3)
ax3.hist(data['Tenure'])
ax3.set_title('Tenure')
ax4 = fig.add_subplot(2,3,4)
ax4.hist(data['Balance'])
ax4.set_title('Balance')
ax4 = fig.add_subplot(2,3,5)
ax4.hist(data['EstimatedSalary'], bins=30)
ax4.set_title('EstimatedSalary')<jupyter_output><empty_output><jupyter_text>- Credit score is roughly normally distributed
- The most common customer age is 30 to 40 years
- Most customers have between 1 and 9 years of tenure
- The balance (Balance) of customers who keep any money in this bank is roughly normally distributed, which to some extent is consistent with the credit score histogram
- Over 3,500 customers, i.e. more than 35%, keep no money in this account. Possible reasons:
  - Some people may have opened the account only for a one-off reward (banks' marketing campaigns)
  - People simply have no money. On the other hand, the Tenure histogram shows that only about 400 customers have less than 1 year of tenure, while more than 3,500 customers keep no money in the account
  - We remember that almost 80% of the dataset are customers who gave up the account, so withdrawing their money before closing it is understandable. On the other hand, one can ask: "Why did only 35% of people withdraw their money, given that 80% of this dataset are customers who gave up the account?"
  - People are unhappy with the account, so they moved their funds to another bank but have not yet closed the account in this one### Correlation matrix<jupyter_code>rs = np.random.RandomState(0)
df = pd.DataFrame(rs.rand(10, 10))
corr = data.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)<jupyter_output><empty_output><jupyter_text>
- Account closure (the Exited column) correlates slightly positively (0.12) with the account balance (the histograms confirmed this) and more strongly (0.29) with age - perhaps younger people are more uncompromising and more willing to walk away from a company that does not meet their expectations. This may also support the hypothesis about young people opening bank accounts just for a one-off reward.
- There is a noticeable negative correlation between the number of bank products attached to the account and the account balance (Balance): -0.3### Correlation matrix for the subset without customers with a zero balance<jupyter_code>data_without_cash = data[data['Balance'] > 0]
rs = np.random.RandomState(0)
df = pd.DataFrame(rs.rand(10, 10))
corr = data_without_cash.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)<jupyter_output><empty_output><jupyter_text>We observe a stronger relationship between age and churn, while the earlier relationship between balance and churn disappears, because we have excluded accounts with no funds.### Creating buckets for the Age and Exited columns<jupyter_code>posortowane = sorted(data['Age'].unique())
_, bins = pd.qcut(posortowane, q=12, retbins=True)
bins = np.around(bins).astype('int')
bins
data['Age_bucket'] = pd.cut(data['Age'],bins=bins)
records_in_bucket = data[['Age_bucket', 'Exited']].groupby('Age_bucket').count()
records_in_bucket = records_in_bucket.reset_index()
exited_per_bucket = data[['Age_bucket', 'Exited']].groupby('Age_bucket').sum().reset_index()
result = pd.concat([exited_per_bucket, records_in_bucket['Exited']], axis=1, join='inner')
result.columns = ['Age_bucket','exited_per_bucket','records_in_bucket']
result['exited_percent'] = round(result['exited_per_bucket'] / result['records_in_bucket'] * 100, 1)
result<jupyter_output><empty_output><jupyter_text>- Across the age buckets, the largest share of customers who left falls in the 47-64 age range.### Creating buckets for the Balance column<jupyter_code>posortowane = sorted(data['Balance'].unique())
bins = [-1, 25000, 50000, 75000, 100000, 125000, 150000, 175000, 200000, 1000000]
data['Balance_bucket'] = pd.cut(data['Balance'],bins=bins)
records_in_bucket = data[['Balance_bucket', 'Exited']].groupby('Balance_bucket').count()
records_in_bucket = records_in_bucket.reset_index()
exited_per_bucket = data[['Balance_bucket', 'Exited']].groupby('Balance_bucket').sum().reset_index()
result = pd.concat([exited_per_bucket, records_in_bucket['Exited']], axis=1, join='inner')
result.columns = ['Balance_bucket','exited_per_bucket','records_in_bucket']
result['exited_percent'] = round(result['exited_per_bucket'] / result['records_in_bucket'] * 100, 1)
result<jupyter_output><empty_output><jupyter_text>The largest share of customers who leave (almost 56%) hold funds in the largest bucket, above 200k euro<jupyter_code>data[['EstimatedSalary','Balance', 'Gender']].groupby('Gender').mean().round(0).astype('int').reset_index()<jupyter_output><empty_output>
| no_license | /Patryk Hesok - Analiza danych klientów banku.ipynb | Patrick-H-dev/PH | 11 |
<jupyter_start><jupyter_text>Alright this works pretty well. Now let's add a bit of noise<jupyter_code># First dataset to build: fixed rise and decay constants, fixed amplifying coefficient, fixed base, and small noise
taur = 1
taud = 3
a = 1
b = 0
sigma = a / 25
sparse = 0.3 # if random threshold is too low this could result in too many spikes
# Building dataset
Xset = np.zeros((setsize, sigsize))
Yset = np.zeros((setsize, sigsize))
for signal in range(setsize):
# Defining spiking array using threshold
randtemp = 0
while randtemp < sparse:
randtemp = np.random.random(1)
tocrop = 500 # crop at the beginning to delete high rise
spikes = 1 * (np.random.random(sigsize+tocrop) > randtemp)
cspikes = expconv(spikes, taur, taud)
Yset[signal, :] = spikes[tocrop:]
Xset_nomean = a*cspikes[tocrop:] + b + (sigma*np.random.randn(1, sigsize))
Xset[signal, :] = Xset_nomean - np.mean(Xset_nomean)
plt.plot(Yset[0, :])
plt.plot(Xset[0, :])
# Defining Keras model
deconv_in = Input(shape=(sigsize, 1))
x = Bidirectional(LSTM(100, input_shape=(sigsize, 1), return_sequences=True))(deconv_in)
deconv_out = Dense(1, activation='sigmoid')(x)
deconv_model = Model(inputs=deconv_in, outputs=deconv_out)
deconv_model.summary()
# Fit same model
deconv_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
Xset_temp = Xset.reshape(Xset.shape[0], Xset.shape[1], 1)
Yset_temp = Yset.reshape(Yset.shape[0], Yset.shape[1], 1)
deconv_model.fit(Xset_temp, Yset_temp, batch_size=32, epochs=5)
# Test model
deconvolved = 1 * (deconv_model.predict(Xset_temp[0, :].reshape(1, sigsize, 1)).reshape(sigsize, 1) >= 0.5)
plt.plot(Yset_temp[0, 0:100])
plt.plot(deconvolved[0:100])
# Just a small test using actual data from HDF5
path = '/home/ljp/Science/Hippolyte/ALL_DATASETS/2018-05-24Run08.h5'
f = h5py.File(path, 'r')
# Get the data
data = np.array(f['/Data/Brain/Analysis/DFF'])
data_temp = data[0:sigsize, 0].T
deconvolved = 1 * (deconv_model.predict(data_temp.reshape(1, sigsize, 1)).reshape(sigsize, 1) >= 0.5)
plt.plot(data_temp[0:100])
plt.plot(deconvolved[0:100])
# Defining Keras model
deconv_in = Input(shape=(sigsize, 1))
x = Bidirectional(LSTM(64, input_shape=(sigsize, 1), return_sequences=True))(deconv_in)
x = Bidirectional(LSTM(128, input_shape=(sigsize, 1), return_sequences=True))(x)
x = Dense(128, activation='relu')(x)
x = Dense(64, activation='relu')(x)
deconv_out = Dense(1, activation='sigmoid')(x)
deconv_model = Model(inputs=deconv_in, outputs=deconv_out)
deconv_model.summary()
# Fit model
deconv_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
Xset_temp = Xset.reshape(Xset.shape[0], Xset.shape[1], 1)
Yset_temp = Yset.reshape(Yset.shape[0], Yset.shape[1], 1)
deconv_model.fit(Xset_temp, Yset_temp, batch_size=32, epochs=5)
# Test model
deconvolved = 1 * (deconv_model.predict(Xset_temp[0, :].reshape(1, sigsize, 1)).reshape(sigsize, 1) >= 0.5)
plt.plot(Yset_temp[0, 0:100])
plt.plot(deconvolved[0:100])
# First dataset to build: fixed rise and decay constants, fixed amplifying coefficient, fixed base, and medium noise
taur = 1
taud = 3
a = 1
b = 0
sigma = a / 10
sparse = 0.3 # if random threshold is too low this could result in too many spikes
# Building dataset
Xset = np.zeros((setsize, sigsize))
Yset = np.zeros((setsize, sigsize))
for signal in range(setsize):
# Defining spiking array using threshold
randtemp = 0
while randtemp < sparse:
randtemp = np.random.random(1)
tocrop = 500 # crop at the beginning to delete high rise
spikes = 1 * (np.random.random(sigsize+tocrop) > randtemp)
cspikes = expconv(spikes, taur, taud)
Yset[signal, :] = spikes[tocrop:]
Xset_nomean = a*cspikes[tocrop:] + b + (sigma*np.random.randn(1, sigsize))
Xset[signal, :] = Xset_nomean - np.mean(Xset_nomean)
plt.plot(Yset[0, :])
plt.plot(Xset[0, :])
# Defining Keras model
deconv_in = Input(shape=(sigsize, 1))
x = Bidirectional(LSTM(64, input_shape=(sigsize, 1), return_sequences=True))(deconv_in)
x = Bidirectional(LSTM(128, input_shape=(sigsize, 1), return_sequences=True))(x)
x = Dense(128, activation='relu')(x)
x = Dense(64, activation='relu')(x)
deconv_out = Dense(1, activation='sigmoid')(x)
deconv_model = Model(inputs=deconv_in, outputs=deconv_out)
deconv_model.summary()
# Fit model
deconv_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
Xset_temp = Xset.reshape(Xset.shape[0], Xset.shape[1], 1)
Yset_temp = Yset.reshape(Yset.shape[0], Yset.shape[1], 1)
deconv_model.fit(Xset_temp, Yset_temp, batch_size=32, epochs=5)
# Test model
deconvolved = 1 * (deconv_model.predict(Xset_temp[0, :].reshape(1, sigsize, 1)).reshape(sigsize, 1) >= 0.5)
plt.plot(Yset_temp[0, 0:100])
plt.plot(deconvolved[0:100])
# Dataset to build: fixed rise and decay constants, fixed amplifying coefficient, fixed base, and high noise
taur = 1
taud = 3
a = 1
b = 0
sigma = a/5
sparse = 0.3 # if random threshold is too low this could result in too many spikes
# Building dataset
Xset = np.zeros((setsize, sigsize))
Yset = np.zeros((setsize, sigsize))
for signal in range(setsize):
# Defining spiking array using threshold
randtemp = 0
while randtemp < sparse:
randtemp = np.random.random(1)
tocrop = 500 # crop at the beginning to delete high rise
spikes = 1 * (np.random.random(sigsize+tocrop) > randtemp)
cspikes = expconv(spikes, taur, taud)
Yset[signal, :] = spikes[tocrop:]
Xset_nomean = a*cspikes[tocrop:] + b + (sigma*np.random.randn(1, sigsize))
Xset[signal, :] = Xset_nomean - np.mean(Xset_nomean)
plt.plot(Yset[0, :])
plt.plot(Xset[0, :])
# Defining Keras model
deconv_in = Input(shape=(sigsize, 1))
x = Conv1D(16, 3, strides=1, padding="same", activation="relu")(deconv_in)
x = Conv1D(32, 3, strides=1, padding="same", activation="relu")(x)
x = BatchNormalization(momentum=0.8)(x)
x = Dropout(0.25)(x)
x = Bidirectional(LSTM(32, input_shape=(sigsize, 1), return_sequences=True))(x)
x = Bidirectional(LSTM(64, input_shape=(sigsize, 1), return_sequences=True))(x)
x = BatchNormalization(momentum=0.8)(x)
x = Dropout(0.25)(x)
x = Bidirectional(LSTM(128, input_shape=(sigsize, 1), return_sequences=True))(x)
x = Bidirectional(LSTM(256, input_shape=(sigsize, 1), return_sequences=True))(x)
x = BatchNormalization(momentum=0.8)(x)
x = Dropout(0.25)(x)
x = Dense(256, activation='relu')(x)
x = Dense(128, activation='relu')(x)
x = Dense(64, activation='relu')(x)
deconv_out = Dense(1, activation='sigmoid')(x)
deconv_model = Model(inputs=deconv_in, outputs=deconv_out)
deconv_model.summary()
# Fit model
deconv_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
Xset_temp = Xset.reshape(Xset.shape[0], Xset.shape[1], 1)
Yset_temp = Yset.reshape(Yset.shape[0], Yset.shape[1], 1)
deconv_model.fit(Xset_temp, Yset_temp, batch_size=32, epochs=5)<jupyter_output>W1211 22:13:10.664560 1932 deprecation_wrapper.py:119] From C:\Users\Hippolyte Moulle\Anaconda3\lib\site-packages\keras\optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
W1211 22:13:10.696507 1932 deprecation_wrapper.py:119] From C:\Users\Hippolyte Moulle\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:3376: The name tf.log is deprecated. Please use tf.math.log instead.
W1211 22:13:10.705479 1932 deprecation.py:323] From C:\Users\Hippolyte Moulle\Anaconda3\lib\site-packages\tensorflow\python\ops\nn_impl.py:180: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
<jupyter_text>## Hum, using LSTM seems to make the algorithm reaaaaaally slow. 178 hours for one iteration is just out of this world. I am going to try using a U-Net to do this task instead<jupyter_code>from keras.models import Sequential
from keras.layers import UpSampling1D, Activation
from keras.layers.advanced_activations import LeakyReLU
model = Sequential()
model_length = 4
# Downsampling
model.add(Conv1D(32, input_shape=(sigsize, 1), kernel_size=3, strides=2, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
for i in range(model_length):
power_2 = 2**(i+6)
model.add(Conv1D(power_2, kernel_size=3, strides=2, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
# Upsampling
for i in range(model_length):
power_2 = 2**(9-i)
model.add(UpSampling1D())
model.add(Conv1D(512, kernel_size=3, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation('relu'))
model.add(UpSampling1D())
model.add(Conv1D(1, kernel_size=3, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation('sigmoid'))
model.summary()
# Fit model
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
Xset_temp = Xset.reshape(Xset.shape[0], Xset.shape[1], 1)
Yset_temp = Yset.reshape(Yset.shape[0], Yset.shape[1], 1)
model.fit(Xset_temp, Yset_temp, batch_size=32, epochs=5)<jupyter_output>Epoch 1/5
37280/50000 [=====================>........] - ETA: 43:02 - loss: 0.5312 - acc: 0.7283
| no_license | /Deconvolution/.ipynb_checkpoints/deconvolution_RNN-checkpoint.ipynb | moulli/detectNeuronCNN | 2 |
<jupyter_start><jupyter_text>Reinforcement Learning with OpenAI Gym
---
This notebook will create and test different reinforcement learning agents and environments.<jupyter_code>import tensorflow as tf
import gym
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline<jupyter_output><empty_output><jupyter_text>Load the Environment
---
Call `gym.make("environment name")` to load a new environment.
Check out the list of available environments at
Edit this cell to load different environments!<jupyter_code># TODO: Load an environment
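# --- A hedged sketch (not part of the original starter) of one way to fill in the
# TODOs in this cell; "CartPole-v0" is just an example environment, and the random
# rollout below also covers the "Make a random agent" cell further down. ---
env = gym.make("CartPole-v0")
print(env.observation_space)
print(env.action_space)

observation = env.reset()
done = False
total_reward = 0
while not done:
    action = env.action_space.sample()                  # random agent: sample a random action
    observation, reward, done, info = env.step(action)  # step the environment forward
    total_reward += reward
print("Random agent total reward:", total_reward)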
# TODO: Print observation and action spaces<jupyter_output><empty_output><jupyter_text>Run an Agent
---
Reset the environment before each run with `env.reset`
Step forward through the environment to get new observations and rewards over time with `env.step`
`env.step` takes a parameter for the action to take on this step and returns the following:
- Observations for this step
- Rewards earned this step
- "Done", a boolean value indicating if the game is finished
- Info - some debug information that some environments provide. <jupyter_code># TODO Make a random agent<jupyter_output><empty_output><jupyter_text>Policy Gradients
---
The policy gradients algorithm records gameplay over a training period, then runs the results of the actions chosen through a neural network, making successful actions that resulted in a reward more likely, and unsuccessful actions less likely.<jupyter_code># TODO Build the policy gradient neural network<jupyter_output><empty_output><jupyter_text>Discounting and Normalizing Rewards
---
In order to determine how "successful" a given action is, the policy gradient algorithm evaluates each action based on how many rewards were earned after it was performed in an episode.
The discount rewards function goes through each time step of an episode and tracks the total rewards earned from each step to the end of the episode.
For example, if an episode took 10 steps to finish, and the agent earns 1 point of reward every step, the rewards for each frame would be stored as
`[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]`
This allows the agent to credit early actions that didn't lose the game with future success, and later actions (that likely resulted in the end of the game) to get less credit.
One disadvantage of arranging rewards like this is that early actions didn't necessarily directly contribute to later rewards, so a **discount factor** is applied that scales rewards down over time. A discount factor < 1 means that rewards earned closer to the current time step will be worth more than rewards earned later.
With our reward example above, if we applied a discount factor of .90, the rewards would be stored as
`[ 6.5132156 6.12579511 5.6953279 5.217031 4.68559 4.0951 3.439
2.71 1.9 1. ]`
This means that the early actions still get more credit than later actions, but not the full value of the rewards for the entire episode.
Finally, the rewards are normalized to lower the variance between reward values in longer or shorter episodes.
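As a concrete illustration, here is a minimal sketch of such a discount-and-normalize function (one common NumPy implementation; treat the exact form as an assumption, since your own version may differ):
```python
def discount_and_normalize_rewards(episode_rewards, discount_factor=0.90):
    discounted = np.zeros_like(episode_rewards, dtype=np.float64)
    running_total = 0.0
    # Walk backwards so each step accumulates its own reward plus discounted future rewards
    for t in reversed(range(len(episode_rewards))):
        running_total = episode_rewards[t] + discount_factor * running_total
        discounted[t] = running_total
    # Normalize to reduce variance across episodes of different lengths
    return (discounted - discounted.mean()) / (discounted.std() + 1e-8)

# With 10 steps of reward 1 and a 0.90 discount factor, the intermediate `discounted`
# array matches the example above; the returned values are additionally normalized.
print(discount_and_normalize_rewards(np.ones(10)))
```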
You can tweak the discount factor as one of the hyperparameters of your model to find one that fits your task the best!<jupyter_code># TODO Create the discounted and normalized rewards function<jupyter_output><empty_output><jupyter_text>Training Procedure
---
The agent will play games and record the history of the episode. At the end of every game, the episode's history will be processed to calculate the **gradients** that the model learned from that episode.
Every few games the calculated gradients will be applied, updating the model's parameters with the lessons from the games so far.
While training, you'll keep track of average scores and render the environment occasionally to see your model's progress.<jupyter_code># TODO Create the training loop<jupyter_output><empty_output><jupyter_text>Testing the Model
---
This cell will run through games choosing actions without the learning process so you can see how your model has learned!<jupyter_code># TODO Create the testing loop
# Run to close the environment
env.close()<jupyter_output><empty_output>
| no_license | /reinforcement-learning-starter.ipynb | uzairgheewala/Stanford-project | 7 |
<jupyter_start><jupyter_text>## Menu Item Recommendation Exercise### [1] Import necessary modules<jupyter_code>import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import geohash
from sklearn.manifold import TSNE
import sklearn
import seaborn as sns
import lang_processor
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans<jupyter_output><empty_output><jupyter_text>### [2] Define datapaths and load the dataset<jupyter_code># Define datapaths
module_path = os.path.abspath('')
data_path = os.path.join(module_path, 'data')
file_path = os.path.join(data_path, 'products.gz')
dataset = pd.read_parquet(file_path, engine='pyarrow')
dataset = dataset.dropna()<jupyter_output><empty_output><jupyter_text>### [3] Some statistics on the Dataset<jupyter_code># unique products
print("Number of distinct Vendors: {}".format(dataset.vendor_id.nunique()))
print("Number of distinct Products: {}".format(dataset.product_id.nunique()))
print("Number of distinct Geohash: {}".format(dataset.vendor_geohash.nunique()))<jupyter_output>Number of distinct Vendors: 6464
Number of distinct Products: 116524
Number of distinct Geohash: 1834
<jupyter_text>### [4] Visualize the distribution of vendors across the city
* We need this to analyse the clusters of vendors.
* Specifically, we do NOT recommend a menu item by comparing two vendors which are far apart from each other
* Above is under the assumption that the delivery app would only recommend vendors that are within a 'defined' reachable distance from the customers
#### We could use the distribution below to target new vendors in the region not explored previously. This could increase the customer base since customers would usually prefer to order food from a restaurant in their neighbourhood.<jupyter_code>def map_geohash(item: tuple) -> list:
'''
    Decode a geohash string into [latitude, longitude] as floats
'''
cordinates = geohash.decode(item)
return [float(item) for item in cordinates]
def geo_distribution(geo_data: list, count=None) -> None:
"""Distribution of geohash in the city"""
geo_data = [map_geohash(item) for item in geo_data]
geo_data = np.asarray(geo_data)
palette = sns.color_palette("bright", 10)
if(count is not None):
count = np.asarray(count)
sns.scatterplot(geo_data[:,0], geo_data[:,1], size=count, palette=palette)
geo_data = dataset.vendor_geohash.unique().tolist() # unique geohashes; they are decoded to (x, y) coordinates inside geo_distribution
geo_distribution(geo_data)<jupyter_output><empty_output><jupyter_text>### [5] Visualize the distribution of orders in different geo locations
#### The following observations from the plot can help design a targeted recommendation system
* There are some clusters of high order density (hotspots) in the region
* Comparing the distribution of orders with the distribution of vendors, we note that some vendors have a very low order count in their neighbourhood (for example the two big clusters at the right and bottom of the figure)
* We can place more emphasis on vendors who have a low order count<jupyter_code>subset = dataset[['vendor_geohash', 'order_count']][:]
subset['total_count'] = subset[['order_count']].groupby(subset['vendor_geohash']).transform('sum')
subset = subset[['vendor_geohash', 'total_count']][:]
subset = subset.drop_duplicates()
subset = subset.sample(n=100)
geo_data = subset['vendor_geohash'].tolist()
count = subset['total_count'].tolist()
geo_distribution(geo_data, count)<jupyter_output><empty_output><jupyter_text>### [6] Based on the above, we provide a recommendation as follows:
* Cluster together vendors that offer a similar service and are in the same or adjacent neighbourhoods
* Each cluster now represents a homogeneous set of vendors (a peer group)
* For each cluster, we observe the statistics of the different menu items and make a recommendation as follows:
  * For two vendors A and B serving Fried Noodles, we recommend that the vendor with a lower-than-average order count consider improvements in terms of discounts and/or variations in the recipe (a small pandas sketch of this rule is included in the clustering cell of section [8] below)
  * The variation could be suggested by pooling the descriptions of similar and frequently sold items in the cluster
  * The discount factor and other offers can be suggested by pooling the prices of similar items in the neighbourhood.### [7] Data Clean-Up
* Process product name using language transformation defined in the `lang_processor.py` script
* Encode categorical product name using the TF-IDF transformation
* TF-IDF will result in a high-dimensional vector, and the computational cost of K-means would be high. We therefore apply a compressive transformation to the TF-IDF vectors using SVD. SVD has an advantage over PCA in that PCA operates on the covariance matrix, which is itself expensive to compute. Moreover, SVD is effective on sparse datasets, which is indeed our case<jupyter_code>dataset['product_name'] = dataset['product_name'].apply(lambda x: lang_processor.pre_process(x))
# instantiate labelencoder object
le = LabelEncoder()
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
product_feat = vectorizer.fit_transform(dataset['product_name'])
print("Shape of TF-IDF features from product name: {}".format(product_feat.shape))
# Tunable hyperparameters; tuning them is out of the scope of the current work and left for the future
svd = TruncatedSVD(n_components=10, n_iter=7, random_state=42)
product_feat = svd.fit_transform(product_feat)
print("Shape of transformed features from product name: {}".format(product_feat.shape))
# Combine the product and geo dataset as a final preparation for clustering
geo_feat = dataset.vendor_geohash.tolist() # get (x,y) cordiants for each geohash
geo_feat = [map_geohash(item) for item in geo_feat]
geo_feat = np.asarray(geo_feat)
print("Shape of transformed features from vendor geohash: {}".format(geo_feat.shape))<jupyter_output>Shape of transformed features from vendor geohash: (116524, 2)
<jupyter_text>### [8] Cluster the data into multiple buckets
* Suggest recommendations to vendors from each bucket.
* Use the data within the bucket to make a recommendation<jupyter_code># optimal value of `n_clusters` currently out of scope of the present work. To be determined in future
data = np.column_stack((geo_feat,product_feat))
# del geo_feat
# del product_feat
km = KMeans(n_clusters=50)
km.fit(data)
labels = km.labels_
dataset['labels'] = labels
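# --- Hedged sketch (not part of the original notebook): one way to turn the clusters
# into the recommendation described in section [6]. Column names follow the dataset
# used above; flagging vendors strictly below the cluster/product average is an
# assumed rule, not the author's stated method. ---
cluster_avg = (dataset.groupby(['labels', 'product_name'])['order_count']
                      .mean()
                      .rename('cluster_avg_order_count')
                      .reset_index())
flagged = dataset.merge(cluster_avg, on=['labels', 'product_name'])
# Vendors selling a product below the average of their peer group are candidates for a
# recommendation (discount, recipe variation pooled from similar items, etc.)
flagged = flagged[flagged['order_count'] < flagged['cluster_avg_order_count']]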
# Distribution of records in different clusters.
sns.countplot(labels)<jupyter_output><empty_output><jupyter_text>### [9] Analysis on distribution of product orders in different clusters
* Based on the distribution of clusters above, lets observe one big and one small cluster<jupyter_code>def analyise_cluster_distribution(cluster_id=1):
subset = dataset[['vendor_geohash', 'order_count', 'labels']][:]
subset = subset[subset['labels']==cluster_id]
print("number of points in cluster-{} is {}".format(cluster_id, len(subset)))
subset['total_count'] = subset[['order_count']].groupby(subset['vendor_geohash']).transform('sum')
subset = subset[['vendor_geohash', 'total_count', 'labels']][:]
subset = subset.drop_duplicates()
subset = subset.sample(n=50)
geo_data = subset['vendor_geohash'].tolist()
count = subset['total_count'].tolist()
geo_distribution(geo_data, count)
analyise_cluster_distribution(cluster_id=0)
analyise_cluster_distribution(cluster_id=3)<jupyter_output>number of points in cluster-0 is 20000
number of points in cluster-3 is 3317
<jupyter_text>### [10] Example-1: Product Recommendation in each cluster / peer-group
* Let's take cluster-2 as an example:
* #### We observe that this cluster mostly contains vendors serving beverages.
* #### We also observe that two vendors (#272 and #696) dominate the beverage market
<jupyter_code>subset = dataset[:]
subset = subset[subset['labels']==2][:]
subset = subset.sort_values(by='order_count', ascending=False)
print("Number of distinct Vendors: {}".format(subset.vendor_id.nunique()))
print("Number of distinct Products: {}".format(subset.product_id.nunique()))
print("Number of distinct Geohash: {}".format(subset.vendor_geohash.nunique()))
subset.head(50)
subset1 = subset.copy()
subset1['total_count'] = subset1[['order_count']].groupby(subset1['vendor_geohash']).transform('sum')
subset1 = subset1[['vendor_geohash', 'total_count']][:]
subset1 = subset1.drop_duplicates()
print("Total vendors [blue]: {}".format(len(subset1)))
geo_data = subset1['vendor_geohash'].tolist()
count = subset1['total_count'].tolist()
geo_distribution(geo_data, count)
subset1 = subset.copy()
subset1 = subset1[subset1['vendor_id'].isin(['272', '696', '831'])][:]
subset1['total_count'] = subset1[['order_count']].groupby(subset1['vendor_geohash']).transform('sum')
subset1 = subset1[['vendor_geohash', 'total_count']][:]
subset1 = subset1.drop_duplicates()
print("Top-k vendors [orange]: {}".format(len(subset1)))
geo_data = subset1['vendor_geohash'].tolist()
count = subset1['total_count'].tolist()
geo_distribution(geo_data, count)
del subset1<jupyter_output>Total vendors [blue]: 67
Top-k vendors [orange]: 3
<jupyter_text>### Recommending "Green Tea Cream Frappuccino" to vendors serving beverages
* Not enough market coverage in the west and the north of the city
* Vendors in this region are recommended to incorporate this product### [11] Example-2: Product Recommendation in each cluster / peer-group
* Let's take cluster-15 as an example:
* #### We observe that this cluster mostly contains vendors serving Chicken Sub or Chicken Ham.
* #### A few vendors dominate the market in this peer-group. For example, vendors: #1057, #1269, #390, and #1295
<jupyter_code>subset = dataset[:]
subset = subset[subset['labels']==15][:]
subset = subset.sort_values(by='order_count', ascending=False)
print("Number of distinct Vendors: {}".format(subset.vendor_id.nunique()))
print("Number of distinct Products: {}".format(subset.product_id.nunique()))
print("Number of distinct Geohash: {}".format(subset.vendor_geohash.nunique()))
subset.head(50)
subset1 = subset.copy()
subset1['total_count'] = subset1[['order_count']].groupby(subset1['vendor_geohash']).transform('sum')
subset1 = subset1[['vendor_geohash', 'total_count']][:]
subset1 = subset1.drop_duplicates()
print("Total vendors [blue]: {}".format(len(subset1)))
geo_data = subset1['vendor_geohash'].tolist()
count = subset1['total_count'].tolist()
geo_distribution(geo_data, count)
subset1 = subset.copy()
subset1 = subset1[subset1['vendor_id'].isin(['1057', '1269', '390', '1295'])][:]
subset1['total_count'] = subset1[['order_count']].groupby(subset1['vendor_geohash']).transform('sum')
subset1 = subset1[['vendor_geohash', 'total_count']][:]
subset1 = subset1.drop_duplicates()
print("Top-k vendors [orange]: {}".format(len(subset1)))
geo_data = subset1['vendor_geohash'].tolist()
count = subset1['total_count'].tolist()
geo_distribution(geo_data, count)
del subset1<jupyter_output>Total vendors [blue]: 83
Top-k vendors [orange]: 4
| no_license | /driver.ipynb | rahul99/menu-recommendation | 10 |
<jupyter_start><jupyter_text># If you are using colab<jupyter_code># # link to google drive, click on the given link and choose the google drive account you would like to be available to you
# from google.colab import drive
# drive.mount('/content/gdrive/')
# %cd ../content/gdrive/MyDrive/colab-ssh/LiSTra/notebooks/<jupyter_output><empty_output><jupyter_text># Dataset Statistics<jupyter_code># imports
import os
import subprocess
import numpy as np
import tqdm
# You may need to run this in your terminal to install sox
# ! sudo apt-get install sox
def get_audio_length(path):
"Get audio length in second"
output = subprocess.check_output(
['soxi -D \"%s\"' % path.strip()], shell=True)
return float(output)
def get_averall_stats(path = "../dataset/train.en-ln.csv",
audio_path = "../dataset/english/wav_verse/"):
list_wav = []
list_src_lang = []
list_tgt_lang = []
# Count number of words
number_word_src = 0
number_word_tgt = 0
# To keep track of unique words
unique_word_src = set()
unique_word_tgt = set()
    # Open and read the file; each line is: audio \t transcription \t translation
with open(path, 'r') as f: # Open file for read
for textline in f:
audio, transc, transl = textline.split('\t')
list_wav.append(get_audio_length(audio_path+"/"+audio))
list_src_lang.append(len(transc.split()))
list_tgt_lang.append(len(transl.split()))
unique_word_src.update(transc.split())
unique_word_tgt.update(transl.split())
print(f"We had a total of {len(list_wav)} audio files")
print(f"We had a total of {len(list_src_lang)} English files")
print(f"We had a total of {len(list_tgt_lang)} Lingala files")
    # Stats specific to the two languages in use; you may need to change this for your own study
print("\n#######################################################\n")
print(f"We have an average of {np.mean(list_wav)} audio length")
print(f"We have an average of {np.mean(list_src_lang)} English text length")
print(f"We have an average of {np.mean(list_tgt_lang)} Lingala text length")
print("\n#######################################################\n")
print(f"We have a total of {np.sum(list_wav)} audio length")
print(f"We have {len(unique_word_src)} English unique word")
print(f"We have {len(unique_word_tgt)} Lingala unique word")
    return list_wav, list_src_lang, list_tgt_lang
list_wav_test, list_src_lang_test, list_tgt_lang_test = get_averall_stats(
path = "../data/processed/tf_data/test.en-ln.csv", \
audio_path = "../data/external/LiSTra/dataset/english/wav_verse")
list_wav_train, list_src_lang_train, list_tgt_lang_train = get_averall_stats(
path = "../data/processed/tf_data/train.en-ln.csv",\
audio_path = "../data/external/LiSTra/dataset/english/wav_verse")<jupyter_output>We had a total of 23717 audio files
We had a total of 23717 English files
We had a total of 23717 Lingala files
#######################################################
We have an average of 9.287964473753002 audio length
We have an average of 24.271197874942025 English text length
We have an average of 25.9165155795421 Lingala text length
#######################################################
We have a total of 220282.65342399996 audio length
We have 13139 English unique word
We have 16808 Lingala unique word
<jupyter_text>## Total wav duration in hours<jupyter_code># Total hours in the test data
sum(list_wav_test)/3600
# Total hours in training data
sum(list_wav_train)/3600<jupyter_output><empty_output>
| no_license | /src/LiSTra stats.ipynb | dsfsi/2020-AMMI-salomon | 3 |
<jupyter_start><jupyter_text>
Linear Regression 1D: Training Two Parameters with Mini-Batch Gradient Descent
Table of Contents
In this Lab, you will practice training a model by using Mini-Batch Gradient Descent.
Make Some Data
Create the Model and Cost Function (Total Loss)
Train the Model: Batch Gradient Descent
Train the Model: Stochastic Gradient Descent with Dataset DataLoader
Train the Model: Mini Batch Gradient Descent: Batch Size Equals 5
Train the Model: Mini Batch Gradient Descent: Batch Size Equals 10
Estimated Time Needed: 30 min
PreparationWe'll need the following libraries:<jupyter_code># Import the libraries we need for this lab
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d<jupyter_output><empty_output><jupyter_text>The class plot_error_surfaces is just to help you visualize the data space and the parameter space during training and has nothing to do with PyTorch. <jupyter_code># The class for plotting the diagrams
class plot_error_surfaces(object):
# Constructor
def __init__(self, w_range, b_range, X, Y, n_samples = 30, go = True):
W = np.linspace(-w_range, w_range, n_samples)
B = np.linspace(-b_range, b_range, n_samples)
w, b = np.meshgrid(W, B)
Z = np.zeros((30, 30))
count1 = 0
self.y = Y.numpy()
self.x = X.numpy()
for w1, b1 in zip(w, b):
count2 = 0
for w2, b2 in zip(w1, b1):
Z[count1, count2] = np.mean((self.y - w2 * self.x + b2) ** 2)
count2 += 1
count1 += 1
self.Z = Z
self.w = w
self.b = b
self.W = []
self.B = []
self.LOSS = []
self.n = 0
if go == True:
plt.figure()
plt.figure(figsize = (7.5, 5))
plt.axes(projection = '3d').plot_surface(self.w, self.b, self.Z, rstride = 1, cstride = 1, cmap = 'viridis', edgecolor = 'none')
plt.title('Loss Surface')
plt.xlabel('w')
plt.ylabel('b')
plt.show()
plt.figure()
plt.title('Loss Surface Contour')
plt.xlabel('w')
plt.ylabel('b')
plt.contour(self.w, self.b, self.Z)
plt.show()
# Setter
def set_para_loss(self, W, B, loss):
self.n = self.n + 1
self.W.append(W)
self.B.append(B)
self.LOSS.append(loss)
# Plot diagram
def final_plot(self):
ax = plt.axes(projection = '3d')
ax.plot_wireframe(self.w, self.b, self.Z)
ax.scatter(self.W, self.B, self.LOSS, c = 'r', marker = 'x', s = 200, alpha = 1)
plt.figure()
plt.contour(self.w, self.b, self.Z)
plt.scatter(self.W, self.B, c = 'r', marker = 'x')
plt.xlabel('w')
plt.ylabel('b')
plt.show()
# Plot diagram
def plot_ps(self):
plt.subplot(121)
plt.ylim()
plt.plot(self.x, self.y, 'ro', label = "training points")
plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label = "estimated line")
plt.xlabel('x')
plt.ylabel('y')
plt.title('Data Space Iteration: '+ str(self.n))
plt.subplot(122)
plt.contour(self.w, self.b, self.Z)
plt.scatter(self.W, self.B, c = 'r', marker = 'x')
plt.title('Loss Surface Contour')
plt.xlabel('w')
plt.ylabel('b')
plt.show()<jupyter_output><empty_output><jupyter_text>Make Some Data Import PyTorch and set random seed:<jupyter_code># Import PyTorch library
import torch
torch.manual_seed(1)<jupyter_output><empty_output><jupyter_text>Generate values from -3 to 3 that create a line with a slope of 1 and a bias of -1. This is the line that you need to estimate. Add some noise to the data:<jupyter_code># Generate the data with noise and the line
X = torch.arange(-3, 3, 0.1).view(-1, 1)
f = 1 * X - 1
Y = f + 0.1 * torch.randn(X.size())<jupyter_output><empty_output><jupyter_text>Plot the results:<jupyter_code># Plot the line and the data
plt.plot(X.numpy(), Y.numpy(), 'rx', label = 'y')
plt.plot(X.numpy(), f.numpy(), label = 'f')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()<jupyter_output><empty_output><jupyter_text>Create the Model and Cost Function (Total Loss) Define the forward function: <jupyter_code># Define the prediction function
def forward(x):
return w * x + b<jupyter_output><empty_output><jupyter_text>Define the cost or criterion function: <jupyter_code># Define the cost function
def criterion(yhat, y):
return torch.mean((yhat - y) ** 2)<jupyter_output><empty_output><jupyter_text>Create a plot_error_surfaces object to visualize the data space and the parameter space during training:<jupyter_code># Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30)<jupyter_output><empty_output><jupyter_text>Train the Model: Batch Gradient Descent (BGD)Define train_model_BGD function.<jupyter_code># Define the function for training model
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
lr = 0.1
LOSS_BGD = []
def train_model_BGD(epochs):
for epoch in range(epochs):
Yhat = forward(X)
loss = criterion(Yhat, Y)
LOSS_BGD.append(loss)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
get_surface.plot_ps()
loss.backward()
w.data = w.data - lr * w.grad.data
b.data = b.data - lr * b.grad.data
w.grad.data.zero_()
        b.grad.data.zero_()<jupyter_output><empty_output><jupyter_text>Run 10 epochs of batch gradient descent (note: the data-space plot is one iteration ahead of the parameter-space plot). <jupyter_code># Run train_model_BGD with 10 iterations
train_model_BGD(10)<jupyter_output><empty_output><jupyter_text> Stochastic Gradient Descent (SGD) with Dataset DataLoaderCreate a plot_error_surfaces object to visualize the data space and the parameter space during training:<jupyter_code># Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30, go = False)<jupyter_output><empty_output><jupyter_text>Import Dataset and DataLoader libraries<jupyter_code># Import libraries
from torch.utils.data import Dataset, DataLoader<jupyter_output><empty_output><jupyter_text>Create Data class<jupyter_code># Create class Data
class Data(Dataset):
# Constructor
def __init__(self):
self.x = torch.arange(-3, 3, 0.1).view(-1, 1)
        self.y = 1 * self.x - 1  # target line; refer to the dataset's own x instead of the global X
self.len = self.x.shape[0]
# Getter
def __getitem__(self, index):
return self.x[index], self.y[index]
# Get length
def __len__(self):
return self.len<jupyter_output><empty_output><jupyter_text>Create a dataset object and a dataloader object: <jupyter_code># Create Data object and DataLoader object
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 1)<jupyter_output><empty_output><jupyter_text>Define train_model_SGD function for training the model.<jupyter_code># Define train_model_SGD function
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
LOSS_SGD = []
lr = 0.1
def train_model_SGD(epochs):
for epoch in range(epochs):
Yhat = forward(X)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
get_surface.plot_ps()
LOSS_SGD.append(criterion(forward(X), Y).tolist())
for x, y in trainloader:
yhat = forward(x)
loss = criterion(yhat, y)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
loss.backward()
w.data = w.data - lr * w.grad.data
b.data = b.data - lr * b.grad.data
w.grad.data.zero_()
b.grad.data.zero_()
get_surface.plot_ps()<jupyter_output><empty_output><jupyter_text>Run 10 epochs of stochastic gradient descent: bug data space is 1 iteration ahead of parameter space. <jupyter_code># Run train_model_SGD(iter) with 10 iterations
train_model_SGD(10)<jupyter_output><empty_output><jupyter_text>Mini Batch Gradient Descent: Batch Size Equals 5 Create a plot_error_surfaces object to visualize the data space and the parameter space during training:<jupyter_code># Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30, go = False)<jupyter_output><empty_output><jupyter_text>Create Data object and create a Dataloader object where the batch size equals 5:<jupyter_code># Create DataLoader object and Data object
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 5)<jupyter_output><empty_output><jupyter_text>Define train_model_Mini5 function to train the model.<jupyter_code># Define train_model_Mini5 function
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
LOSS_MINI5 = []
lr = 0.1
def train_model_Mini5(epochs):
for epoch in range(epochs):
Yhat = forward(X)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
get_surface.plot_ps()
LOSS_MINI5.append(criterion(forward(X), Y).tolist())
for x, y in trainloader:
yhat = forward(x)
loss = criterion(yhat, y)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
loss.backward()
w.data = w.data - lr * w.grad.data
b.data = b.data - lr * b.grad.data
w.grad.data.zero_()
b.grad.data.zero_()<jupyter_output><empty_output><jupyter_text>Run 10 epochs of mini-batch gradient descent: bug data space is 1 iteration ahead of parameter space. <jupyter_code># Run train_model_Mini5 with 10 iterations.
train_model_Mini5(10)<jupyter_output><empty_output><jupyter_text>Mini Batch Gradient Descent: Batch Size Equals 10 Create a plot_error_surfaces object to visualize the data space and the parameter space during training:<jupyter_code># Create a plot_error_surfaces object.
get_surface = plot_error_surfaces(15, 13, X, Y, 30, go = False)<jupyter_output><empty_output><jupyter_text>Create Data object and create a Dataloader object batch size equals 10<jupyter_code># Create DataLoader object
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 10)<jupyter_output><empty_output><jupyter_text>Define train_model_Mini10 function for training the model.<jupyter_code># Define train_model_Mini10 function
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
LOSS_MINI10 = []
lr = 0.1
def train_model_Mini10(epochs):
for epoch in range(epochs):
Yhat = forward(X)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
get_surface.plot_ps()
LOSS_MINI10.append(criterion(forward(X),Y).tolist())
for x, y in trainloader:
yhat = forward(x)
loss = criterion(yhat, y)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
loss.backward()
w.data = w.data - lr * w.grad.data
b.data = b.data - lr * b.grad.data
w.grad.data.zero_()
b.grad.data.zero_()<jupyter_output><empty_output><jupyter_text>Run 10 epochs of mini-batch gradient descent: bug data space is 1 iteration ahead of parameter space. <jupyter_code># Run train_model_Mini5 with 10 iterations.
train_model_Mini10(10)<jupyter_output><empty_output><jupyter_text>Plot the loss for each epoch: <jupyter_code># Plot out the LOSS for each method
plt.plot(LOSS_BGD,label = "Batch Gradient Descent")
plt.plot(LOSS_SGD,label = "Stochastic Gradient Descent")
plt.plot(LOSS_MINI5,label = "Mini-Batch Gradient Descent, Batch size: 5")
plt.plot(LOSS_MINI10,label = "Mini-Batch Gradient Descent, Batch size: 10")
plt.legend()<jupyter_output><empty_output><jupyter_text>PracticePerform mini batch gradient descent with a batch size of 20. Store the total loss for each epoch in the list LOSS20. <jupyter_code># Practice: Perform mini batch gradient descent with a batch size of 20.
dataset = Data()
trainloader = DataLoader(dataset = dataset, batch_size = 20)
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
LOSS_MINI20 = []
lr = 0.1
def my_train_model(epochs):
for epoch in range(epochs):
Yhat = forward(X)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
get_surface.plot_ps()
LOSS_MINI20.append(criterion(forward(X), Y).tolist())
for x, y in trainloader:
yhat = forward(x)
loss = criterion(yhat, y)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
loss.backward()
w.data = w.data - lr * w.grad.data
b.data = b.data - lr * b.grad.data
w.grad.data.zero_()
b.grad.data.zero_()
my_train_model(10)<jupyter_output><empty_output><jupyter_text>Double-click here for the solution.
<!--
trainloader = DataLoader(dataset = dataset, batch_size = 20)
w = torch.tensor(-15.0, requires_grad = True)
b = torch.tensor(-10.0, requires_grad = True)
LOSS_MINI20 = []
lr = 0.1
def my_train_model(epochs):
for epoch in range(epochs):
Yhat = forward(X)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), criterion(Yhat, Y).tolist())
get_surface.plot_ps()
LOSS_MINI20.append(criterion(forward(X), Y).tolist())
for x, y in trainloader:
yhat = forward(x)
loss = criterion(yhat, y)
get_surface.set_para_loss(w.data.tolist(), b.data.tolist(), loss.tolist())
loss.backward()
w.data = w.data - lr * w.grad.data
b.data = b.data - lr * b.grad.data
w.grad.data.zero_()
b.grad.data.zero_()
my_train_model(10)
-->
Plot a graph that shows the LOSS results for all the methods.<jupyter_code># Practice: Plot a graph to show all the LOSS functions
plt.plot(LOSS_BGD, label = "Batch Gradient Descent")
plt.plot(LOSS_SGD, label = "Stochastic Gradient Descent")
plt.plot(LOSS_MINI5, label = "Mini-Batch Gradient Descent,Batch size:5")
plt.plot(LOSS_MINI10, label = "Mini-Batch Gradient Descent,Batch size:10")
plt.plot(LOSS_MINI20, label = "Mini-Batch Gradient Descent,Batch size:20")
plt.legend()<jupyter_output><empty_output>
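As an aside (not part of the original lab): the manual `w.data`/`b.data` updates used in the training functions above can be delegated to `torch.optim.SGD`. A minimal sketch, reusing the `forward`, `criterion`, `Data`, `X` and `Y` objects defined earlier:

```python
# Sketch: the same mini-batch gradient descent, but letting torch.optim handle the update.
import torch
from torch.utils.data import DataLoader

w = torch.tensor(-15.0, requires_grad=True)
b = torch.tensor(-10.0, requires_grad=True)
optimizer = torch.optim.SGD([w, b], lr=0.1)
trainloader = DataLoader(dataset=Data(), batch_size=5)

LOSS_OPT = []
for epoch in range(10):
    LOSS_OPT.append(criterion(forward(X), Y).tolist())  # epoch-level loss on the full data
    for x, y in trainloader:
        loss = criterion(forward(x), y)
        optimizer.zero_grad()   # replaces w.grad.data.zero_() and b.grad.data.zero_()
        loss.backward()
        optimizer.step()        # replaces the manual w.data / b.data updates
```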
<jupyter_start><jupyter_text># ,-.
# ,--' ~.).
# ,' `.
# ; (((__ __)))
# ; ( (#) ( (#)
# | \_/___\_/|
# " ,-' `__".
# ( ( ._ ____`.)--._ _
# `._ `-.`-' \(`-' _ `-. _,-' `-/`.
# ,') `.`._)) ,' `. `. ,',' ;
# .' . `--' / ). `. ;
# ; `- / ' ) ;
# \ ') ,'
# \ ,' ;
# \ `~~~' ,'
# `. _,'
# `. ,--'
# `-._________,--' <jupyter_code># muddle positive time_delta data to generate negative samples
NUM_EXAMPLES = 100000
SEQ_LENGTH = 50
assert all(len(data) >= SEQ_LENGTH for data in [positive_data, negative_data]), 'need at least SEQ_LENGTH events'
def random_sub_seq(xs):
# TODO extend to support variable size sequences
start = int(np.random.uniform(0, len(xs) - SEQ_LENGTH))
end = start + SEQ_LENGTH
return xs[start:end]
train_input = []
train_output = []
for i in range(NUM_EXAMPLES * 10):
if np.random.rand() < 0.5:
# positive sample
train_input.append(random_sub_seq(positive_data))
train_output.append([1, 0])
else:
# negative sample
train_input.append(random_sub_seq(negative_data))
train_output.append([0, 1])
test_input = train_input[NUM_EXAMPLES:]
test_output = train_output[NUM_EXAMPLES:]
train_input = train_input[:NUM_EXAMPLES]
train_output = train_output[:NUM_EXAMPLES]
print("test and training data loaded")
data = tf.placeholder(tf.float32, [None, SEQ_LENGTH, 3])  # shape: (batch size, sequence length, features per event)
target = tf.placeholder(tf.float32, [None, 2])
num_hidden = 32
cell = tf.nn.rnn_cell.LSTMCell(num_hidden,state_is_tuple=True)
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
cross_entropy = -tf.reduce_sum(target * tf.log(prediction))
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, "./Models/model2/model.ckpt")
print("Model restored.")
good = [test_input[i] for i in range(len(test_output)) if test_output[i] == [1, 0]]
bad = [test_input[i] for i in range(len(test_output)) if test_output[i] == [0, 1]]
print("Calculating true positive....")
pos = sess.run(prediction,{data: good}).mean(axis=0)[0]
print("true positive: ",pos )
print("Calculating true negative....")
neg = sess.run(prediction,{data: bad}).mean(axis=0)[1]
print("true negative: ",neg )
print(len(good), len(bad))
history = []
data_source = positive_data
data_len = 100
for i in range(10):
history += data_source[i * data_len: (i+1) * data_len]
if data_source == positive_data:
data_source = negative_data
else:
data_source = positive_data
prediction_history = []
for i in range(0, len(history) - SEQ_LENGTH):
sample = history[i: i + SEQ_LENGTH]
result = sess.run(prediction,{data: [sample]})[0]
prediction_history.append((result[0] - 0.5) * 2)
plt.scatter(range(len(prediction_history)), prediction_history, color=['g' if h > 0 else 'r' for h in prediction_history])
plt.plot([0] * len(prediction_history), color='b')<jupyter_output><empty_output>
<jupyter_start><jupyter_text>
The objective of this tutorial is to illustrate the use of the *samplics* estimation APIs. There are two main classes: *TaylorEstimator* and *ReplicateEstimator*. The former uses linearization (Taylor series) methods to estimate the variance of population parameters, while the latter uses replicate-based methods (bootstrap, BRR/Fay, and jackknife) to estimate the variance. <jupyter_code>from IPython.core.display import Image, display
import numpy as np
import pandas as pd
import samplics
from samplics.estimation import TaylorEstimator, ReplicateEstimator<jupyter_output><empty_output><jupyter_text>## Taylor approximation <jupyter_code>nhanes2f = pd.read_csv("../../../datasets/docs/nhanes2f.csv")
nhanes2f[["psuid", "stratid", "highbp", "highlead", "finalwgt"]].head()<jupyter_output><empty_output><jupyter_text>We calculate the survey mean of the level of zinc using Stata and we get the following <jupyter_code>Image(filename="zinc_mean_stata_str.png")<jupyter_output><empty_output><jupyter_text>Using *samplics*, the same estimate can be obtained using the snippet of code below.<jupyter_code>zinc_mean_str = TaylorEstimator("mean").estimate(
y=nhanes2f["zinc"],
samp_weight=nhanes2f["finalwgt"],
stratum=nhanes2f["stratid"],
psu=nhanes2f["psuid"],
remove_nan=True,
)
print(zinc_mean_str)<jupyter_output>SAMPLICS - Estimation of Mean
Number of strata: 31
Number of psus: 62
Degree of freedom: 31
DOMAINS MEAN SE LCI UCI CV
0 __none__ 87.182067 0.494483 86.173563 88.190571 0.005672
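As a quick sanity check on the output above (assuming, on my part, that the confidence interval is built as mean $\pm\, t_{0.975,\mathrm{df}} \times$ SE with df = 31), the bounds can be reproduced by hand:

```python
# Reproduce the LCI/UCI printed above from the reported mean, SE and degrees of freedom.
from scipy.stats import t

mean, se, df = 87.182067, 0.494483, 31
half_width = t.ppf(0.975, df) * se
print(mean - half_width, mean + half_width)  # ~86.1735 and ~88.1906, matching the table
```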
<jupyter_text>
Let's remove the stratum parameter; then we get the following with Stata <jupyter_code>Image(filename="zinc_mean_stata_nostr.png")<jupyter_output><empty_output><jupyter_text>with samplics, we get ...<jupyter_code>zinc_mean_nostr = TaylorEstimator("mean").estimate(
y=nhanes2f["zinc"], samp_weight=nhanes2f["finalwgt"], psu=nhanes2f["psuid"], remove_nan=True
)
print(zinc_mean_nostr)<jupyter_output>SAMPLICS - Estimation of Mean
Number of strata: 1
Number of psus: 2
Degree of freedom: 1
DOMAINS MEAN SE LCI UCI CV
0 __none__ 87.182067 0.742622 77.746158 96.617976 0.008518
<jupyter_text>
The other parameters currently implemented in *TaylorEstimator* are TOTAL, PROPORTION and RATIO. TOTAL and PROPORTION have the same function call as the MEAN parameter. For the RATIO parameter, it is necessary to provide the parameter *x*. <jupyter_code>Image(filename="ratio_highbp_highlead.png")
ratio_bp_lead = TaylorEstimator("ratio").estimate(
y=nhanes2f["highbp"],
samp_weight=nhanes2f["finalwgt"],
x=nhanes2f["highlead"],
stratum=nhanes2f["stratid"],
psu=nhanes2f["psuid"],
remove_nan=True,
)
print(ratio_bp_lead)<jupyter_output>SAMPLICS - Estimation of Ratio
Number of strata: 31
Number of psus: 62
Degree of freedom: 31
DOMAINS RATIO SE LCI UCI CV
0 __none__ 5.93255 0.553058 4.80458 7.060519 0.093224
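Since TOTAL and PROPORTION use the same call signature as MEAN (as noted above), a proportion estimate for, say, `highbp` would presumably look like the following untested sketch, reusing the same `nhanes2f` columns:

```python
# Sketch: weighted proportion of high blood pressure with Taylor-linearized variance.
highbp_prop = TaylorEstimator("proportion").estimate(
    y=nhanes2f["highbp"],
    samp_weight=nhanes2f["finalwgt"],
    stratum=nhanes2f["stratid"],
    psu=nhanes2f["psuid"],
    remove_nan=True,
)
print(highbp_prop)
```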
<jupyter_text>## Replicate-based variance estimation #### Bootstrap <jupyter_code>nmihs_bs = pd.read_csv("../../../datasets/docs/nmihs_bs.csv")
nmihs_bs.describe()<jupyter_output><empty_output><jupyter_text>
Let's estimate the average birth weight using the bootstrap weights. From Stata, we get <jupyter_code>Image(filename="mean_birthwgt_bs.png")<jupyter_output><empty_output><jupyter_text>Using *samplics*, we obtain ...<jupyter_code># rep_wgt_boot = nmihsboot.loc[:, "bsrw1":"bsrw1000"]
birthwgt = ReplicateEstimator("bootstrap", "mean").estimate(
y=nmihs_bs["birthwgt"],
samp_weight=nmihs_bs["finwgt"],
rep_weights=nmihs_bs.loc[:, "bsrw1":"bsrw1000"],
remove_nan=True,
)
print(birthwgt)<jupyter_output>SAMPLICS - Estimation of Mean
Number of strata: None
Number of psus: None
Degree of freedom: 999
DOMAINS MEAN SE LCI UCI CV
0 __none__ 3355.452419 6.520638 3342.656702 3368.248137 0.001943
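To see what the replicate machinery is doing conceptually: a bootstrap standard error is essentially the spread of the weighted mean recomputed under each replicate-weight column. The sketch below is one common textbook variant, not the samplics internals (its exact scaling and replicate coefficients may differ):

```python
# Rough illustration of a bootstrap SE computed directly from the replicate weights.
import numpy as np

mask = nmihs_bs["birthwgt"].notna()
y = nmihs_bs.loc[mask, "birthwgt"].to_numpy()
rep_w = nmihs_bs.loc[mask, "bsrw1":"bsrw1000"].to_numpy()

rep_means = np.array([np.average(y, weights=rep_w[:, r]) for r in range(rep_w.shape[1])])
se_boot = np.sqrt(np.mean((rep_means - rep_means.mean()) ** 2))
print(se_boot)  # should be in the same ballpark as the SE reported above
```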
<jupyter_text>#### Balanced repeated replication (BRR) <jupyter_code>nhanes2brr = pd.read_csv("../../../datasets/docs/nhanes2brr.csv")
nhanes2brr.describe()<jupyter_output><empty_output><jupyter_text>
Let's estimate the ratio of weight over height using the brr weights. From Stata, we get <jupyter_code>Image(filename="ratio_weight_height_brr.png")
brr = ReplicateEstimator("brr", "ratio")
ratio_wgt_hgt = brr.estimate(
y=nhanes2brr["weight"],
samp_weight=nhanes2brr["finalwgt"],
x=nhanes2brr["height"],
rep_weights=nhanes2brr.loc[:, "brr_1":"brr_32"],
remove_nan=True,
)
print(ratio_wgt_hgt)<jupyter_output>SAMPLICS - Estimation of Ratio
Number of strata: None
Number of psus: None
Degree of freedom: 16
DOMAINS RATIO SE LCI UCI CV
0 __none__ 0.426812 0.00089 0.424924 0.428699 0.002086
<jupyter_text>#### Jackknife <jupyter_code>nhanes2jknife = pd.read_csv("../../../datasets/docs/nhanes2jknife.csv")
nhanes2jknife.describe()<jupyter_output><empty_output><jupyter_text>
Let's estimate the ratio of weight over height using the jackknife weights. From Stata, we get <jupyter_code>Image(filename="ratio_weight_height_jknife.png")<jupyter_output><empty_output><jupyter_text>In this case, stratification was used to calculate the jackknife weights. The stratum variable is not indicated in the dataset or survey design description. However, it says that the number of strata is 31 and the number of replicates is 62. Hence, the jackknife replicate coefficient is $(n_h - 1) / n_h = (2-1) / 2 = 0.5$. Now we can call *estimate()* and specify *rep_coefs = 0.5*.<jupyter_code>jackknife = ReplicateEstimator("jackknife", "ratio")
ratio_wgt_hgt2 = jackknife.estimate(
y=nhanes2jknife["weight"],
samp_weight=nhanes2jknife["finalwgt"],
x=nhanes2jknife["height"],
rep_weights=nhanes2jknife.loc[:, "jkw_1":"jkw_62"],
rep_coefs=0.5,
remove_nan=True,
)
print(ratio_wgt_hgt2)<jupyter_output>SAMPLICS - Estimation of Ratio
Number of strata: None
Number of psus: None
Degree of freedom: 61
DOMAINS RATIO SE LCI UCI CV
0 __none__ 0.426812 0.000889 0.425035 0.428589 0.002082
<jupyter_start><jupyter_text>
Number of points for this notebook: 4
Deadline: March 10, 2021 (Wednesday) 23:00
# Exercise 1.2. Train a multilayer perceptron (MLP) network in numpy.
In this exercise, we implement training of a multilayer perceptron network using the `numpy` library.
* We implement forward and backward computations required for computing the gradients with backpropagation.
* We train an MLP on a toy data set.
We will implement an MLP with two hidden layers, as shown in this figure:
We will build the following computational graph:
Note that the computational graph contains a mean-squared error (MSE) loss because we solve a regression problem.
Recall what we discussed in the lecture:
<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
skip_training = False # Set this flag to True before validation and submission
# During grading, this cell sets skip_training to True
# skip_training = True<jupyter_output><empty_output><jupyter_text>## 1. Gradient of the loss
We start by implementing the last block of the computational graph which is the mean-squared error loss:
$$
c = \frac{1}{N} \sum_{i=1}^N (y_i - t_i)^2
$$
where $y_i$ are the elements of an input vector $\mathbf{y}$ and $t_i$ are the elements of the target vector $\mathbf{t}$. Differentiating element-wise gives the gradient that `backward` must return: $\frac{\partial c}{\partial y_i} = \frac{2}{N}(y_i - t_i)$.
In the code below, we define a class that performs forward and backward computations of this loss function. Your task is to implement the `backward` function which should compute the gradient $\frac{\partial c}{\partial \mathbf{y}}$.
Note that we process all $N$ training examples at the same time. Therefore, our implementation operates with two-dimensional arrays of shape `(n_samples, some_size)` where `n_samples` is the number $N$ of training samples and `some_size` is the size of an intermediate vector produced inside the MLP (e.g., the number of neurons in a hidden layer).<jupyter_code>class MSELoss:
def forward(self, y, target):
"""
Args:
y of shape (n_samples, ysize): Inputs of the loss function (can be, e.g., an output of a neural network).
target of shape (n_samples, ysize): Targets.
Returns:
loss (float): The loss value.
"""
self.diff = diff = y - target # Keep this for backward computations
c = np.sum(np.square(diff)) / diff.size
return c
def backward(self):
"""
Returns:
dy of shape (n_samples, ysize): Gradient of the MSE loss wrt the inputs.
"""
assert hasattr(self, 'diff'), "Need to call forward() first"
return 2 * self.diff / self.diff.size
def test_MSELoss_shapes():
y = np.random.randn(3)
target = np.zeros(3) # Dummy target
loss = MSELoss() # Create the loss
loss_value = loss.forward(y, target) # Do forward computations
dy = loss.backward() # Do backward computations
assert dy.shape == y.shape, f"Bad dy.shape: {dy.shape}"
print('Success')
test_MSELoss_shapes()<jupyter_output>Success
<jupyter_text>We can test our implementation by comparing the results of our backward computations with a [numerical estimate](https://en.wikipedia.org/wiki/Numerical_differentiation) of the gradient: Suppose we have function
$f(\mathbf{x})$ of a vector input $\mathbf{x}$, then the gradient can be estimated numerically at a (randomly chosen) input $\mathbf{x}$ by
$$ \nabla f(\mathbf{x}) \approx \frac{f(\mathbf{x} + \epsilon) - f(\mathbf{x} - \epsilon)}{2\epsilon}$$
using small $\epsilon$. Note that the numerical gradient is an approximation of the analytical one and therefore there will be a small numerical difference between them.
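For concreteness, a minimal central-difference implementation could look like the sketch below; the actual `numerical_gradient` imported from `tests` in the next cell may differ in details such as the choice of $\epsilon$:

```python
import numpy as np

def numerical_gradient_sketch(fun, x, eps=1e-6):
    """Central-difference estimate of d fun / d x for a function with a 1-D input
    and a 1-D output. Returns an array of shape (output_size, input_size)."""
    x = np.asarray(x, dtype=float)
    y0 = np.atleast_1d(fun(x))
    grad = np.zeros((y0.size, x.size))
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps   # perturb one coordinate at a time
        grad[:, i] = (np.atleast_1d(fun(x + step)) - np.atleast_1d(fun(x - step))) / (2 * eps)
    return grad
```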
The function that we import in the cell below implements numerical computations of the gradient of a given function.<jupyter_code>from tests import numerical_gradient
# We now compare our analytical computations of the gradient with its numerical estimate
def test_MSELoss_backward():
y = np.random.randn(3)
target = np.zeros(3) # Dummy target
loss = MSELoss() # Create the loss
loss_value = loss.forward(y, target) # Do forward computations
dy = loss.backward()
print('Analytical gradient:\n', dy)
dy_num = numerical_gradient(lambda y: loss.forward(y, target), y)
print('Numerical gradient:\n', dy_num[0])
assert np.allclose(dy, dy_num), 'Analytical and numerical results differ'
print('Success')
test_MSELoss_backward()
# This cell tests MSELoss<jupyter_output><empty_output><jupyter_text>## 2. Linear layer
Next we implement a linear layer.
The forward computations of the linear layer are
$$
\mathbf{y} = \mathbf{W} \mathbf{x} + \mathbf{b}.
$$
In the backward pass, the linear layer receives the gradient wrt the outputs $\frac{\partial c}{\partial \mathbf{y}}$, and it needs to compute (the standard expressions are written out after this list):
* the gradients wrt the layer parameters $\mathbf{W}$ and $\mathbf{b}$
* the gradient $\frac{\partial c}{\partial \mathbf{x}}$ wrt the inputs.
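For a single sample with $\mathbf{y} = \mathbf{W} \mathbf{x} + \mathbf{b}$, the chain rule gives the standard expressions
$$
\frac{\partial c}{\partial \mathbf{x}} = \mathbf{W}^\top \frac{\partial c}{\partial \mathbf{y}}, \qquad
\frac{\partial c}{\partial \mathbf{W}} = \frac{\partial c}{\partial \mathbf{y}} \, \mathbf{x}^\top, \qquad
\frac{\partial c}{\partial \mathbf{b}} = \frac{\partial c}{\partial \mathbf{y}},
$$
and for a batch of $N$ samples the parameter gradients wrt $\mathbf{W}$ and $\mathbf{b}$ are summed over the samples.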
In the cell below, we define a class that resembles class [`nn.Linear`](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html?highlight=nn%20linear#torch.nn.Linear) of PyTorch. It calls functions `linear_forward` and `linear_backward` that implement the forward and backward computations. We implemented it this way because it makes it easier to test `linear_forward` and `linear_backward`.<jupyter_code>class Linear:
def __init__(self, in_features, out_features):
"""
Args:
in_features (int): Number of input features which should be equal to xsize.
out_features (out): Number of output features which should be equal to ysize.
"""
self.in_features = in_features
self.out_features = out_features
# Initialize the weights
bound = 3 / np.sqrt(in_features)
self.W = np.random.uniform(-bound, bound, (out_features, in_features))
bound = 1 / np.sqrt(in_features)
self.b = np.random.uniform(-bound, bound, out_features)
self.grad_W = None # Attribute to store the gradients wrt W
self.grad_b = None # Attribute to store the gradients wrt b
def forward(self, x):
"""
Args:
x of shape (n_samples, xsize): Inputs
Returns:
y of shape (n_samples, ysize): Outputs of shape.
"""
self.x = x # Keep this for backward computations
return linear_forward(x, self.W, self.b)
def backward(self, dy):
"""
Args:
dy of shape (n_samples, ysize): Gradient of a loss wrt outputs.
Returns:
dx of shape (n_samples, xsize): Gradient of a loss wrt inputs.
"""
assert hasattr(self, 'x'), "Need to call forward() first"
assert dy.ndim == 2 and dy.shape[1] == self.W.shape[0]
dx, self.grad_W, self.grad_b = linear_backward(dy, self.x, self.W, self.b)
return dx<jupyter_output><empty_output><jupyter_text>Your task is to implement `linear_forward` and `linear_backward`.<jupyter_code>def linear_forward(x, W, b):
"""Forward computations in the linear layer:
y = W x + b
Args:
x of shape (n_samples, xsize): Inputs .
W of shape (ysize, xsize): Weight matrix.
b of shape (ysize,): Bias term.
Returns:
y of shape (n_samples, ysize): Outputs.
"""
    # Affine transformation applied row-wise: y = x W^T + b, shape (n_samples, ysize)
    y = np.dot(x, W.T) + b
return y
def linear_backward(dy, x, W, b):
"""Backward computations in the linear layer.
Args:
dy of shape (n_samples, ysize): Gradient of a loss wrt outputs.
x of (n_samples, xsize): Input of shape.
W of shape (ysize, xsize): Weight matrix.
b of shape (ysize,): Bias term.
Returns:
dx of shape (n_samples, xsize): Gradient of a loss wrt inputs.
dW of shape (ysize, xsize): Gradient wrt weight matrix W.
db of shape (ysize,): Gradient wrt bias term b.
"""
assert dy.ndim == 2 and dy.shape[1] == W.shape[0]
    # Gradient wrt the weight matrix: dW = dy^T x, shape (ysize, xsize)
    dW = np.dot(x.T, dy).T
    # Gradient wrt the bias: sum the upstream gradient over the samples, shape (ysize,)
    db = np.sum(dy, axis=0)
    # Gradient wrt the inputs: dx = dy W, shape (n_samples, xsize)
    dx = np.dot(W.T, dy.T).T
return dx, dW, db
# We test the shapes of the outputs
def test_linear_shapes():
n_samples = 4
x = np.random.randn(n_samples, 2)
W = np.random.randn(3, 2)
b = np.random.randn(3)
# Test shapes
y = linear_forward(x, W, b)
dy = np.arange(n_samples * 3).reshape((n_samples, 3))
dx, dW, db = linear_backward(dy, x, W, b)
assert dx.shape == x.shape, f"Bad dx.shape={dx.shape}, x.shape={x.shape}"
assert dW.shape == W.shape, f"Bad dW.shape={dW.shape}, W.shape={W.shape}"
assert db.shape == b.shape, f"Bad db.shape={db.shape}, b.shape={b.shape}"
print('Success')
test_linear_shapes()<jupyter_output>Success
<jupyter_text>We can again test the backward computations by numerical differentiation.
Note that function `numerical_gradient` imported above accepts functions `fun` that works only with *one-dimensional arrays* as inputs and outputs.
Suppose we have function `fun(X)` which accepts a two-dimensional array `X` of shape `(n1, n2)` as input and produces a one-dimensional array `y` of shape `(ny,)` as output. We want to compute partial derivatives
`d y[i] / d X[k,l]` for each output element `y[i]` and each element `X[k,l]` of the input matrix. We can to it in the following way.
First, we define a function with one-dimensional inputs such that it can be passed to our `numerical_gradient`
function. Function `fun2` reshapes a one-dimensional array passed to it and calls function `fun`:
```
fun2 = lambda A: fun(A.reshape(n1, n2))
```
Then we can call the `numerical_gradient` function:
```
A = np.random.randn(n1, n2)
dA = numerical_gradient(fun2, A.flatten())
```
which will produce a two dimensional array of shape `(ny, n1*n2)` that will contain the required partial
derivatives.
<jupyter_code># We test the backward computations of d/dW by numerical differentiation
def test_linear_numerically():
n_samples = 4
x = np.random.randn(n_samples, 2)
W = np.random.randn(3, 2)
b = np.random.randn(3)
y = linear_forward(x, W, b)
dy = np.arange(n_samples * 3).reshape((n_samples, 3))
dx, dW, db = linear_backward(dy, x, W, b)
print('Analytical gradient:\n', dW)
dW_num = numerical_gradient(lambda W: linear_forward(x, W.reshape(3, 2), b).flatten(), W.flatten())
dW_num = dW_num.reshape(y.shape + W.shape)
expected = (dy[:, :, None, None] * dW_num).sum(axis=(0,1))
print('Numerical gradient:\n', expected)
assert np.allclose(dW, expected), 'Analytical and numerical results differ'
print('Success')
test_linear_numerically()<jupyter_output>Analytical gradient:
[[ -7.12815962 18.38367999]
[ -8.82975952 19.96901968]
[-10.53135942 21.55435936]]
Numerical gradient:
[[ -7.12815962 18.38367999]
[ -8.82975952 19.96901968]
[-10.53135942 21.55435936]]
Success
<jupyter_text>We recommend you to compare analytical and numerical computations of the gradients also wrt input `x` and bias term `b`.<jupyter_code># This cell tests linear_forward and linear_backward
# This cell tests linear_forward and linear_backward
# This cell tests linear_forward and linear_backward<jupyter_output><empty_output><jupyter_text>## 3. The Tanh activation function
Next we implement the Tanh activation function. The activation function is applied *element-wise* to input vector $\mathbf{x}$ to produce outputs $\mathbf{y}$:
$$
\mathbf{y} = \text{tanh}(\mathbf{x}) \quad \text{such that} \quad y_i = \text{tanh}(x_i).
$$
When we backpropagate through this block, we need to transform the gradient $\frac{\partial c}{\partial \mathbf{y}}$ wrt the outputs into the gradient wrt the inputs $\frac{\partial c}{\partial \mathbf{x}}$. Since $\tanh'(x) = 1 - \tanh^2(x)$, the element-wise chain rule gives $\frac{\partial c}{\partial x_i} = \left(1 - \tanh^2(x_i)\right) \frac{\partial c}{\partial y_i}$. Your task is to implement the forward and backward computations.
Notes:
* We recommend you to compare analytical and numerical computations of the gradient.
* If you use function `numerical_gradient` to differentiate numerically `Tanh.forward()` using a one-dimensional array `x` as input, the output of `numerical_gradient` is a two-dimensional array (Jacobian matrix). We are interested only in the diagonal elements of that array because the nonlinearity is applied *element-wise*.<jupyter_code>class Tanh:
def forward(self, x):
"""
Args:
x (array): Input of shape (n_features,).
Returns:
y (array): Output of shape (n_features,).
"""
        self.x = x  # cache the input for the backward pass
        return np.tanh(x)
def backward(self, dy):
"""
Args:
dy (array): Gradient of a loss wrt outputs, shape (n_features,).
Returns:
dx (array): Gradient of a loss wrt inputs, shape (n_features,).
"""
assert hasattr(self, 'x'), "Need to call forward() first."
return dy * (1 - np.tanh(self.x)**2)
def test_Tanh_shapes():
x = np.random.randn(3)
act_fn = Tanh()
y = act_fn.forward(x)
dy = np.arange(1, 4)
dx = act_fn.backward(dy)
assert dx.shape == x.shape, f"Bad dx.shape: {dx.shape}"
print('Success')
test_Tanh_shapes()<jupyter_output>Success
<jupyter_text>## 4. Multilayer Perceptron (MLP)
In the cell below, you need to implement an MLP with two hidden layers and `Tanh` nonlinearity. Use instances of classes `Linear` and `Tanh` in your implementation.
Note:
* For testing purposes, the instances of `Linear` and `Tanh` classes should be attributes of class `MLP` such as attribute `fc1` in the example below:
```
def __init__(self, in_features, hidden_size1, hidden_size2, out_features):
self.fc1 = Linear(...)
```<jupyter_code>class MLP:
def __init__(self, in_features, hidden_size1, hidden_size2, out_features):
"""
Args:
in_features (int): Number of inputs which should be equal to xsize.
hidden_size1 (int): Number of units in the first hidden layer.
hidden_size2 (int): Number of units in the second hidden layer.
out_features (int): Number of outputs which should be equal to ysize.
"""
# YOUR CODE HERE
self.fc1 = Linear(in_features, hidden_size1)
self.fc2 = Linear(hidden_size1, hidden_size2)
self.fc3 = Linear(hidden_size2, out_features)
self.tanh1 = Tanh()
self.tanh2 = Tanh()
def forward(self, x):
"""
Args:
x (array): Input of shape [N, xsize].
Returns:
y (array): Output of shape [N, ysize].
"""
# YOUR CODE HERE
x = self.tanh1.forward(self.fc1.forward(x))
x = self.tanh2.forward(self.fc2.forward(x))
x = self.fc3.forward(x)
return x
def backward(self, dy):
"""
Args:
dy (array): Gradient of a loss wrt outputs (shape [N, ysize]).
Returns:
dx (array): Gradient of a loss wrt inputs (shape [N, xsize]).
"""
# YOUR CODE HERE
dy = self.tanh2.backward(self.fc3.backward(dy))
dy = self.tanh1.backward(self.fc2.backward(dy))
dy = self.fc1.backward(dy)
return dy
def test_MLP_shapes():
n_samples = 10
x = np.random.randn(n_samples, 1)
mlp_batch = MLP(1, 10, 20, 1)
y = mlp_batch.forward(x)
dy = np.arange(n_samples).reshape((n_samples, 1)) # Dummy gradient of a loss function wrt MLP's outputs.
dx = mlp_batch.backward(dy)
assert dx.shape == x.shape, f"Bad dx.shape={dx.shape}, x.shape={x.shape}"
print('Success')
test_MLP_shapes()
# This cell tests MLP
# Let's create an MLP with random weights and compute the derivative wrt the one-dimensional input
def test_MLP_derivative():
n_samples = 100
x = np.linspace(-10, 10, n_samples)
mlp_batch = MLP(1, 10, 20, 1)
y = mlp_batch.forward(x.reshape((n_samples, 1))).flatten()
dy_dx = mlp_batch.backward(np.ones((n_samples, 1))).flatten()
fig, ax = plt.subplots()
ax.plot(x, y)
ax.plot(x, dy_dx)
ax.grid(True)
ax.legend(['y', 'dy_dx'])
test_MLP_derivative()<jupyter_output><empty_output><jupyter_text>You can visually inspect whether the computations of the derivative seem correct.
More importantly, we can compute the gradient of a loss wrt the parameters of the MLP. The gradients can be used to update the parameters using gradient descent.## 5. Training MLP network with backpropagation
Now let us use our code to train an MLP network.<jupyter_code># Let us generate toy data
np.random.seed(2)
x = np.random.randn(100, 1)
x = np.sort(x, axis=0)
targets = np.sin(x * 2 * np.pi / 3)
targets = targets + 0.2 * np.random.randn(*targets.shape)
# Plot the data
fig, ax = plt.subplots(1)
ax.plot(x, targets, '.')
# And train an MLP network using gradient descent
from IPython import display
if not skip_training: # The trained MLP is not tested
mlp = MLP(1, 10, 11, 1) # Create MLP network
loss = MSELoss() # Create loss
fig, ax = plt.subplots(1)
ax.plot(x, targets, '.')
learning_rate = 0.05
n_epochs = 1 if skip_training else 200
for i in range(n_epochs):
# Forward computations
y = mlp.forward(x)
c = loss.forward(y, targets)
# Backward computations
dy = loss.backward()
dx = mlp.backward(dy)
# Gradient descent update
#learning_rate *= 0.99 # Learning rate annealing
for module in mlp.__dict__.values():
if hasattr(module, 'W'):
module.W = module.W - module.grad_W * learning_rate
module.b = module.b - module.grad_b * learning_rate
ax.clear()
ax.plot(x, targets, '.')
ax.plot(x, y, 'r-')
ax.grid(True)
ax.set_title('Iteration %d/%d' % (i+1, n_epochs))
display.clear_output(wait=True)
display.display(fig)
plt.pause(0.005)
display.clear_output(wait=True)<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Banknote Authentication## Data Set Information:The data were extracted from images taken to evaluate an authentication procedure for banknotes. (Owner of database: Volker Lohweg, University of Applied Sciences, Ostwestfalen-Lippe)
Number of instances: 1372
Type: real
Number of features: 4
X1: variance
X2: skewness
X3: curtosis
X4: entropy
Task: Classification
## Architecture# HEADERS<jupyter_code>import tensorflow as tf
import pandas as pan
learning_rate = 0.01
training_epochs = 1000
step_display = 100<jupyter_output><empty_output><jupyter_text># 1. DATA### 1.1. Load Data<jupyter_code>data = pan.read_csv('data/money.csv')
data[0 :5]<jupyter_output><empty_output><jupyter_text>### 1.2. Train data<jupyter_code>x_train = data.loc[0:1000, ['X1', 'X2', 'X3', 'X4']].as_matrix()
y_train = data.loc[0:1000, ['Label1', 'Label2']].as_matrix()<jupyter_output><empty_output><jupyter_text>### 1.3. Test data<jupyter_code>x_test = data.loc[1001: len(data), ['X1', 'X2', 'X3', 'X4']].as_matrix()
y_test = data.loc[1001: len(data), ['Label1', 'Label2']].as_matrix()<jupyter_output><empty_output><jupyter_text># 2. BUILD GRAPH### 2.1. Placholders<jupyter_code>x = tf.placeholder(tf.float32, shape=[None, 4], name='X')
y = tf.placeholder(tf.float32, shape=[None, 2], name='Y')<jupyter_output><empty_output><jupyter_text>### 2.2. Model2.2.1. Layers<jupyter_code>def layer(x, size_in, size_out, name="Layer"):
with tf.name_scope(name):
w = tf.Variable(tf.zeros([size_in, size_out]), name='weight')
b = tf.Variable(tf.zeros([size_out]), name='biais')
z = tf.matmul(x, w) + b
a = tf.sigmoid(z)
tf.summary.histogram("weight", w)
tf.summary.histogram("biais", b)
tf.summary.histogram("Activation", a)
return a
Layer_1 = layer(x , 4, 4)
Layer_2 = layer(Layer_1, 4, 4)
Layer_3 = layer(Layer_2, 4, 2)
with tf.name_scope("softmax"):
y_pred = tf.nn.softmax(Layer_3)<jupyter_output><empty_output><jupyter_text>2.2.2. Cost function<jupyter_code>with tf.name_scope("Error"):
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_pred)))
tf.summary.scalar("CrossEnt", cross_entropy)<jupyter_output><empty_output><jupyter_text>2.2.3. Optimizer<jupyter_code>with tf.name_scope("Train"):
optimiser = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)<jupyter_output><empty_output><jupyter_text>2.2.4. Accuracy<jupyter_code> with tf.name_scope('Accuracy'):
correct_prediction = tf.equal(tf.argmax(y_pred,1), tf.argmax(y,1))
final_acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))*100
tf.summary.scalar("Accuracy", final_acc)<jupyter_output><empty_output><jupyter_text># 3.SESSION### 3.1. Start session<jupyter_code>sess = tf.Session()
sess.run(tf.global_variables_initializer())
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs/Demo_3")
writer.add_graph(sess.graph)<jupyter_output><empty_output><jupyter_text>### 3.2. Training<jupyter_code>for step in range(training_epochs+1):
_, cost = sess.run([optimiser, cross_entropy], feed_dict={x : x_train, y: y_train})
s = sess.run(merged_summary, feed_dict={x : x_train, y: y_train})
writer.add_summary(s,step)
if step % step_display == 0 :
print(step,'::', cost)<jupyter_output>0 :: 693.839
100 :: 547.221
200 :: 386.258
300 :: 350.428
400 :: 338.569
500 :: 332.886
600 :: 329.638
700 :: 327.575
800 :: 326.17
900 :: 325.163
1000 :: 324.413
<jupyter_text>### 3.4. Compute accuracy<jupyter_code>print ('Accuracy = {:05.2f}'.format(sess.run(final_acc,feed_dict={x: x_test, y:y_test})),'%')<jupyter_output>Accuracy = 99.19 %
<jupyter_start><jupyter_text># Practice 6b: partition-based clustering analysis<jupyter_code>packages <- c("stats", "cluster")
lapply(packages, library, character.only = TRUE)
data("mtcars") # Load the data set
df <- scale(mtcars)
head(df, n = 3)
kmeans(df, 3, iter.max = 10, nstart = 1)
# Standardize the data
df <- scale(mtcars)
head(df)
# Elbow method (fviz_nbclust comes from the factoextra package, so attach it first)
library("factoextra")
fviz_nbclust(df, kmeans, method = "wss") + geom_vline(xintercept = 4, linetype = 2) + labs(subtitle = "Elbow method")
library("NbClust")
nb <- NbClust(df, distance = "euclidean", min.nc = 2, max.nc = 10, method = "kmeans")
library("factoextra")
fviz_nbclust(nb)
<jupyter_output><empty_output>
<jupyter_start><jupyter_text>***
* [Outline](../0_Introduction/0_introduction.ipynb)
* [Glossary](../0_Introduction/1_glossary.ipynb)
* [1. Radio Science using Interferometric Arrays](#)
* Next: [1.1 What are we trying to do? What is the point?]()
***<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Improvise a Jazz Solo with an LSTM Network
Welcome to your final programming assignment of this week! In this notebook, you will implement a model that uses an LSTM to generate music. You will even be able to listen to your own music at the end of the assignment.
**You will learn to:**
- Apply an LSTM to music generation.
- Generate your own jazz music with deep learning.
## Updates
#### If you were working on the notebook before this update...
* The current notebook is version "3a".
* You can find your original work saved in the notebook with the previous version name ("v3")
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#### List of updates
* `djmodel`
- Explains `Input` layer and its parameter `shape`.
- Explains `Lambda` layer and replaces the given solution with hints and sample code (to improve the learning experience).
- Adds hints for using the Keras `Model`.
* `music_inference_model`
- Explains each line of code in the `one_hot` function.
- Explains how to apply `one_hot` with a Lambda layer instead of giving the code solution (to improve the learning experience).
- Adds instructions on defining the `Model`.
* `predict_and_sample`
- Provides detailed instructions for each step.
- Clarifies which variable/function to use for inference.
* Spelling, grammar and wording corrections.Please run the following cell to load all the packages required in this assignment. This may take a few minutes. <jupyter_code>from __future__ import print_function
import IPython
import sys
from music21 import *
import numpy as np
from grammar import *
from qa import *
from preprocess import *
from music_utils import *
from data_utils import *
from keras.models import load_model, Model
from keras.layers import Dense, Activation, Dropout, Input, LSTM, Reshape, Lambda, RepeatVector
from keras.initializers import glorot_uniform
from keras.utils import to_categorical
from keras.optimizers import Adam
from keras import backend as K<jupyter_output><empty_output><jupyter_text>## 1 - Problem statement
You would like to create a jazz music piece specially for a friend's birthday. However, you don't know any instruments or music composition. Fortunately, you know deep learning and will solve this problem using an LSTM network.
You will train a network to generate novel jazz solos in a style representative of a body of performed work.
### 1.1 - Dataset
You will train your algorithm on a corpus of Jazz music. Run the cell below to listen to a snippet of the audio from the training set:<jupyter_code>IPython.display.Audio('./data/30s_seq.mp3')<jupyter_output><empty_output><jupyter_text>We have taken care of the preprocessing of the musical data to render it in terms of musical "values."
#### Details about music (optional)
You can informally think of each "value" as a note, which comprises a pitch and duration. For example, if you press down a specific piano key for 0.5 seconds, then you have just played a note. In music theory, a "value" is actually more complicated than this--specifically, it also captures the information needed to play multiple notes at the same time. For example, when playing a music piece, you might press down two piano keys at the same time (playing multiple notes at the same time generates what's called a "chord"). But we don't need to worry about the details of music theory for this assignment.
#### Music as a sequence of values
* For the purpose of this assignment, all you need to know is that we will obtain a dataset of values, and will learn an RNN model to generate sequences of values.
* Our music generation system will use 78 unique values.
Run the following code to load the raw music data and preprocess it into values. This might take a few minutes.<jupyter_code>X, Y, n_values, indices_values = load_music_utils()
print('number of training examples:', X.shape[0])
print('Tx (length of sequence):', X.shape[1])
print('total # of unique values:', n_values)
print('shape of X:', X.shape)
print('Shape of Y:', Y.shape)<jupyter_output>number of training examples: 60
Tx (length of sequence): 30
total # of unique values: 78
shape of X: (60, 30, 78)
Shape of Y: (30, 60, 78)
<jupyter_text>You have just loaded the following:
- `X`: This is an (m, $T_x$, 78) dimensional array.
- We have m training examples, each of which is a snippet of $T_x =30$ musical values.
- At each time step, the input is one of 78 different possible values, represented as a one-hot vector.
- For example, X[i,t,:] is a one-hot vector representing the value of the i-th example at time t.
- `Y`: a $(T_y, m, 78)$ dimensional array
- This is essentially the same as `X`, but shifted one step to the left (to the past).
- Notice that the data in `Y` is **reordered** to be dimension $(T_y, m, 78)$, where $T_y = T_x$. This format makes it more convenient to feed into the LSTM later.
- Similar to the dinosaur assignment, we're using the previous values to predict the next value.
- So our sequence model will try to predict $y^{\langle t \rangle}$ given $x^{\langle 1\rangle}, \ldots, x^{\langle t \rangle}$.
- `n_values`: The number of unique values in this dataset. This should be 78.
- `indices_values`: python dictionary mapping integers 0 through 77 to musical values.### 1.2 - Overview of our model
Here is the architecture of the model we will use. This is similar to the Dinosaurus model, except that you will implement it in Keras.
* $X = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, \cdots, x^{\langle T_x \rangle})$ is a window of size $T_x$ scanned over the musical corpus.
* Each $x^{\langle t \rangle}$ is an index corresponding to a value.
* $\hat{y}^{t}$ is the prediction for the next value.
* We will be training the model on random snippets of 30 values taken from a much longer piece of music.
- Thus, we won't bother to set the first input $x^{\langle 1 \rangle} = \vec{0}$, since most of these snippets of audio start somewhere in the middle of a piece of music.
- We are setting each of the snippets to have the same length $T_x = 30$ to make vectorization easier.## Overview of parts 2 and 3
* We're going to train a model that predicts the next note in a style that is similar to the jazz music that it's trained on. The training is contained in the weights and biases of the model.
* In Part 3, we're then going to use those weights and biases in a new model which predicts a series of notes, using the previous note to predict the next note.
* The weights and biases are transferred to the new model using 'global shared layers' described below"
## 2 - Building the model
* In this part you will build and train a model that will learn musical patterns.
* The model takes input X of shape $(m, T_x, 78)$ and labels Y of shape $(T_y, m, 78)$.
* We will use an LSTM with hidden states that have $n_{a} = 64$ dimensions.<jupyter_code># number of dimensions for the hidden state of each LSTM cell.
n_a = 64 <jupyter_output><empty_output><jupyter_text>
#### Sequence generation uses a for-loop
* If you're building an RNN where, at test time, the entire input sequence $x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, \ldots, x^{\langle T_x \rangle}$ is given in advance, then Keras has simple built-in functions to build the model.
* However, for **sequence generation, at test time we don't know all the values of $x^{\langle t\rangle}$ in advance**.
* Instead we generate them one at a time using $x^{\langle t\rangle} = y^{\langle t-1 \rangle}$.
* The input at time "t" is the prediction at the previous time step "t-1".
* So you'll need to implement your own for-loop to iterate over the time steps.
#### Shareable weights
* The function `djmodel()` will call the LSTM layer $T_x$ times using a for-loop.
* It is important that all $T_x$ copies have the same weights.
- The $T_x$ steps should have shared weights that aren't re-initialized.
* Referencing a globally defined shared layer will utilize the same layer-object instance at each time step.
* The key steps for implementing layers with shareable weights in Keras are:
1. Define the layer objects (we will use global variables for this).
2. Call these objects when propagating the input.
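To make these two steps concrete, here is a tiny, self-contained sketch (separate from the graded functions in this notebook) of one globally defined layer object being called twice, so that both calls share a single set of weights:
```Python
from keras.layers import Input, Dense
from keras.models import Model

shared_densor = Dense(5, activation='relu')   # Step 1: define the layer object once

x1 = Input(shape=(3,))
x2 = Input(shape=(3,))
h1 = shared_densor(x1)                        # Step 2: call the same object...
h2 = shared_densor(x2)                        # ...wherever the weights should be shared

shared_model = Model(inputs=[x1, x2], outputs=[h1, h2])
# Both outputs use the same kernel and bias; training updates that one set of weights.
```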
#### 3 types of layers
* We have defined the layers objects you need as global variables.
* Please run the next cell to create them.
* Please read the Keras documentation and understand these layers:
- [Reshape()](https://keras.io/layers/core/#reshape): Reshapes an output to a certain shape.
- [LSTM()](https://keras.io/layers/recurrent/#lstm): Long Short-Term Memory layer
- [Dense()](https://keras.io/layers/core/#dense): A regular fully-connected neural network layer.
<jupyter_code>n_values = 78 # number of music values
reshapor = Reshape((1, n_values)) # Used in Step 2.B of djmodel(), below
LSTM_cell = LSTM(n_a, return_state = True) # Used in Step 2.C
densor = Dense(n_values, activation='softmax') # Used in Step 2.D<jupyter_output><empty_output><jupyter_text>* `reshapor`, `LSTM_cell` and `densor` are globally defined layer objects, that you'll use to implement `djmodel()`.
* In order to propagate a Keras tensor object X through one of these layers, use `layer_object()`.
- For one input, use `layer_object(X)`
- For more than one input, put the inputs in a list: `layer_object([X1,X2])` **Exercise**: Implement `djmodel()`.
#### Inputs (given)
* The `Input()` layer is used for defining the input `X` as well as the initial hidden state 'a0' and cell state `c0`.
* The `shape` parameter takes a tuple that does not include the batch dimension (`m`).
- For example,
```Python
X = Input(shape=(Tx, n_values)) # X has 3 dimensions and not 2: (m, Tx, n_values)
```
#### Step 1: Outputs (TODO)
1. Create an empty list "outputs" to save the outputs of the LSTM Cell at every time step.#### Step 2: Loop through time steps (TODO)
* Loop for $t \in 1, \ldots, T_x$:
#### 2A. Select the 't' time-step vector from X.
* X has the shape (m, Tx, n_values).
* The shape of the 't' selection should be (n_values,).
* Recall that if you were implementing in numpy instead of Keras, you would extract a slice from a 3D numpy array like this:
```Python
var1 = array1[:,1,:]
```
#### Lambda layer
* Since we're using Keras, we need to define this step inside a custom layer.
* In Keras, this is a Lambda layer [Lambda](https://keras.io/layers/core/#lambda)
* As an example, a Lambda layer that takes the previous layer and adds '1' looks like this
```
lambda_layer1 = Lambda(lambda z: z + 1)(previous_layer)
```
* The previous layer in this case is `X`.
* `z` is a local variable of the lambda function.
* The `previous_layer` gets passed into the parameter `z` in the lowercase `lambda` function.
* You can choose the name of the variable to be something else if you want.
* The operation after the colon ':' should be the operation to extract a slice from the previous layer.
* **Hint**: You'll be using the variable `t` within the definition of the lambda layer even though it isn't passed in as an argument to Lambda.#### 2B. Reshape x to be (1,n_values).
* Use the `reshapor()` layer. It is a function that takes the previous layer as its input argument.
#### 2C. Run x through one step of LSTM_cell.
* Initialize the `LSTM_cell` with the previous step's hidden state $a$ and cell state $c$.
* Use the following formatting:
```python
next_hidden_state, _, next_cell_state = LSTM_cell(inputs=input_x, initial_state=[previous_hidden_state, previous_cell_state])
```
* Choose appropriate variables for inputs, hidden state and cell state.
#### 2D. Dense layer
* Propagate the LSTM's hidden state through a dense+softmax layer using `densor`.
#### 2E. Append output
* Append the output to the list of "outputs".
#### Step 3: After the loop, create the model
* Use the Keras `Model` object to create a model.
* specify the inputs and outputs:
```Python
model = Model(inputs=[input_x, initial_hidden_state, initial_cell_state], outputs=the_outputs)
```
* Choose the appropriate variables for the input tensor, hidden state, cell state, and output.
* See the documentation for [Model](https://keras.io/models/model/)<jupyter_code># GRADED FUNCTION: djmodel
def djmodel(Tx, n_a, n_values):
"""
Implement the model
Arguments:
Tx -- length of the sequence in a corpus
n_a -- the number of activations used in our model
n_values -- number of unique values in the music data
Returns:
model -- a keras instance model with n_a activations
"""
# Define the input layer and specify the shape
X = Input(shape=(Tx, n_values))
# Define the initial hidden state a0 and initial cell state c0
# using `Input`
a0 = Input(shape=(n_a,), name='a0')
c0 = Input(shape=(n_a,), name='c0')
a = a0
c = c0
### START CODE HERE ###
# Step 1: Create empty list to append the outputs while you iterate (≈1 line)
outputs = []
# Step 2: Loop
for t in range(Tx):
# Step 2.A: select the "t"th time step vector from X.
        x = Lambda(lambda z: z[:, t, :])(X)  # slice out time step t, as described in the hints above
# Step 2.B: Use reshapor to reshape x to be (1, n_values) (≈1 line)
x = reshapor(x)
# Step 2.C: Perform one step of the LSTM_cell
a, _, c = LSTM_cell(x,initial_state = [a,c])
# Step 2.D: Apply densor to the hidden state output of LSTM_Cell
out = densor(a)
# Step 2.E: add the output to "outputs"
outputs.append(out)
# Step 3: Create model instance
model = Model(inputs=[X,a0,c0],outputs = outputs)
### END CODE HERE ###
return model<jupyter_output><empty_output><jupyter_text>#### Create the model object
* Run the following cell to define your model.
* We will use `Tx=30`, `n_a=64` (the dimension of the LSTM activations), and `n_values=78`.
* This cell may take a few seconds to run. <jupyter_code>model = djmodel(Tx = 30 , n_a = 64, n_values = 78)
# Check your model
model.summary()<jupyter_output>____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_7 (InputLayer) (None, 30, 78) 0
____________________________________________________________________________________________________
lambda_61 (Lambda) (None, 78) 0 input_7[0][0]
____________________________________________________________________________________________________
reshape_2 (Reshape) (None, 1, 78) 0 lambda_61[0][0]
lambda_62[0][0]
lambda_63[0][0] [...]<jupyter_text>**Expected Output**
Scroll to the bottom of the output, and you'll see the following:
```Python
Total params: 41,678
Trainable params: 41,678
Non-trainable params: 0
```#### Compile the model for training
* You now need to compile your model to be trained.
* We will use:
- optimizer: Adam optimizer
- Loss function: categorical cross-entropy (for multi-class classification)<jupyter_code>opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])<jupyter_output><empty_output><jupyter_text>#### Initialize hidden state and cell state
Finally, let's initialize `a0` and `c0` for the LSTM's initial state to be zero. <jupyter_code>m = 60
a0 = np.zeros((m, n_a))
c0 = np.zeros((m, n_a))<jupyter_output><empty_output><jupyter_text>#### Train the model
* Let's now fit the model!
* We will turn `Y` into a list, since the cost function expects `Y` to be provided in this format
- `list(Y)` is a list with 30 items, where each of the list items is of shape (60,78).
- Lets train for 100 epochs. This will take a few minutes. <jupyter_code>model.fit([X, a0, c0], list(Y), epochs=100)<jupyter_output><empty_output><jupyter_text>#### Expected Output
The model loss will start high, (100 or so), and after 100 epochs, it should be in the single digits. These won't be the exact number that you'll see, due to random initialization of weights.
For example:
```
Epoch 1/100
60/60 [==============================] - 3s - loss: 125.7673
...
```
Scroll to the bottom to check Epoch 100
```
...
Epoch 100/100
60/60 [==============================] - 0s - loss: 6.1861
```
Now that you have trained a model, let's go to the final section to implement an inference algorithm, and generate some music! ## 3 - Generating music
You now have a trained model which has learned the patterns of the jazz soloist. Lets now use this model to synthesize new music.
#### 3.1 - Predicting & Sampling
At each step of sampling, you will:
* Take as input the activation '`a`' and cell state '`c`' from the previous state of the LSTM.
* Forward propagate by one step.
* Get a new output activation as well as cell state.
* The new activation '`a`' can then be used to generate the output using the fully connected layer, `densor`.
##### Initialization
* We will initialize the following to be zeros:
* `x0`
* hidden state `a0`
* cell state `c0` **Exercise:**
* Implement the function below to sample a sequence of musical values.
* Here are some of the key steps you'll need to implement inside the for-loop that generates the $T_y$ output characters:
* Step 2.A: Use `LSTM_Cell`, which takes in the input layer, as well as the previous step's '`c`' and '`a`' to generate the current step's '`c`' and '`a`'.
```Python
next_hidden_state, _, next_cell_state = LSTM_cell(input_x, initial_state=[previous_hidden_state, previous_cell_state])
```
* Choose the appropriate variables for the input_x, hidden_state, and cell_state
* Step 2.B: Compute the output by applying `densor` to compute a softmax on '`a`' to get the output for the current step.
* Step 2.C: Append the output to the list `outputs`.
* Step 2.D: Sample x to be the one-hot version of '`out`'.
* This allows you to pass it to the next LSTM's step.
* We have provided the definition of `one_hot(x)` in the 'music_utils.py' file and imported it.
Here is the definition of `one_hot`
```Python
def one_hot(x):
x = K.argmax(x)
x = tf.one_hot(indices=x, depth=78)
x = RepeatVector(1)(x)
return x
```
Here is what the `one_hot` function is doing:
* argmax: within the vector `x`, find the position with the maximum value and return the index of that position.
* For example: argmax of [-1,0,1] finds that 1 is the maximum value, and returns the index position, which is 2. Read the documentation for [keras.argmax](https://www.tensorflow.org/api_docs/python/tf/keras/backend/argmax).
* one_hot: takes a list of indices and the depth of the one-hot vector (number of categories, which is 78 in this assignment). It converts each index into the one-hot vector representation. For instance, if the indices is [2], and the depth is 5, then the one-hot vector returned is [0,0,1,0,0]. Check out the documentation for [tf.one_hot](https://www.tensorflow.org/api_docs/python/tf/one_hot) for more examples and explanations.
* RepeatVector(n): This takes a vector and duplicates it `n` times. Notice that we had it repeat 1 time. This may seem like it's not doing anything. If you look at the documentation for [RepeatVector](https://keras.io/layers/core/#repeatvector), you'll notice that if x is a vector with dimension (m,5) and it gets passed into `RepeatVector(1)`, then the output is (m,1,5). In other words, it adds an additional dimension (of length 1) to the resulting vector.
* Apply the custom one_hot encoding using the [Lambda](https://keras.io/layers/core/#lambda) layer. You saw earlier that the Lambda layer can be used like this:
```Python
result = Lambda(lambda x: x + 1)(input_var)
```
If you pre-define a function, you can do the same thing:
```Python
def add_one(x):
return x + 1
# use the add_one function inside of the Lambda function
result = Lambda(add_one)(input_var)
```
#### Step 3: Inference Model:
This is how to use the Keras `Model`.
```Python
model = Model(inputs=[input_x, initial_hidden_state, initial_cell_state], outputs=the_outputs)
```
* Choose the appropriate variables for the input tensor, hidden state, cell state, and output.
* **Hint**: the inputs to the model are the **initial** inputs and states.<jupyter_code># GRADED FUNCTION: music_inference_model
def music_inference_model(LSTM_cell, densor, n_values = 78, n_a = 64, Ty = 100):
"""
Uses the trained "LSTM_cell" and "densor" from model() to generate a sequence of values.
Arguments:
LSTM_cell -- the trained "LSTM_cell" from model(), Keras layer object
densor -- the trained "densor" from model(), Keras layer object
n_values -- integer, number of unique values
n_a -- number of units in the LSTM_cell
Ty -- integer, number of time steps to generate
Returns:
inference_model -- Keras model instance
"""
# Define the input of your model with a shape
x0 = Input(shape=(1, n_values))
# Define s0, initial hidden state for the decoder LSTM
a0 = Input(shape=(n_a,), name='a0')
c0 = Input(shape=(n_a,), name='c0')
a = a0
c = c0
x = x0
### START CODE HERE ###
# Step 1: Create an empty list of "outputs" to later store your predicted values (≈1 line)
outputs = []
# Step 2: Loop over Ty and generate a value at every time step
for t in range(Ty):
# Step 2.A: Perform one step of LSTM_cell (≈1 line)
a, _, c = LSTM_cell(x,initial_state=[a,c])
# Step 2.B: Apply Dense layer to the hidden state output of the LSTM_cell (≈1 line)
out = densor(a)
# Step 2.C: Append the prediction "out" to "outputs". out.shape = (None, 78) (≈1 line)
outputs.append(out)
# Step 2.D:
# Select the next value according to "out",
# Set "x" to be the one-hot representation of the selected value
# See instructions above.
x = Lambda(one_hot)(out)
# Step 3: Create model instance with the correct "inputs" and "outputs" (≈1 line)
inference_model = Model(inputs = [x0,a0,c0],outputs = outputs)
### END CODE HERE ###
return inference_model<jupyter_output><empty_output><jupyter_text>Run the cell below to define your inference model. This model is hard coded to generate 50 values.<jupyter_code>inference_model = music_inference_model(LSTM_cell, densor, n_values = 78, n_a = 64, Ty = 50)
# Check the inference model
inference_model.summary()<jupyter_output>____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_8 (InputLayer) (None, 1, 78) 0
____________________________________________________________________________________________________
a0 (InputLayer) (None, 64) 0
____________________________________________________________________________________________________
c0 (InputLayer) (None, 64) 0
____________________________________________________________________________________________________
lstm_2 (LSTM) [(None, 64), (None, 6 36608 input_8[0][0] [...]<jupyter_text>** Expected Output**
If you scroll to the bottom of the output, you'll see:
```
Total params: 41,678
Trainable params: 41,678
Non-trainable params: 0
```
#### Initialize inference model
The following code creates the zero-valued vectors you will use to initialize `x` and the LSTM state variables `a` and `c`. <jupyter_code>x_initializer = np.zeros((1, 1, 78))
a_initializer = np.zeros((1, n_a))
c_initializer = np.zeros((1, n_a))<jupyter_output><empty_output><jupyter_text>**Exercise**: Implement `predict_and_sample()`.
* This function takes many arguments including the inputs [x_initializer, a_initializer, c_initializer].
* In order to predict the output corresponding to this input, you will need to carry-out 3 steps:
#### Step 1
* Use your inference model to predict an output given your set of inputs. The output `pred` should be a list of length $T_y$ where each element is a numpy-array of shape (1, n_values).
```Python
inference_model.predict([input_x_init, hidden_state_init, cell_state_init])
```
* Choose the appropriate input arguments to `predict` from the input arguments of this `predict_and_sample` function.
#### Step 2
* Convert `pred` into a numpy array of $T_y$ indices.
* Each index is computed by taking the `argmax` of an element of the `pred` list.
* Use [numpy.argmax](https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html).
* Set the `axis` parameter.
* Remember that the shape of the prediction is $(m, T_{y}, n_{values})$
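For instance, here is a minimal NumPy sketch of this step (the array `pred_example` and its shape are made up for illustration and are not part of the assignment):
```Python
import numpy as np

# pretend prediction: m=1 example, Ty=3 time steps, n_values=5 classes
pred_example = np.random.rand(1, 3, 5)

# argmax over the last axis gives one index per time step
indices_example = np.argmax(pred_example, axis=-1)
print(indices_example.shape)  # (1, 3)
```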
#### Step 3
* Convert the indices into their one-hot vector representations.
* Use [to_categorical](https://keras.io/utils/#to_categorical).
* Set the `num_classes` parameter. Note that for grading purposes: you'll need to either:
* Use a dimension from the given parameters of `predict_and_sample()` (for example, one of the dimensions of x_initializer has the value for the number of distinct classes).
* Or just hard code the number of distinct classes (will pass the grader as well).
* Note that using a global variable such as n_values will not work for grading purposes.<jupyter_code># GRADED FUNCTION: predict_and_sample
def predict_and_sample(inference_model, x_initializer = x_initializer, a_initializer = a_initializer,
c_initializer = c_initializer):
"""
Predicts the next value of values using the inference model.
Arguments:
inference_model -- Keras model instance for inference time
x_initializer -- numpy array of shape (1, 1, 78), one-hot vector initializing the values generation
a_initializer -- numpy array of shape (1, n_a), initializing the hidden state of the LSTM_cell
c_initializer -- numpy array of shape (1, n_a), initializing the cell state of the LSTM_cell
Returns:
results -- numpy-array of shape (Ty, 78), matrix of one-hot vectors representing the values generated
indices -- numpy-array of shape (Ty, 1), matrix of indices representing the values generated
"""
### START CODE HERE ###
# Step 1: Use your inference model to predict an output sequence given x_initializer, a_initializer and c_initializer.
pred = inference_model.predict([x_initializer, a_initializer, c_initializer])
# Step 2: Convert "pred" into an np.array() of indices with the maximum probabilities
indices = np.argmax(pred,axis = 2)
# Step 3: Convert indices to one-hot vectors, the shape of the results should be (Ty, n_values)
results = to_categorical(indices)
### END CODE HERE ###
return results, indices
results, indices = predict_and_sample(inference_model, x_initializer, a_initializer, c_initializer)
print("np.argmax(results[12]) =", np.argmax(results[12]))
print("np.argmax(results[17]) =", np.argmax(results[17]))
print("list(indices[12:18]) =", list(indices[12:18]))<jupyter_output>np.argmax(results[12]) = 66
np.argmax(results[17]) = 9
list(indices[12:18]) = [array([66]), array([1]), array([1]), array([1]), array([2]), array([9])]
<jupyter_text>**Expected (Approximate) Output**:
* Your results **may likely differ** because Keras' results are not completely predictable.
* However, if you have trained your LSTM_cell with model.fit() for exactly 100 epochs as described above:
* You should very likely observe a sequence of indices that are not all identical.
* Moreover, you should observe that:
* np.argmax(results[12]) is the first element of list(indices[12:18])
* and np.argmax(results[17]) is the last element of list(indices[12:18]).
**np.argmax(results[12])** =
1
**np.argmax(results[17])** =
42
**list(indices[12:18])** =
[array([1]), array([42]), array([54]), array([17]), array([1]), array([42])]
#### 3.3 - Generate music
Finally, you are ready to generate music. Your RNN generates a sequence of values. The following code generates music by first calling your `predict_and_sample()` function. These values are then post-processed into musical chords (meaning that multiple values or notes can be played at the same time).
Most computational music algorithms use some post-processing because it is difficult to generate music that sounds good without such post-processing. The post-processing does things such as clean up the generated audio by making sure the same sound is not repeated too many times, that two successive notes are not too far from each other in pitch, and so on. One could argue that a lot of these post-processing steps are hacks; also, a lot of the music generation literature has also focused on hand-crafting post-processors, and a lot of the output quality depends on the quality of the post-processing and not just the quality of the RNN. But this post-processing does make a huge difference, so let's use it in our implementation as well.
Let's make some music! Run the following cell to generate music and record it into your `out_stream`. This can take a couple of minutes.<jupyter_code>out_stream = generate_music(inference_model)<jupyter_output>Predicting new values for different set of chords.
Generated 51 sounds using the predicted values for the set of chords ("1") and after pruning
Generated 51 sounds using the predicted values for the set of chords ("2") and after pruning
Generated 51 sounds using the predicted values for the set of chords ("3") and after pruning
Generated 51 sounds using the predicted values for the set of chords ("4") and after pruning
Generated 51 sounds using the predicted values for the set of chords ("5") and after pruning
Your generated music is saved in output/my_music.midi
<jupyter_text>To listen to your music, click File->Open... Then go to "output/" and download "my_music.midi". Either play it on your computer with an application that can read midi files if you have one, or use one of the free online "MIDI to mp3" conversion tools to convert this to mp3.
As a reference, here is a 30 second audio clip we generated using this algorithm. <jupyter_code>IPython.display.Audio('./data/30s_trained_model.mp3')<jupyter_output><empty_output>
|
no_license
|
/Improvise_a_Jazz_Solo_with_an_LSTM_Network_v3a (1).ipynb
|
Stairwaytoknowledge/RepoA
| 16 |
<jupyter_start><jupyter_text>Data Scientist Training (Formação Cientista de Dados) - Session 31
Naive Bayes<jupyter_code>import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, accuracy_score
from yellowbrick.classifier import ConfusionMatrix
# Load the dataset and define the predictors (independent variables - x) and the class (dependent variable - y)
credito = pd.read_csv('Credit.csv')
credito.shape
credito.head()
# Matrix format
previsores = credito.iloc[:, 0:20].values
classe = credito.iloc[:, 20].values
# Transform the categorical attributes into numeric attributes, passing the index of each categorical column
# We need to create one object per categorical attribute, because later we will run the encoding process again for the test record
# If different objects were used, the number assigned to each value could differ, which would make the test inconsistent
labelencoder1 = LabelEncoder()
previsores[:, 0] = labelencoder1.fit_transform(previsores[:, 0])
labelencoder2 = LabelEncoder()
previsores[:, 2] = labelencoder2.fit_transform(previsores[:, 2])
labelencoder3 = LabelEncoder()
previsores[:, 3] = labelencoder3.fit_transform(previsores[:, 3])
labelencoder4 = LabelEncoder()
previsores[:, 5] = labelencoder4.fit_transform(previsores[:, 5])
labelencoder5 = LabelEncoder()
previsores[:, 6] = labelencoder5.fit_transform(previsores[:, 6])
labelencoder6 = LabelEncoder()
previsores[:, 8] = labelencoder6.fit_transform(previsores[:, 8])
labelencoder7 = LabelEncoder()
previsores[:, 9] = labelencoder7.fit_transform(previsores[:, 9])
labelencoder8 = LabelEncoder()
previsores[:, 11] = labelencoder8.fit_transform(previsores[:, 11])
labelencoder9 = LabelEncoder()
previsores[:, 13] = labelencoder9.fit_transform(previsores[:, 13])
labelencoder10 = LabelEncoder()
previsores[:, 14] = labelencoder10.fit_transform(previsores[:, 14])
labelencoder11 = LabelEncoder()
previsores[:, 16] = labelencoder11.fit_transform(previsores[:, 16])
labelencoder12 = LabelEncoder()
previsores[:, 18] = labelencoder12.fit_transform(previsores[:, 18])
labelencoder13 = LabelEncoder()
previsores[:, 19] = labelencoder13.fit_transform(previsores[:, 19])
# Split the dataset into training and test sets (30% for testing and 70% for training)
x_treinamento, x_teste, y_treinamento, y_teste = train_test_split(previsores, classe, test_size = 0.3, random_state = 0)
x_teste
# Create and train the model (generation of the probability table)
naive_bayes = GaussianNB()
naive_bayes.fit(x_treinamento, y_treinamento)
# Predictions using the test records
previsoes = naive_bayes.predict(x_teste)
# Generate the confusion matrix and compute the accuracy and error rates
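# In sklearn's confusion_matrix, rows correspond to the true classes and columns to the predicted classes,
# so the diagonal holds the correctly classified records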
confusao = confusion_matrix(y_teste, previsoes)
confusao
taxa_acerto = accuracy_score(y_teste, previsoes)
taxa_erro = 1 - taxa_acerto
taxa_acerto
# Visualization of the confusion matrix
# Internal warning from the yellowbrick library; it is already at the latest version (no fix for the warning at the moment)
v = ConfusionMatrix(GaussianNB())
v.fit(x_treinamento, y_treinamento)
v.score(x_teste, y_teste)
v.poof()
# Prediction for a new record, transforming the categorical attributes into numeric ones
novo_credito = pd.read_csv('NovoCredit.csv')
novo_credito.shape
# We use the same objects created before, to keep the data encoding consistent
# We call only the "transform" method, because the fit to the data was already done earlier
novo_credito = novo_credito.iloc[:, 0:20].values
novo_credito[:, 0] = labelencoder1.transform(novo_credito[:, 0])
novo_credito[:, 2] = labelencoder2.transform(novo_credito[:, 2])
novo_credito[:, 3] = labelencoder3.transform(novo_credito[:, 3])
novo_credito[:, 5] = labelencoder4.transform(novo_credito[:, 5])
novo_credito[:, 6] = labelencoder5.transform(novo_credito[:, 6])
novo_credito[:, 8] = labelencoder6.transform(novo_credito[:, 8])
novo_credito[:, 9] = labelencoder7.transform(novo_credito[:, 9])
novo_credito[:, 11] = labelencoder8.transform(novo_credito[:, 11])
novo_credito[:, 13] = labelencoder9.transform(novo_credito[:, 13])
novo_credito[:, 14] = labelencoder10.transform(novo_credito[:, 14])
novo_credito[:, 16] = labelencoder11.transform(novo_credito[:, 16])
novo_credito[:, 18] = labelencoder12.transform(novo_credito[:, 18])
novo_credito[:, 19] = labelencoder13.transform(novo_credito[:, 19])
# Prediction result
naive_bayes.predict(novo_credito)<jupyter_output><empty_output>
|
no_license
|
/Machine Learning/naive_bayes.ipynb
|
higor-gomes93/formacao_cientista_de_dados_udemy_python
| 1 |
<jupyter_start><jupyter_text>## population and sample<jupyter_code>import numpy as np
np.random.seed(101)
population=np.random.randint(0,80,100000)
population
len(population)
np.random.seed(101)
sample=np.random.choice(population, 100)
np.random.seed(101)
sample_1000=np.random.choice(population, 1000)
len(sample)
len(sample_1000)
sample
sample.mean()
sample_1000.mean()
population.mean()
np.random.seed(101)
for i in range(10):
sample=np.random.choice(population, 100)
print(sample.mean())
np.random.seed(101)
sample_means=[]
for i in range(10):
sample=np.random.choice(population, 100)
sample_means.append(sample.mean())
sample_means
np.mean(sample_means)
population.mean()
sum(sample_means)/len(sample_means)<jupyter_output><empty_output><jupyter_text>## skewness and kurtosis<jupyter_code>import numpy as np
from scipy.stats import kurtosis, skew
from scipy import stats
import matplotlib.pyplot as plt
x=np.random.normal(0,2,1000000)
# print('excess kurtosis of normal distribution (should be 0): {}'.format(kurtosis(x)))
# print('skewness of normal distribution (should be 0): {}'.format(skew(x)))
#In finance, high excess kurtosis is an indication of high risk.
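# skew measures the asymmetry of the distribution (0 for a symmetric distribution such as the normal);
# scipy's kurtosis uses the Fisher definition by default, i.e. excess kurtosis relative to a normal
# distribution, so a normal sample should give a value close to 0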
plt.hist(x,bins=100);
kurtosis(x)
skew(x)
shape, scale = 2, 2
s=np.random.gamma(shape,scale, 100000)
plt.hist(s, bins=100);
kurtosis(s)
skew(s)
shape, scale = 5, 2
s=np.random.gamma(shape,scale, 100000)
plt.hist(s, bins=100);
kurtosis(s)
skew(s)<jupyter_output><empty_output>
|
no_license
|
/02_Statistics_Data Analysi/stats_session_2.ipynb
|
Mustafa2356/Github_Data_Science
| 2 |
<jupyter_start><jupyter_text># Predicting Recession in the United States : A Data Science Tutorial
*Yuze Hu*
## Introduction
As [Reuters](https://www.reuters.com/article/us-usa-economy-recession/u-s-economy-entered-recession-in-february-business-cycle-arbiter-says-idUSKBN23F28L) reported, the U.S. economy ended its longest expansion in history this February and entered a recession. Just two months later, the unemployment rate in the U.S. reached a historical high, and U.S. GDP is expected to contract 30% in the second quarter and 5% in 2020.
The term [Recession](https://en.wikipedia.org/wiki/Recession) refers to a business cycle contraction, a period with a general decline in economic activity. The business cycle model says the economy always alternates between recession and expansion. The U.S. economy, following this model, had many recessions in the past, and there are different explanations for the cause of each recession. The recession in 2020 is often considered a result of the [coronavirus pandemic](https://en.wikipedia.org/wiki/Coronavirus_disease_2019). Clearly it is, but are there other factors contributing to the recession? Could we have predicted this recession before the coronavirus pandemic happened? Should we expect the pandemic to end in the following year? Maybe we can answer these questions with the power of data science.
This tutorial will lead you to find and explore several possible indicators that can be used to predict an economic recession in the future. It will also teach you how to build regression models with the chosen indicators to actually predict the probability of a recession in a certain month in the future.
<jupyter_code># import lib we need
# we will use numpy and pandas to deal with the data
# pyplot is used to draw graphs
# sklearn.linear_model is used to build regression model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
#for decision tree and visualization
from sklearn.tree import DecisionTreeClassifier
from IPython.display import Image
from sklearn import tree
import pydotplus<jupyter_output><empty_output><jupyter_text>If you are unfamiliar with the above libraries, you can use the following tutorials as references
- [Pandas tutorial](https://pandas.pydata.org/pandas-docs/stable/getting_started/tutorials.html)
- [Numpy tutorial](https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html)
- [sklearn logistic regression doc](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.score)
## Data Collection & Processing & Exploration & Visualization
### Recession period
First of all, we need to get the historical recession period data.
In fact, a private economics research group, the NBER, acts as the arbiter for determining U.S. business cycles. We can find recession data on [their website](https://www.nber.org/cycles/). We will use the Excel file listed on the website for labeling recessions.
With this file, we have recession period data from 1854 to today. It would be nice to use all of these periods, but the reality is that we have a limited number of indicators that date back to 1854. As a result, we need to cut off at some year in between. Generally, the smaller the range of recession data we choose, the more indicators (and more accurate ones) will be available to us. We have to strike a balance here. In the end I decided to use 1947-2020: 1947 is the first available date of the first indicator I chose, which will be explained below.<jupyter_code>#get recession period and clean up
recess_raw = pd.read_excel("https://www.nber.org/cycles/NBER%20chronology_062020.xlsx")
recess_raw
# the first row actually contains the column names
recess_raw.columns = recess_raw.iloc[1]
recess_raw = recess_raw.iloc[27:37,2:4].apply(pd.to_datetime)
recess = recess_raw.reset_index().iloc[:,1:]
# since we are in the middle of a recession, we need to set the trough month manually to the current month
recess["Trough month"][9] = pd.to_datetime("2020-06-01")
recess<jupyter_output><empty_output><jupyter_text>Now we are going to find indicators that start prior to 1957 and may have a relationship with future recessions.
The first possible indicator is the Treasury yield spread.
### Treasury Yield Spread
Interest rates play a very critical role in the modern economy. They are the major tool that central banks across the world, including the Federal Reserve (the central bank of the United States), use to steer the economy. You can watch this video provided by the Federal Reserve to learn more: https://www.federalreserve.gov/faqs/why-do-interest-rates-matter.html.
[United States Treasury securities](https://en.wikipedia.org/wiki/United_States_Treasury_security) are government debt instruments issued by the United States Department of the Treasury to finance government spending as an alternative to taxation.
The interest rate of a Treasury, as a risk-free loan (or one that shares the risk of the whole economy) issued by the government, is the baseline of the interest rate market. The long-short term Treasury spread is a classical indicator for the economy. The spread, i.e. the difference between the interest rate of a long term Treasury and a short term Treasury, generally represents investors' expectations about the future, and those expectations translate into real economic impact. Sometimes the spread becomes negative, meaning that the interest rate of the long term Treasury is lower than that of the short term Treasury; when this happens there is always a recession within a few years.
You can read this paper for further explanation: https://www.jstor.org/stable/2328836?seq=1
We get Treasury rates with different terms from the website of the Federal Reserve: https://www.federalreserve.gov/data.htm
We will also include a special column, the "fed funds rate", which is the average overnight borrowing rate within the banking sector. You can think of it as a Treasury with a half-day term.
After cleaning up the data we get from the Federal Reserve, we will calculate the spread for four different long-short term combinations.<jupyter_code>#get treasury data from federal reserve
treasury_rate_raw = pd.read_csv("https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15&series=c7fcd02fcec21b6486c58c6c92ea81bf&lastobs=&from=01/01/1953&to=07/31/2020&filetype=csv&label=include&layout=seriescolumn")[5:]
treasury_rate_raw.columns = ["date","fed_rate","3m_treasury","1y_treasury","10y_treasury","20y_treasury"]
treasury_rate = treasury_rate_raw.copy()
treasury_rate
#clean up
#some missing values are marked with NC
treasury_rate.replace("NC",np.nan,inplace=True)
treasury_rate["date"] = pd.to_datetime(treasury_rate["date"])
treasury_rate.iloc[:,1:] = treasury_rate.iloc[:,1:].apply(pd.to_numeric)
#calculate spread data
treasury_rate['3m_10y_spread'] = treasury_rate["10y_treasury"] - treasury_rate["3m_treasury"]
treasury_rate['1y_10y_spread'] = treasury_rate["10y_treasury"] - treasury_rate["1y_treasury"]
treasury_rate['fed_10y_spread'] = treasury_rate["10y_treasury"] - treasury_rate["fed_rate"]
treasury_rate['3m_20y_spread'] = treasury_rate["20y_treasury"] - treasury_rate["3m_treasury"]
treasury_rate<jupyter_output><empty_output><jupyter_text>Now we can get some insights about the relation between the spread and recessions by drawing a line graph of the spread over time. Look at the following graph, where the grey regions represent recession periods. You should be able to see a strong linear relationship between the spread and the (probability of) recession. For every recession there is a drop in the spread just before the recession, and it comes back before the recession ends.
You can also see that the four combinations we chose have a strong correlation with each other. We want to choose one that is smooth (less variance) and sensitive to future recessions. Here we will choose to use 3m_10y_spread, but one should expect similar performance with a different choice.<jupyter_code>#visualization
fig = plt.figure(figsize=(20,10))
for i in treasury_rate.iloc[ :,-4:] :
plt.plot(treasury_rate["date"],treasury_rate[[i]],label=i)
plt.legend()
#mark recession period with grey color
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)<jupyter_output><empty_output><jupyter_text>### PPI & CPI
Next we will consider two price indices that reflect price changes.
[The Producer Price Index (PPI)](https://www.bls.gov/ppi/) measures the average change over time in the selling prices received by domestic producers for their output.
[The Consumer Price Index (CPI)](https://www.bls.gov/cpi/) is a measure of the average change over time in the prices paid by urban consumers for a market basket of consumer goods and services.
We can get these two price indices from the FRED database https://fred.stlouisfed.org/<jupyter_code>#get ppi data
ppi_full = pd.read_csv("https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=PPIACO&scale=left&cosd=1947-01-01&coed=2020-06-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2020-02-01&line_index=1&transformation=lin&vintage_date=2020-07-20&revision_date=2020-07-20&nd=1913-01-01")
ppi_full["DATE"] = ppi_full["DATE"].apply(pd.to_datetime)
ppi = ppi_full[90:].copy()
#get cpi data
cpi_full = pd.read_csv("https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=CPIAUCSL&scale=left&cosd=1947-01-01&coed=2020-06-01&line_color=%234572a7&link_values=false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&fq=Monthly&fam=avg&fgst=lin&fgsnd=2020-02-01&line_index=1&transformation=lin&vintage_date=2020-07-19&revision_date=2020-07-19&nd=1947-01-01")
cpi_full["DATE"] = cpi_full["DATE"].apply(pd.to_datetime)
cpi = cpi_full[90:].copy()
#visualization
fig = plt.figure(figsize=(20,10))
plt.plot(ppi['DATE'],ppi["PPIACO"],label='ppi')
plt.plot(cpi['DATE'],cpi["CPIAUCSL"],label='cpi')
plt.legend()<jupyter_output><empty_output><jupyter_text>Clearly, we cannot use this data directly, since it just increases over time and we will not get any linear relation between it and recessions. We need to extract some meaningful information.
The first guess is the 3-month change in percent; again, to achieve a linear relation we will take its absolute value.<jupyter_code>
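# The two lines below compute the absolute 3-month change of each index:
# |(value_t - value_{t-3}) / value_t|, with NaN padding for the first three months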
ppi["PPIACO"] = abs( np.concatenate(([np.nan]*3,((ppi["PPIACO"][3:].values - ppi["PPIACO"][:-3].values)/ ppi["PPIACO"][3:].values)) ))
cpi["CPIAUCSL"] = abs( np.concatenate(([np.nan]*3,((cpi["CPIAUCSL"][3:].values - cpi["CPIAUCSL"][:-3].values)/ cpi["CPIAUCSL"][3:].values)) ))
fig = plt.figure(figsize=(20,10))
plt.plot(ppi['DATE'],ppi["PPIACO"],label='ppi')
plt.plot(cpi['DATE'],cpi["CPIAUCSL"],label='cpi')
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)
plt.legend()<jupyter_output><empty_output><jupyter_text>To some degree, there seems to be a relationship, but it's still unclear.
We need smoother data. To obtain it, we need to consider the change over a longer horizon, so next we will calculate the change relative to the 28-month average. <jupyter_code>#reset ppi
ppi = ppi_full[65:].copy()
cpi = cpi_full[65:].copy()
#calculate ppiavg
ppi['28avg'] = ppi['DATE'].apply(lambda r: np.average( ppi_full[(ppi_full["DATE"] >= r - pd.DateOffset(months=28)) & (ppi_full["DATE"] < r) ]["PPIACO"] ))
ppi["PPIACO"] = abs(ppi["PPIACO"] - ppi['28avg'])/ppi["PPIACO"]
#calculate cpiavg
cpi['28avg'] = cpi['DATE'].apply(lambda r: np.average( cpi_full[(cpi_full["DATE"] >= r - pd.DateOffset(months=28)) & (cpi_full["DATE"] < r) ]["CPIAUCSL"] ))
cpi["CPIAUCSL"] = abs(cpi["CPIAUCSL"] - cpi['28avg'])/cpi["CPIAUCSL"]
#draw linear graph
fig = plt.figure(figsize=(20,10))
plt.plot(ppi['DATE'],ppi["PPIACO"],label='ppi')
plt.plot(cpi['DATE'],cpi["CPIAUCSL"],label='cpi')
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)
plt.legend()<jupyter_output><empty_output><jupyter_text>The graph looks much better now!
Generally, PPI is much more sensitive than CPI.
And we should notice that while the linear relationship appears to be significant in some periods, we can hardly see a relationship during 2010-2020 and before 1960. The Treasury yield spread is much more consistent over time.
### Shiller PE S&P 500
Finally, we will take a look at the stock market. Clearly, in order to get a linear relationship we cannot use the price data directly. A good indicator in the stock market is the price-earnings ratio (P/E).
The price-earnings ratio (P/E ratio) is the ratio between a company's current share price and its earnings per share. It's a classical tool to determine whether a company is undervalued or overvalued by the stock market.
You can learn more about P/E from this website: https://www.investopedia.com/terms/p/price-earningsratio.asp.
What we are going to use is the P/E of the [S&P500](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&cad=rja&uact=8&ved=2ahUKEwjlt8zZw97qAhVJmXIEHZkgBxcQmhMwMHoECAMQAg&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FS%2526P_500_Index&usg=AOvVaw3_VMll_kjp8utmgToclDkN), which is a collection of the major stocks in the market. It could serve as an indicator to tell us whether the whole market is overvalued or undervalued.
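As a quick illustration of the ratio itself (the numbers here are made up, not taken from the dataset):
```Python
price_per_share = 100.0   # hypothetical share price
earnings_per_share = 4.0  # hypothetical earnings per share over the trailing year
pe_ratio = price_per_share / earnings_per_share
print(pe_ratio)  # 25.0
```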
We can get the P/E data from https://www.quandl.com<jupyter_code>#get sp p/e data
sppe = pd.read_csv("https://www.quandl.com/api/v3/datasets/MULTPL/SP500_PE_RATIO_MONTH.csv?api_key=TvLsY3Psu3xwtWufwYnG")[:-860]
# reverse the dataframe
sppe = sppe[::-1].reset_index(drop=True)
sppe['Date'] = pd.to_datetime(sppe['Date'] )
#visualization
fig = plt.figure(figsize=(20,10))
plt.plot(sppe["Date"] ,sppe["Value"])
#mark recession
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)<jupyter_output><empty_output><jupyter_text>As you can see, there are certain drawbacks in the way the raw P/E ratio is calculated, which make the data not smooth enough (e.g. around 2008). This is because earnings become very unstable during a recession. As with PPI & CPI, we need some longer term insight into companies' earning ability.
Dr. Shiller developed a new way to calculate the P/E by adding a cyclical adjustment to the original one.
You can learn more about this way of calculating it here: https://en.wikipedia.org/wiki/Cyclically_adjusted_price-to-earnings_ratio
We will use the data from Dr. Shiller's website: http://www.econ.yale.edu/~shiller/data.htm
Since both an undervalued and an overvalued market could lead to a recession, we will use the absolute value of the difference between the current P/E and a normal P/E of 25.<jupyter_code>#get shiller sp500 pe
sppe = pd.read_excel("http://www.econ.yale.edu/~shiller/data/ie_data.xls",1)
#clean up
sppe = sppe[["Unnamed: 0","Unnamed: 12"]][1000:-1]
sppe.columns = ["date","cape"]
sppe["date"] =pd.to_datetime(sppe["date"].apply(str).replace(r"\.1$",".10",regex=True))
#25 pe as a standard
sppe["cape"] = abs(sppe["cape"].apply(pd.to_numeric) -25)
#visualization
fig = plt.figure(figsize=(20,10))
plt.plot(sppe["date"] ,sppe["cape"])
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)<jupyter_output><empty_output><jupyter_text>The graph looks good. But again, the Treasury yield spread shows a much more consistent relationship with recessions.
## Building Model
### Logistic Regression With Treasury Yield Spread
Since the Treasury yield spread shows a much more consistent relationship than the others, we will first use it alone to build the regression model.
We will use sklearn's logistic regression model. If you want to know how logistic regression works you can read this tutorial: https://machinelearningmastery.com/logistic-regression-for-machine-learning/
As said previously, we will use the spread between the 3-month Treasury and the 10-year Treasury as samples.
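Concretely, for a single feature the fitted model estimates the probability of the positive class as a logistic (sigmoid) function of the spread, where $w$ and $b$ are the learned coefficient and intercept:
$$P(\text{recession within } n \text{ months} \mid x) = \frac{1}{1 + e^{-(w x + b)}}$$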
For labels, we will use the answer to this question:
is there a recession in the next n months?
True will be marked with 1 and False with 0.
We will explore the accuracy with different values of n.
We won't use the 2020 recession to train the model, as it's still going on, but we will use it in testing.
We will use 3-fold cross validation to calculate the score, which means we will separate the data into three different sets of train and test data. You can learn more about cross validation at https://machinelearningmastery.com/k-fold-cross-validation/<jupyter_code>#choose 3m_10y_spread to predict recession
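# Label convention (as described above): for each date, the n-month-ahead label is 1 if the month
# that falls n months later lies inside an NBER recession window, and 0 otherwise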
spread = "3m_10y_spread"
treasury_rate_x = treasury_rate[["date",spread]].dropna().reset_index()
# build recess labels
#return 1 if the month `delta` months after `dat` is in a recession
def in_recess(dat,delta):
before = (recess["Peak month"]- pd.DateOffset(months=delta)<= dat)
after = (dat<=recess["Trough month"]- pd.DateOffset(months=delta))
return int((before & after).any())
recess_y = pd.DataFrame()
for i in range(36)[1:]:
recess_y[f"{i}m_rec"] = treasury_rate_x["date"].apply(in_recess,delta=i)
#cut 2020 recession
treasury_rate_x_e = treasury_rate_x[:798].copy()
recess_y_e=recess_y[:798].copy()
#accuracy with different months ahead
models =[]
scores =[]
for i in range(36)[1:]:
reg_log = linear_model.LogisticRegression()
reg_log.fit(treasury_rate_x_e[spread].values.reshape(-1,1),recess_y_e[f"{i}m_rec"])
models.append(reg_log)
#cross validation
scores.append( np.mean(cross_val_score(linear_model.LogisticRegression(),treasury_rate_x_e[spread].values.reshape(-1,1),recess_y_e[f"{i}m_rec"],cv=3)))
#visualization
fig = plt.figure(figsize=(20,20))
# draw a selected predicted probability overtime
plt.subplot(2,1,1)
plt.title("Predicted Probability of Recession Over Time")
for i in [1,8,12]:
reg_log = models[i-1]
pb = reg_log.predict_proba(treasury_rate_x[spread].values.reshape(-1,1))
#date is adjusted, so that the line value directly reprecent the probility of recession in current month
plt.plot(treasury_rate_x["date"]+pd.DateOffset(months=i),pd.DataFrame(pb)[1],label=f"{i} month ahead")
#mark recession
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)
plt.legend()
plt.ylabel("Probability of Recession")
plt.xlabel("Time")
plt.subplot(2,1,2)
plt.title("Accuracy of Diffrent Lookahead Periods")
plt.plot(range(36)[1:],scores)
plt.ylabel("Mean Accuracy in Cross Validation")
plt.xlabel("Months Ahead")<jupyter_output><empty_output><jupyter_text>The first graph is 1,8,12 months ahead prediction. (Be awared that the line is moved by respective time period, so value at a month directly reprecent the model's prediction on that month despite the data we used to predict is x months ago)
The second graph illustate different accurcy over different time period we choose.
From the above two graphs, one can see that the model woks great and predicts the 8-9 months ahead with best accuracy.
Then we can take a close look at the model's prediction on the recent 4 years, based on the data 8 months ago(before co the model expect there is ~50% probability of recession around feb 2020. Despite it's not over 50%, the number is high. This means Covid-19 may play a rule in the receesion 2020 but there may be some other fundemental reasons behind it.
And we can see the chance of existing recession in the end of the year is about 75% percent.
But be aware that the model have no idea if the previous month is a recession, one may expect the probability of recession in the next month is higher if previous month is in a recession.
It also has no idea about the situation of covid-19, in reality the progress of convid-19 will has huge imapct on the economy.<jupyter_code>fig = plt.figure(figsize=(20,10))
for i in [8,12]:
reg_log = models[i-1]
pb = reg_log.predict_proba(treasury_rate_x["3m_10y_spread"].values.reshape(-1,1))
#date is adjusted, so that the line value directly reprecent the probility of recession in current month
plt.plot((treasury_rate_x["date"]+pd.DateOffset(months=i))[-48:],pd.DataFrame(pb)[1][-48:],label=i)
plt.legend()<jupyter_output><empty_output><jupyter_text>### Logistic Regression With Multivariable
We use the same setup as the previous model, but now with multiple features.
We will compare the accuracy we get from this model to the previous model.<jupyter_code>
#cut 2020 recession
indicators_x = treasury_rate_x[6:807].copy()
recess_y =recess_y[6:807].copy()
indicators_x['cpi'] = cpi.loc[81:881]['CPIAUCSL'].values
indicators_x['ppi'] = ppi.loc[81:881]['PPIACO'].values
indicators_x['sppe'] = sppe['cape'].values
recess_y_e = recess_y[:-8].copy()
indicators_x_e = indicators_x[:-8].copy()
#accuracy with different months ahead
models_multi =[]
scores_multi =[]
for i in range(36)[1:]:
reg_log = linear_model.LogisticRegression()
reg_log.fit(indicators_x_e[[spread,'cpi','ppi','sppe']].values,recess_y_e[f"{i}m_rec"])
models_multi.append(reg_log)
scores_multi.append(np.mean(cross_val_score(linear_model.LogisticRegression(),indicators_x_e[[spread,'cpi','ppi','sppe']].values,recess_y_e[f"{i}m_rec"],cv=3)))
#visualization
fig = plt.figure(figsize=(20,20))
# draw a selected predicted probability overtime
plt.subplot(2,1,1)
plt.title("Predicted Probability of Recession Over Time")
for i in [1,8,12]:
reg_log = models_multi[i-1]
pb = reg_log.predict_proba(indicators_x[[spread,'cpi','ppi','sppe']].values)
#date is adjusted, so that the line value directly reprecent the probility of recession in current month
plt.plot(indicators_x["date"]+pd.DateOffset(months=i),pd.DataFrame(pb)[1],label=f"{i} month ahead")
#mark recession
for _, row in recess.iterrows() :
fig.axes[0].axvspan(row["Peak month"],row["Trough month"],color='grey',alpha=0.5)
plt.legend()
plt.ylabel("Probability of Recession")
plt.xlabel("Time")
plt.subplot(2,1,2)
plt.title("Accuracy of Diffrent Lookahead Periods")
plt.plot(range(36)[1:],scores_multi,label="multi-variable")
plt.plot(range(36)[1:],scores,label="treasury-spread")
plt.ylabel("Mean Accuracy in Cross Validation ")
plt.xlabel("Months Ahead")
plt.legend()<jupyter_output><empty_output><jupyter_text>We can see from the above graphs that our multi-variable model outperforms the model that depends solely on the Treasury spread for 4-13 months ahead, but the difference between the two models is relatively small. This means the additional factors cannot really provide much more information to us.
We will also try to train a non-linear model.
### Decision Tree
If you don't know what a decision tree is, you can read this introduction: https://www.geeksforgeeks.org/decision-tree/
Generally, a decision tree is closer to the way we make decisions, which means it can be easier to understand when the depth is low.
We will use a depth of one here, because we can't improve the cross-validation score by increasing the depth. You can try it out by yourself.
<jupyter_code>models_multi_dec =[]
scores_multi_dec =[]
for i in range(36)[1:]:
decision_tree = DecisionTreeClassifier(max_depth=1)
decision_tree.fit(indicators_x_e[[spread,'cpi','ppi','sppe']].values,recess_y_e[f"{i}m_rec"])
models_multi_dec.append(decision_tree)
#cross validation
scores_multi_dec.append(np.mean(cross_val_score(DecisionTreeClassifier(max_depth=4),indicators_x_e[[spread,'cpi','ppi','sppe']].values,recess_y_e[f"{i}m_rec"],cv=3)))
#visualization
fig = plt.figure(figsize=(20,10))
plt.title("Accuracy of Diffrent Lookahead Periods")
plt.plot(range(36)[1:],scores_multi,label="logistic regression - multi-variable")
plt.plot(range(36)[1:],scores_multi_dec,label="decision tree -multi")
plt.plot(range(36)[1:],scores,label="treasury-spread")
plt.ylabel("Mean Accuracy in Cross Validation")
plt.xlabel("Months Ahead")
plt.legend()
# Create DOT data
dot_data = tree.export_graphviz(models_multi_dec[4-1], out_file=None,feature_names=[spread,"cpi","ppi","sppe"],
class_names=["recission","not recession"])
# Draw graph
graph = pydotplus.graph_from_dot_data(dot_data)
# Show graph
Image(graph.create_png())<jupyter_output><empty_output>
|
no_license
|
/.ipynb_checkpoints/A Data Science Tutorial -checkpoint.ipynb
|
futakey/320proj
| 13 |
<jupyter_start><jupyter_text> 1 - Analog Cosine Transform
 2 - Discrete Cosine Transform
where:
we get:
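A common orthonormal form of the DCT-II (with the same scaling factor $c_k$ that the `c_val` helper in the code below returns) is:
$$X_k = \sqrt{\frac{2}{N}}\, c_k \sum_{n=0}^{N-1} x_n \cos\!\left(\frac{\pi (2n+1) k}{2N}\right), \qquad c_k = \begin{cases}\sqrt{1/2}, & k = 0\\ 1, & k > 0\end{cases}$$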
## Inverse Discrete Cosine Transform
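The corresponding inverse transform is:
$$x_n = \sqrt{\frac{2}{N}} \sum_{k=0}^{N-1} c_k X_k \cos\!\left(\frac{\pi (2n+1) k}{2N}\right)$$
The `idct` function in the example computes the individual terms of this sum for each $k$; they are added together afterwards to recover the signal.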
# Example: <jupyter_code>#imports
import math
import matplotlib.pyplot as plt
#instantiation
x = [10, 5, 8.5, 2, 1, 1.5, 0, 0.1]
N = len(x) #number of samples
#function for the scaling value c_k
def c_val(k):
if k == 0:
return math.sqrt(1/2)
else:
return 1
#Inverse DCT function (IDCT)
def idct(x,N):
#Initializing the multidimensional array
v = [[0 for col in range(N)] for row in range(N)]
for n in range(N):
for k in range(N):
ck = c_val(k)
v[k][n] = round(math.sqrt(2/N)*ck*x[k]*math.cos(((2*math.pi*n*k)/(2*N))+(k*math.pi)/(2*N)),4)
plt.plot(v[k],'bo')
plt.xlabel("Pontos para k = "+str(k))
plt.grid(True, lw = 1, ls = '--', c = '.55')
plt.show()
return v
v = idct(x,N)
j = 0
for i in v:
print(f" Para k = {j}, temos: {i}")
j+=1
soma = [0]*N
for i in range(len(v)):
for j in range(N):
soma[j] = soma[j]+v[i][j]
#plt.plot(soma,'bo')
#plt.ylabel("Soma dos pontos de 0 a "+str(i))
#plt.grid(True, lw = 1, ls = '--', c = '.55')
#plt.show()
plt.plot(soma,'bo')
plt.xlabel("Soma dos Pontos")
plt.grid(True, lw = 1, ls = '--', c = '.55')
plt.show()
#Value of the discrete cosine sum
print(" VALOR DA SOMA = ",soma)<jupyter_output> VALOR DA SOMA = [11.5256, 5.9285000000000005, 2.1515, 0.46930000000000033, -0.5441000000000003, 0.9594999999999997, 3.6880999999999995, 4.1056]
<jupyter_text> 3 - Discrete Cosine Transform
<jupyter_code>#DCT function
def dct(x,N):
print (x)
#Initializing the multidimensional array
v = [[0 for col in range(N)] for row in range(N)]
for k in range(N):
ck = c_val(k)
for n in range(N):
v[k][n] = round(math.sqrt(2/N)*ck*x[k]*math.cos(math.radians(((2*math.pi*n*k)/(2*N))+(k*math.pi)/(2*N))),4)
plt.plot(v[n],'bo')
plt.xlabel("Pontos para k = "+str(n))
plt.grid(True, lw = 1, ls = '--', c = '.55')
plt.show()
return v
w = dct(soma,N)
j = 0
for i in w:
print(f" Para k = {j}, temos: {i}")
j+=1
soma_inv = [0]*N
for i in range(len(w)):
for j in range(N):
soma_inv[i] = soma_inv[i]+w[i][j]
#plt.plot(soma_inv,'bo')
#plt.ylabel("Soma dos pontos de 0 a "+str(i))
#plt.grid(True, lw = 1, ls = '--', c = '.55')
#plt.show()
plt.plot(soma_inv,'bo')
plt.grid(True, lw = 1, ls = '--', c = '.55')
plt.xlabel("Soma dos Pontos")
plt.show()
print(" VALOR DA SOMA = ",soma_inv)<jupyter_output> VALOR DA SOMA = [32.5992, 23.702, 8.5888, 1.8687, -2.159, 3.7903999999999995, 14.4888, 16.0236]
|
no_license
|
/PDI.ipynb
|
IcaroTARique/DCT_PDI
| 2 |
<jupyter_start><jupyter_text># Lab Assignment 2 for CSE 7324 Fall 2017
___Members___: Hongning Yu, Hui Jiang, Hao Pan
## 1. Business Understanding
The dataset we use is a lyrics dataset (lyrics from MetroLyrics), which can be downloaded from Kaggle for free: https://www.kaggle.com/gyani95/380000-lyrics-from-metrolyrics. By exploring this dataset, we are able to learn the key features of each song genre and predict the corresponding genre for new songs.
In this dataset, there are 362237 records and 5 features (song name, year, artist, genre, and lyrics). It is comprised of text documents and contains only text divided into documents. Besides, we can predict song genres according to lyrics, so it meets the requirements for Lab 2.
For this project, our main purpose is to find the features of different song genres by analyzing the most frequent words in lyrics. Visualizing the features will reveal more information about them in the dataset. We may then be able to figure out the relationships among features, which might benefit our genre prediction as well.
The statistics and prediction results can be applied to applications related to song searching or recommendation. For example, song searching applications, like the one Siri may use when you ask her "What song is it?", can narrow down the song searching scope by classifying songs according to lyric features. As for song recommendation applications, they could recommend songs by analyzing the lyrics of users' favorite songs.
To ensure the correctness of our predictions, we will keep a prediction accuracy (AUC) target, like 80%, using accuracy measurement functions. We will use other more helpful evaluation metrics and functions if needed.
## 2. Data Encoding
First let's load the data into a dataframe. The data is already in a csv file, but all of the lyrics are raw text with different formats. Our goal is to predict genre based on lyrics, so we still need to clean all the lyrics.<jupyter_code>import pandas as pd
import nltk
import numpy as np
import string
pd.set_option('display.max_columns', 60)
df = pd.read_csv("./lyrics.csv", encoding="utf-8")
df.head()<jupyter_output><empty_output><jupyter_text>### check null values in dataset.<jupyter_code>df.isnull().sum()<jupyter_output><empty_output><jupyter_text>Looks like there are null values in lyrics and song. Just drop them.<jupyter_code>df.dropna(inplace=True)
df.isnull().sum()<jupyter_output><empty_output><jupyter_text>### check genre<jupyter_code>df.genre.value_counts()<jupyter_output><empty_output><jupyter_text>As we can see, some genres have way more records than others. For our genre-predicting classification problem, we could sample the dataset and choose subsets of some genres to avoid bias. But let's now keep it as it is and deal with this later.
Check certain genres:<jupyter_code>df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
Int64Index: 266556 entries, 0 to 362236
Data columns (total 6 columns):
index 266556 non-null int64
song 266556 non-null object
year 266556 non-null int64
artist 266556 non-null object
genre 266556 non-null object
lyrics 266556 non-null object
dtypes: int64(2), object(4)
memory usage: 14.2+ MB
<jupyter_text>### 2.1 Read in data and check data quality
### Change to ASCII
First let's try to get rid of all non-ASCII characters, since we only want English characters.
**Takes too much time**<jupyter_code># %%time
# import re
# for row in df.index[:1000]:
# df.loc[row, 'lyrics'] = df.loc[row, 'lyrics'].encode('ascii', errors='ignore').decode()
# for row in df.index[:1000]:
# df.loc[row, 'lyrics'] = re.sub(r'[^\x00-\x7f]',
# r'',
# df.loc[row, 'lyrics']) <jupyter_output><empty_output><jupyter_text>### English Filter
We want to focus on songs with English lyrics, so let's delete all non-English records if they exist.
I tried to build an English-ratio detector to eliminate all non-English songs.
Reference: https://github.com/rasbt/musicmood/blob/master/code/collect_data/data_collection.ipynb
But the loop of set calculation **takes too much time**. Need to improve.<jupyter_code># %%time
# def eng_ratio(text):
# ''' Returns the ratio of non-English to English words from a text '''
# english_vocab = set(w.lower() for w in nltk.corpus.words.words())
# text_vocab = set(w.lower() for w in text.split('-') if w.lower().isalpha())
# unusual = text_vocab.difference(english_vocab)
# diff = len(unusual)/(len(text_vocab)+1)
# return diff
# # first let's eliminate non-english songs by their names
# before = df.shape[0]
# for row_id in range(100):
# text = df.loc[row_id]['song']
# diff = eng_ratio(text)
# if diff >= 0.5:
# df = df[df.index != row_id]
# after = df.shape[0]
# rem = before - after
# print('%s have been removed.' %rem)
# print('%s songs remain in the dataset.' %after)<jupyter_output><empty_output><jupyter_text>### English Filter Ver.2
This is another approach, which uses a package from https://github.com/saffsd/langid.py. This package can detect language much more quickly. But still, 260k records take around 50 minutes.<jupyter_code># # package from https://github.com/saffsd/langid.py
# import langid
# before = df.shape[0]
# for row in df.index:
# lang = langid.classify(df.loc[row]['lyrics'])[0]
# if lang != 'en':
# df = df[df.index != row]
# after = df.shape[0]
# rem = before - after
# print('%s have been removed.' %rem)
# print('%s songs remain in the dataset.' %after)<jupyter_output>23693 have been removed.
242863 songs remain in the dataset.
<jupyter_text>### save english songs to a new csv<jupyter_code># df.to_csv('lyrics_new.csv',index_label='index')<jupyter_output><empty_output><jupyter_text>*****
### Re-read csv file as df
Now only English songs exist in our dataset.<jupyter_code>df = pd.read_csv("./lyrics_new.csv", encoding="utf-8").drop('index.1', axis=1)
df.genre.value_counts()<jupyter_output><empty_output><jupyter_text>### Resampling df --> df_sample
With 300k records we easily run out of memory, so I resample the dataset and choose an equal number of songs from each genre.<jupyter_code>grouped = df.groupby('genre')
df_sample = grouped.apply(lambda x: x.sample(n=1800, random_state=7))
print("Size of dataframe: {}".format(df_sample.shape[0]))
df_sample.genre.value_counts()
# reset index means remove index (and change index to a column if not drop)
df_sample.reset_index(drop=True, inplace=True)
df_sample.head(10)<jupyter_output><empty_output><jupyter_text>### Check the lyrics' quality<jupyter_code># check lyrics with length less than 100
less_than_100 = 0
for row in df_sample.index[:1000]:
if len(df_sample.loc[row]['lyrics'])<=100:
print(df_sample.loc[row]['lyrics'])
less_than_100 += 1
print("\nNum of lyrics with length less than 100 in first 1000: {}".format(less_than_100))<jupyter_output>instrumental
This track is an instrumental and has no lyrics.
guitars and cadilacs
hillbilly music
only thing that keeps me hanging on
instrumental
INSTRUMENTAL
Num of lyrics with length less than 100 in first 1000: 5
<jupyter_text>It looks like lots of songs don't have meaningful lyrics (instrumental music, or something went wrong when crawling).
So we just drop all song records whose lyrics are shorter than 100 characters.
### df_sample --> df_clean<jupyter_code>print("Deleting records with lyric length < 100")
len_before = df_sample.shape[0]
df_clean = df_sample.copy()
for row in df_clean.index:
if len(df_clean.loc[row]['lyrics']) <= 100:
df_clean.drop(row, inplace=True)
len_after = df_clean.shape[0]
print("Before: {}\nAfter : {}\nDeleted: {}".format(len_before, len_after, len_before-len_after))
df_clean.genre.value_counts()<jupyter_output><empty_output><jupyter_text>***
### transfer lyrics to list
### df_clean --> x & y<jupyter_code>x = df_clean['lyrics'].values
y = df_clean['genre'].values
print('Size of x: {}\nSize of y: {}'.format(x.size, y.size))
x = x.tolist()
x[1]<jupyter_output>Size of x: 20954
Size of y: 20954
<jupyter_text>### removing punctuation and \n
reference: https://stackoverflow.com/questions/13970203/how-to-count-average-sentence-length-in-words-from-a-text-file-contains-100-se<jupyter_code># def count_sentence_len(lyric):
# """count average sentence len for a lyric"""
# sents_list = lyric.split('\n')
# avg_len = sum(len(x.split()) for x in sents_list) / len(sents_list)
# return avg_len
# sentence_length_avg = []
x_clean = []
translator = str.maketrans('', '', string.punctuation)
for l in x:
l = l.translate(translator)
# sentence_len = count_sentence_len(l)
# sentence_length_avg.append(sentence_len)
l = l.replace('\n', ' ')
x_clean.append(l)
# randomly print 5 lyrics
import random
for i in random.sample(range(len(x_clean)), 5):
print(x_clean[i])
print("=============================")
print(len(x_clean))<jupyter_output>20954
<jupyter_text>### 2.2 Removing stop words
The nltk package has a built-in list of stop words. Here I build my own stop-word list based on sklearn's built-in stop word list.<jupyter_code>%%time
x_clean = [x.lower() for x in x_clean]
x_clean_new = []
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
stop_words = list(ENGLISH_STOP_WORDS)
stop_words = stop_words + ['will', 'got', 'ill', 'im', 'let']
for text in x_clean:
text = ' '.join([word for word in text.split() if word not in stop_words])
x_clean_new.append(text)
x_clean = x_clean_new<jupyter_output>CPU times: user 19.9 s, sys: 136 ms, total: 20 s
Wall time: 20.3 s
<jupyter_text>### 2.3 Bag-of-words representation
Here I used an English dictionary from https://github.com/eclarson/MachineLearningNotebooks/tree/master/data<jupyter_code>with open('./ospd.txt', encoding='utf-8', errors='ignore') as f1:
vocab1 = f1.read().split("\n")
print(len(vocab1))
from sklearn.feature_extraction.text import CountVectorizer
# CounterVectorizer can automatically change words into lower case
cv = CountVectorizer(stop_words='english',
encoding='utf-8',
lowercase=True,
vocabulary=vocab1)
bag_words = cv.fit_transform(x_clean)
print('Shape of bag words: {}'.format(bag_words.shape))
print("Length of Vocabulary: {}".format(len(cv.vocabulary_)))<jupyter_output>Shape of bag words: (20954, 79340)
Length of Vocabulary: 79340
<jupyter_text>Let's create a pandas dataframe containing the bag-of-words (BoW) model<jupyter_code>df_bow = pd.DataFrame(data=bag_words.toarray(),columns=cv.get_feature_names())
df_bow.head()
%%time
word_freq = df_bow.sum().sort_values(ascending=False)
word_freq[:30]<jupyter_output><empty_output><jupyter_text>### 2.4 Tf-idf representation<jupyter_code>from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vect = TfidfVectorizer(stop_words='english',
encoding='utf-8',
lowercase=True,
vocabulary=vocab1)
tfidf_mat = tfidf_vect.fit_transform(x_clean)
print('Shape of bag words: {}'.format(tfidf_mat.shape))
print("Length of Vocabulary: {}".format(len(tfidf_vect.vocabulary_)))
df_tfidf = pd.DataFrame(data=tfidf_mat.toarray(),columns=tfidf_vect.get_feature_names())
df_tfidf.head()
%%time
word_score = df_tfidf.sum().sort_values(ascending=False)
word_score[:30]<jupyter_output><empty_output><jupyter_text>We can also calculate the correlation matrix, where the number in each position (i,j) represents the correlation (the cosine similarity of the L2-normalized tf-idf vectors) between song i and song j.<jupyter_code>corr = (tfidf_mat * tfidf_mat.T).A
corr.shape<jupyter_output><empty_output><jupyter_text>## 3. Data Visualization
### 3.1 Summary<jupyter_code>df_clean.head()
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
%matplotlib inline
plt.style.use('ggplot')
freq = pd.DataFrame(word_freq, columns = ['frequency'])
fig = freq[:20].plot(kind = 'barh', figsize=(9,8), fontsize=18)
# plt.legend('number of occurrences', loc = 'upper right')
plt.gca().invert_yaxis()
plt.title('words frequencies', fontsize=20)<jupyter_output><empty_output><jupyter_text>As we can see in this histogram, the most frequent words are "love", "know", "like" and so on. Among the top 20 frequent words listed in the histogram, the frequency of the top 4 words (love, know, like, just) is almost triple that of the last 3 words, i.e. there's a considerable difference between the frequencies of different words. One more thing we notice is that there is an interjection in the list, "oh", and it is the 6th most frequent word. We didn't even realize artists used so many "oh"s in their lyrics!<jupyter_code>score = pd.DataFrame(word_score, columns = ['Score'])
ax = score[:20].plot(kind = 'barh', figsize=(9,8), fontsize=18)
plt.legend('score', loc = 'lower right', fontsize=15)
plt.gca().invert_yaxis()
plt.title('tf-idf score')<jupyter_output><empty_output><jupyter_text>To figure out the most representative words for each genre, TF-IDF may be more appropriate (given that TF-IDF reflects how important a word is to a document).
From the plot above, we can see that the top-scoring words are totally different from the words ranked by term frequency.
And we can see that some words, like "al", "bo", "dor", "la", have a high TF-IDF score. This may be due to the fact that these words only exist in a few documents (songs), which makes them so "special" that they are highlighted as important words for those documents.
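As a reminder of how the score is computed (this is sklearn's default smoothed idf, which `TfidfVectorizer` uses unless configured otherwise; $n$ is the number of documents and $\text{df}(t)$ the number of documents containing term $t$):
$$\text{tf-idf}(t, d) = \text{tf}(t, d)\cdot\left(\ln\frac{1 + n}{1 + \text{df}(t)} + 1\right)$$
Each document vector is then L2-normalized, so rare words can end up with very large weights in the few songs that contain them.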
A TF-IDF analysis for each genre is still needed.<jupyter_code># code example from https://www.kaggle.com/carrie1/drug-of-choice-by-genre-using-song-lyrics
df_clean['word_count'] = df_clean['lyrics'].str.split().str.len()
df_clean.info()
f, ax = plt.subplots(figsize=(10, 9))
sns.violinplot(x = df_clean.word_count)
plt.xlim(-100, 1000)
plt.title('Word count distribution', fontsize=26)<jupyter_output><empty_output><jupyter_text>The violin plot shows the distribution of songs by the number of words in their lyrics.
Most songs have lyrics of 100 to 300 words, the median lyric length is around 200 words, and only a small share of songs have lyrics longer than 400 words.
This makes sense for real lyrics: people may get tired of songs with too many words, and are unlikely to fall in love with songs that have only a few.
The plot above covers all lyrics without separating by genre, so we still cannot see this feature for each genre.<jupyter_code>f, ax = plt.subplots(figsize=(10, 9))
sns.boxplot(x = "genre", y = "word_count", data = df_clean, palette = "Set1")
plt.ylim(1,2000)<jupyter_output><empty_output><jupyter_text>To figure out the lyric length for each genre, we group the data by genre and draw a box plot per genre.
According to the plot, the medians of most boxes are under 250 (around 200); only the Hip-Hop median is around 500, more than double the others. For the maximum, Electronic, Rock and Hip-Hop have the three longest lyrics, and there is no big difference in the minimum across genres.
In general, the five genres with the longest lyrics are Hip-Hop, Pop, R&B, Electronic and Indie, while the shortest three are Jazz, Metal and Country. Up-tempo genres seem more likely to have longer lyrics, and vice versa, but there are exceptions: Metal is up-tempo yet mostly has shorter lyrics than the other up-tempo genres. Thus, lyric length can be a reference for genre classification but should not be the deciding metric.### distribution across time<jupyter_code>mpl.rc("figure", figsize=(12,12))
sns.violinplot(x='genre', y='year', data=df_clean)<jupyter_output><empty_output><jupyter_text>Looks like the distribution is biased with extreme values. So let's check outliers.<jupyter_code>df_clean[df_clean['year'] <= 2000].shape[0]<jupyter_output><empty_output><jupyter_text>Drop songs before 2000 and plot again.<jupyter_code>for row in df_clean[df_clean['year'] <= 2000].index:
df_clean.drop(row, inplace=True)
mpl.rc("figure", figsize=(15, 25))
sns.violinplot(x='year', y='genre', data=df_clean, inner="quartile")<jupyter_output><empty_output><jupyter_text>We can see that the distributions are quite different. Country, Metal, Pop, R&B and Rock have a more centralized distribution, with most songs created during 2005~2010, while the other genres have a more stretched distribution. "Other" songs (songs not labeled with a genre) are mostly composed after 2012, probably because new songs don't have labels yet.
Several genres had a big bang around 2006~2009. We are wondering whether this distribution reflects reality or is just a crawling artifact.### top artist<jupyter_code>top_artist = df.artist.value_counts().head(8).index.tolist()
# df_clean['artist'].isin(top_artist)
# df_clean.loc[df_clean['artist'] in]
df_top_artist = df_clean.loc[df_clean['artist'].isin(top_artist), :]
df_top_artist.head()
df_top_artist.info()
mpl.rc("figure", figsize=(25, 15))
sns.violinplot(x='artist', y='year', data=df_top_artist, inner="quartile")
sns.set(font_scale=3)<jupyter_output><empty_output><jupyter_text>For the top 8 artists, we plot this figure to explore their high-yield years.
For artists eddy-arnold, dolly-parton, eminem, barba-streisan and bee-gees, most of their songs were composed during 2005~2010, while cris-crown and bob dylan seem to have kept creating over a long span. However, bob-dylan's works look sort of "ahead of time", which may be due to mis-entered information. <jupyter_code>print(df_bow.shape)
print(len(y))<jupyter_output><empty_output><jupyter_text>### length of songs<jupyter_code>df_bow['length'] = df_bow.sum(axis=1)
# create two new columns:
# @ length: length of documents basing on bag-of-word model
# @ genre: genre of the record
df_bow['genre'] = pd.Series(y).values
mpl.rc("figure", figsize=(25, 15))
sns.violinplot(x='length', y='genre', data=df_bow, inner="quartile")
sns.set(font_scale=3)<jupyter_output><empty_output><jupyter_text>This is another way to calculate lyric length, based on the bag-of-words counts. The violin plot of lyric length per genre corresponds to the box plot above. Next we want to check the top 10 frequent words of each genre.<jupyter_code>genre_count = df_bow.groupby('genre').sum()
genre_count.drop('length', axis=1, inplace=True)
genre_count.head()
genre_count_new = genre_count.transpose()
genre_list = df_clean.genre.unique().tolist()
for genre in genre_list:
t = genre_count_new.nlargest(10, genre, keep='first')[genre]
fig = plt.figure(figsize=(6,4))
fig.suptitle(genre, fontsize=20)
plt.xticks(rotation='vertical')
sns.barplot(t.values, t.index, alpha=0.8)
sns.set(font_scale=3)<jupyter_output><empty_output><jupyter_text>In the histograms above, we list the top frequent words for each genre. Different genres share many of their top-10 frequent words, and this information is visualized in the word cloud figures in Part 4. From these histograms it is pretty clear that 'love' is a word almost every type of music cares about, along with other shared words such as 'know', 'time' and 'oh'; many of these words are verbs.
It looks like hip-hop has a quite different set of frequent words, distinctive from the other genres.## 4. Word Cloud
Now it is 'wordcloud' time.
Word cloud is a visual representation of text data, and it is a very efficient way to represent word frequencies.
First let's try to draw the overall word cloud based on term frequency.<jupyter_code>from wordcloud import WordCloud
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
%matplotlib inline
plt.style.use('ggplot')
all_lyrics = ''
for lyric in x_clean:
all_lyrics += (' '+lyric)
# code example from https://amueller.github.io/word_cloud/index.html
wordcloud = WordCloud(max_font_size=60).generate(all_lyrics)
import matplotlib.pyplot as plt
plt.figure(figsize=(15,15))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")<jupyter_output><empty_output><jupyter_text>We can clearly see that the most frequently used word overall is 'love', followed by 'got'.<jupyter_code>word_freq[:30]<jupyter_output><empty_output><jupyter_text>As we can see, the word cloud describes word frequency in a visual way.
Let's try plotting word clouds for the different genres.<jupyter_code>d = {'genre': y.tolist(), "lyric": x_clean}
df_plot = pd.DataFrame(d)
df_plot.head(10)<jupyter_output><empty_output><jupyter_text>Now let's separate those lyrics into different genres.<jupyter_code># create a dictionary and store all lyrics based on their genre
lyrics = {}
for genre in df_plot.genre.unique().tolist():
lyrics[genre] = ' '
for row in (df_plot[df_plot['genre'] == genre].index):
lyrics[genre] = lyrics[genre] + ' ' + df_plot.loc[row, 'lyric']
for genre, lyric in lyrics.items():
wordcloud = WordCloud(max_font_size=60).generate(lyric)
fig = plt.figure(figsize=(10,8))
fig.suptitle(genre, fontsize=24)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.tight_layout()<jupyter_output><empty_output>
<jupyter_start><jupyter_text>### Problem Statement -
To predict the forest fire burned area<jupyter_code># Importing the necessary libraries
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
import random
# Loading the dataset
db = pd.read_csv('forest_fires.csv')
#Printing the first 5 rows of the loaded Dataset
db.head()
# Extracting the dataset information
db.info()
# Libraries and configurations for figure plotting
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn')
db.hist(bins=30, figsize=(20,15)) # plotting the histogram
# Converting the days and months into integers
db.month.replace(('jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec'),(1,2,3,4,5,6,7,8,9,10,11,12), inplace=True)
db.day.replace(('mon','tue','wed','thu','fri','sat','sun'),(1,2,3,4,5,6,7), inplace=True)
#Printing after replacement
db.head(10)
# Correlation analysis of the dataset
db.corr()
db.describe() # Generate descriptive statistics that summarize the central tendency,dispersion and shape of a dataset's distribution
from sklearn.model_selection import train_test_split
# dividing the data into test and training sets
train_set, test_set = train_test_split(db, test_size=0.2, random_state=42)
work_set = train_set.copy() # assigning a copy of train set to work_set
train_set.head()
test_set.head()
work_set.plot(kind='scatter', x='X', y='Y', alpha=0.1, s=300) # scatter plot for the dataset
work_set.plot(kind='scatter', x='X', y='Y', alpha=0.2, s=20*work_set['area']) # plotting the graphs by increasing the size to see the affect of area over the datapoints
# Extracting features from the dataset
# converting to list
x_values = list(work_set['X'])
y_values = list(work_set['Y'])
loc_values = []
for index in range(0, len(x_values)):
    temp_value = []
    temp_value.append(x_values[index])
    temp_value.append(y_values[index])
    loc_values.append(temp_value)  # store the (X, Y) pair; the original loop never appended it
# counting the instances location in the dataset
def count_points(x_points, y_points, scaling_factor):
count_array = []
for index in range(0, len(x_points)):
temp_value = [x_points[index], y_points[index]]
count = 0
for value in loc_values:
if(temp_value == value):
count = count + 1
count_array.append(count * scaling_factor )
return count_array
work_set.head()
# Plotting the histogram for the RH attribute
from pandas.plotting import scatter_matrix
attributes = ['RH']
scatter_matrix(work_set[attributes], figsize=(15,10))
# Plotting the histogram for the temp attribute
from pandas.plotting import scatter_matrix
attributes = ['temp']
scatter_matrix(work_set[attributes], figsize=(15,10))
# Plotting the histogram for the DMC attribute
from pandas.plotting import scatter_matrix
attributes = ['DMC']
scatter_matrix(work_set[attributes], figsize=(15,10))
# Plotting the histogram for the area attribute
from pandas.plotting import scatter_matrix
attributes = ['area']
scatter_matrix(work_set[attributes], figsize=(15,10))<jupyter_output><empty_output><jupyter_text>#### Finding the unique values in month , day and area ( the values could be repetitive)<jupyter_code>db['month'].unique()
db['day'].unique()
db['area'].unique()
# defining the method for plotting the histogram
def histogram_plot(db, title):
plt.figure(figsize=(8, 6))
ax = plt.subplot()
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.title(title, fontsize = 22)
plt.hist(db, edgecolor='black', linewidth=1.2)
plt.show()
# Scattering the plot with the help of the location
plt.figure(figsize=(8, 6))
ax = plt.subplot()
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.title("Fire location plot", fontsize = 22)
plt.scatter(x_values, y_values, s = count_points(x_values, y_values, 25), alpha = 0.3)
plt.show()
#Encoding the data using the Label Encoder
from sklearn.preprocessing import LabelEncoder
month_encoder = LabelEncoder()
day_encoder = LabelEncoder()
months = db['month']
days = db['day']
month_1hot = month_encoder.fit_transform(months) # label encoding month
day_1hot = day_encoder.fit_transform(days) # label encoding day<jupyter_output><empty_output><jupyter_text>###### Seeing the data after label encoding<jupyter_code>month_1hot
day_1hot
# Standardizing the data (Feature Scaling) so that all the features are of the same scale
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
numerical_features = db.drop(['month', 'day'], axis=1)
scaled_features = scaler.fit_transform(numerical_features)
scaled_features
from sklearn.base import BaseEstimator, TransformerMixin
# defining the methods for the AttributeSelector
class AttributeSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
from sklearn.preprocessing import MultiLabelBinarizer
# defining the methods for the CustomBinarizer
class CustomBinarizer(BaseEstimator, TransformerMixin):
def __init__(self, class_labels):
self.class_labels = class_labels
def fit(self, X, y=None,**fit_params):
return self
def transform(self, X):
return MultiLabelBinarizer(classes=self.class_labels).fit_transform(X)
from sklearn.pipeline import Pipeline
numerical_attributes = ['X', 'Y', 'FFMC', 'DMC', 'DC', 'ISI', 'temp', 'RH', 'wind', 'rain'] # Selecting the numerical columns
categorical_attributes = ['month', 'day'] # # Selecting the categorical columns
categorical_classes = np.concatenate((db['month'].unique(), db['day'].unique()), axis=0)
# creating the separate numerical and categorical pipelines
numerical_pipeline = Pipeline([
('selector', AttributeSelector(numerical_attributes)),
('standardize', StandardScaler()),
])
categorical_pipeline = Pipeline([
('selector', AttributeSelector(categorical_attributes)),
('encode', CustomBinarizer(categorical_classes)),
])
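# Hypothetical usage sketch (not in the original notebook): the numerical pipeline already
# selects and standardizes the numeric columns in one step; the two pipelines could later be
# merged with sklearn.pipeline.FeatureUnion to build the full feature matrix.
numeric_prepared = numerical_pipeline.fit_transform(work_set)
print(numeric_prepared.shape)  # (n_rows, 10): one column per numerical attribute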
# FFMC distribution
# Creating Histogram based on FFMC attribute
histogram_plot(db['FFMC'], title = "FFMC distribution")
plt.show()
# DC distribution
# Creating Histogram based on DC attribute
histogram_plot(db['DC'], title = "DC distribution")
plt.show()
# Separating the features and labels into X and Y
# assuming the standard forest-fires column order, the target 'area' is the last column (index 12)
X = db.iloc[:, 0:12].values   # the 12 feature columns: X, Y, month, day, FFMC, DMC, DC, ISI, temp, RH, wind, rain
Y = db.iloc[:, 12].values     # burned area (the original code took column 11, which is not the target)
# Separating the test and training set
train_x, test_x, train_y, test_y = train_test_split(X,Y, test_size=0.3, random_state = 9)
mse_values = []
variance_score = []
train_x
train_y<jupyter_output><empty_output>
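<jupyter_text>A minimal baseline sketch (not in the original notebook, which stops before the modelling step): fit a plain linear regression on the split above and record its error in the lists created earlier.<jupyter_code>from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, explained_variance_score

baseline = LinearRegression().fit(train_x, train_y)
pred_y = baseline.predict(test_x)
mse_values.append(mean_squared_error(test_y, pred_y))
variance_score.append(explained_variance_score(test_y, pred_y))
print(mse_values, variance_score)<jupyter_output><empty_output>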
<jupyter_start><jupyter_text>Checking the datatype of all the columns <jupyter_code>df.shape
df.columns
# We drop all columns except those listed above
df.drop(df.columns.difference(['SEVERITYDESC', 'ADDRTYPE', 'JUNCTIONTYPE', 'SDOT_COLDESC', 'WEATHER', 'LIGHTCOND'])\
, axis=1, inplace=True)
df.head()
# show null values as boolean results in a dataframe
null_values=df.isnull()
null_values
# Check null elements
for column in null_values.columns.values.tolist():
print(column)
print(null_values[column].value_counts().sort_values(ascending=True))
print("")
#gives statistics for categorical variables
df.describe(include='O')
df_without_nans=df.dropna()  # rows that remain after dropping NaNs
df_without_nans.shape
a=(1-(182954/194673))*100
print("%.2f" % a,"%")
# With 6.02% rows with nans, we will drop these rows
df=df.dropna()
df.shape
df
# we will reset index to correct rows numbers
df=df.reset_index(drop=True)
df
#Also check to confirm no more null values
df.isnull().sum()
#Check groups of features
print("ADDRTYPE: \n", df['ADDRTYPE'].value_counts() )
print("LIGHTCOND: \n",df['LIGHTCOND'].value_counts())
print("\n WEATHER: \n",df['WEATHER'].value_counts())
print("\n JUNCTIONTYPE: \n",df['JUNCTIONTYPE'].value_counts())
print("\n SDOT_COLDESC: \n",df['SDOT_COLDESC'].value_counts())<jupyter_output>ADDRTYPE:
Block 119393
Intersection 63326
Alley 235
Name: ADDRTYPE, dtype: int64
LIGHTCOND:
Daylight 113868
Dark - Street Lights On 47563
Unknown 10453
Dusk 5775
Dawn 2454
Dark - No Street Lights 1462
Dark - Street Lights Off 1158
Other 210
Dark - Unknown Lighting 11
Name: LIGHTCOND, dtype: int64
WEATHER:
Clear 109084
Raining 32653
Overcast 27200
Unknown 11642
Snowing 881
Other 747
Fog/Smog/Smoke 556
Sleet/Hail/Freezing Rain 112
Blowing Sand/Dirt 49
Severe Crosswind 25
Partly Cloudy 5
Name: WEATHER, dtype: int64
JUNCTIONTYPE:
Mid-Block (not related to intersection) 86635
At Intersection (intersection[...]<jupyter_text> Data Visualisation and Exploratory <jupyter_code>df.groupby(['JUNCTIONTYPE', 'SEVERITYDESC']).agg('size').unstack().plot(kind = 'bar', legend=True, figsize=(12, 8), fontsize=18)
plt.ylim((-1000,80000))
df.groupby(['ADDRTYPE', 'SEVERITYDESC']).agg('size').unstack().plot(kind = 'bar', figsize=(20,16), legend=True, fontsize=18)
plt.ylim((-1000, 100000))<jupyter_output><empty_output><jupyter_text>Data Wrangling (Back)
Looking at the elements, I am going to use One Hot Encoding to turn these categorical variables into numerical variables, but before that we will check the data types and convert them.<jupyter_code>df.dtypes
df['ADDRTYPE']=df['ADDRTYPE'].astype('object')
df['JUNCTIONTYPE']=df['JUNCTIONTYPE'].astype('object')
df['SDOT_COLDESC']=df['SDOT_COLDESC'].astype('object')
df['WEATHER']=df['WEATHER'].astype('object')
df['LIGHTCOND']=df['LIGHTCOND'].astype('object')
df.dtypes
Feature=df['SEVERITYDESC']
Feature=pd.concat([Feature, pd.get_dummies(df[['ADDRTYPE','JUNCTIONTYPE','SDOT_COLDESC','WEATHER','LIGHTCOND']])], axis=1)
Feature.head()
Feature.drop(['SEVERITYDESC'], axis=1, inplace=True)
Feature
X=Feature
X[0:5]
y=df['SEVERITYDESC'].values
y[0:5]
print("Feature shape:", Feature.shape)
print("X shape:",X.shape)
print ("y shape:", y.shape)<jupyter_output>Feature shape: (182954, 69)
X shape: (182954, 69)
y shape: (182954,)
<jupyter_text> MODEL DEVELOPMENT <jupyter_code>from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
print('train set:', X_train.shape, y_train.shape)
print('test set:', X_test.shape, y_test.shape)<jupyter_output>train set: (146363, 69) (146363,)
test set: (36591, 69) (36591,)
<jupyter_text> DECISION TREE <jupyter_code>
from sklearn.tree import DecisionTreeClassifier
from sklearn import preprocessing
#Modelling phase
Accident_Severity_Model=DecisionTreeClassifier(criterion='entropy', max_depth=5)
Accident_Severity_Model.fit(X_train, y_train)
#Predicting phase
Predicting_Severity=Accident_Severity_Model.predict(X_test)
print(Predicting_Severity [0:5])
print(y_test [0:5])
#Accuracy of the model using sklearn
from sklearn import metrics
print("Decision Tree Accuracy:", metrics.accuracy_score(y_test, Predicting_Severity))
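# Hypothetical extra evaluation (not in the original notebook): accuracy alone can be
# misleading when the severity classes are imbalanced, so also report per-class metrics.
print(metrics.classification_report(y_test, Predicting_Severity))
print(metrics.confusion_matrix(y_test, Predicting_Severity))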
#for visualization of the model
from sklearn.externals.six import StringIO  # in newer scikit-learn, use `from io import StringIO` instead
import pydotplus
import matplotlib.image as mpimg
from sklearn import tree
dot_data=StringIO()
filename="Severity_Tree.png"
featureNames=Feature.columns
out=tree.export_graphviz(Accident_Severity_Model, feature_names=featureNames, out_file=dot_data, class_names=np.unique(y_train),\
filled=True, special_characters=True, rotate=False)
graph=pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png(filename)
img=mpimg.imread(filename)
plt.figure(figsize=(100, 200))
plt.imshow(img, interpolation='nearest')<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Source code encoding
- By default, Python source files are treated as being encoded in UTF-8.
# Numbers
- Division always returns a float.<jupyter_code>17//3 # floor division (returns the quotient)
17 %3 # remainder
n # NameError if the variable is undefined
price + _
round(_,2) # don't assign a value to the _ variable
print(r'C:\some\name')
print("""\
Usage : thingy [OPTIONS]
-h
-H hostname
""") # the trailing backslash removes the initial line break
('un'*3 ) 'ium' # SyntaxError: implicit concatenation only works between two string literals
'py' 'thon'
text = ('Put several strings within parentheses '
'to have them joined together')
text
prefix = 'Py'
prefix 'thon' # SyntaxError: a variable cannot be concatenated with a string literal this way
word = 'Python'
word[:2] + 'py'
squares = [1,4,9,16,25]
squares + [36,49,64,81,100]
letters = ['a','b','c','d','e','f','g']
letters[2:5] = ['C','D',"E"]
letters[2:5] = [] # every slice operation returns a new copy!
letters[:]= []
letters
a,b = 0,1
while a<1000:
print(a,end=',')
    a,b = b,a+b # multiple (tuple) assignment
i = 256*256
print('The value of i is',i) # print inserts spaces between arguments automatically
words = ['cat','window','defenestrate']
for w in words[:]: # iterate over a copy of the list (slicing returns a copy!)
if len(w) >6:
words.insert(0,w)
# using words instead of words[:] here would loop forever
words
list(range(5)) # range returns an iterable object
for n in range(2,10):
for x in range(2,n):
if n%x == 0:
print(n,'equals',x,'*',n//x)
break
    else: # runs after the inner for loop finishes without hitting break
print(n,'is a prime number')
for num in range(2,10):
if num%2 == 0:
print('Found an even number',num)
        continue # skip to the next iteration of the loop
print("Found a number",num)
def fib(n): #Write Fibonacci
    """Print a Fibonacci series up to n."""
a,b = 0,1
while a<n:
print(a,end=" ")
a,b = b,a+b
print()
fib(2000)
print(fib.__doc__) # Docstring
def fib2(n):
    """Return a list containing the Fibonacci series up to n."""
result = []
a,b = 0,1
while a<n:
result.append(a)
a,b = b,a+b
return result
f100 = fib2(100)
f100
def ask_ok(prompt, retries=4,reminder = 'Please try again!'):
while True:
ok = input(prompt)
if ok in ('y','ye','yes'):
return True
if ok in ('n','no','nop','nope'):
return False
retries = retries - 1
if retries < 0:
raise ValueError('invalid user response')
print(reminder)
ask_ok("Do you really want to quit?",2,'Come on, answer yes or no')
i =5
def f(arg=i):
print(arg)
i = 6
f()
def f(a,L=[]):
L.append(a)
return L
print(f(1))
print(f(2))
print(f(3)) # the default value is shared between calls ★★
def f(a,L=None): # if you don't want the default to be shared,
if L is None:
L = []
L.append(a)
return L
print(f(1))
print(f(2,[1]))
print(f(3))
def parrot(voltage, state='a stiff', action='voom', type='Norwegian Blue'):
print("-- This parrot wouldn't", action, end=' ')
    print("if you put", voltage, "volts through it.")
print("-- Lovely plumage, the", type)
print("-- It's", state, "!")
parrot(1000)
parrot(action = 'VOOOOOM',voltage = 1000000) # keyword arguments may be given in any order
parrot('a million', 'bereft of life', 'jump')
parrot('a thousand', state='pushing up the daisies')
parrot(voltage=5.0, 'dead') # SyntaxError: after a keyword argument, all remaining arguments must be keyword arguments
def function(a):
pass
function(0,a=0)
def cheeseshop(kind, *arguments, **keywords):
print("-- Do you have any", kind, "?")
print("-- I'm sorry, we're all out of", kind)
for arg in arguments:
print(arg)
print("-" * 40)
for kw in keywords:
print(kw, ":", keywords[kw])
cheeseshop("Limburger", "It's very runny, sir.",
"It's really very, VERY runny, sir.",1,
shopkeeper="Michael Palin",
client="John Cleese",
sketch="Cheese Shop Sketch")
def write_multiple_items(file,separator,*args):
file.write(separator.join(args))
def concat(*args,sep="/"):
    return sep.join(args) # how to use join
concat("earth",'mars','venus','@')
list(range(3,6))
# argument lists
args = [3,6]
list(range(*args))
range(*[3,6]) # unpacks the list as positional arguments
d = {"voltage":"four million","state":"bleedin' demised","action":"VOOM"}
parrot(**d) # unpacks the dict as keyword arguments
parrot(*d) # unpacking a dict positionally passes its keys
pairs = [(1,'one'),(2,'two'),(3,'three'),(4,'four'),(5,'ti')]
pairs.sort(key = lambda pair:pair[1])
# sort by the second element of each pair
pairs
a = lambda pair:pair[1]
#a(pairs)
pairs = [(1,'one'),(2,'two'),(3,'three'),(4,'four')]
pairs[1]
a = [1,2,3]
x = 4
a[len(a):] = [x]
a[len(a):] = range(5,6) # same as extend
a
a.clear()
a
a.count(5)
fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
fruits.index('banana',4)
from collections import deque
queue = deque(["Eric","John","Michael"])
queue.append("Terry")
queue.popleft()
queue
squares = list(map(lambda x:x**2,range(10))) # map applies the function to each element
squares
map(lambda x:x**2,range(10))
squares = [x**2 for x in range(10)]
squares
[(x,y) for x in [1,2,3] for y in [3,1,4] if x!=y]
combs = []
for x in [1,2,3]:
for y in [3,1,4]:
if x != y:
combs.append((x,y))
combs
freshfruit = [' banana', ' loganberry ', 'passion fruit ']
[weapon.strip() for weapon in freshfruit]
vec = [[1,2,3],[4,5,6],[7,8,9]]
[num for elem in vec for num in elem]
from math import pi
[str(round(pi,i)) for i in range(1,6)]
matrix = [
[1,2,3,4],
[5,6,7,8],
[9,10,11,12],
]
[[row[i] for row in matrix] for i in range(4)]
list(zip(*matrix))
list(zip([1,2],[3,4]))
t = 12345,54321,'hello',[1,2,3,4,5]
t[3][0] = 2 # a list inside a tuple can still be modified
t
singleton = ('hello',) # a one-element tuple needs a trailing comma
len(singleton)
x,y,z,u = t # unpack
x
a = [1,2,3,4,5]
1 in a<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Simple linear model <jupyter_code>w = tf.Variable([3.0],tf.float32)
b = tf.Variable([1.0],tf.float32)
x = tf.placeholder(tf.float32)
d = np.random.rand(10)
model = w*x+b
init = tf.global_variables_initializer()
with tf.Session() as ses:
ses.run(init)
print(ses.run(model, feed_dict={x:d}))
print('just verifying...')
    print(3*d + 1)  # w*x + b with the weights set above (w=3, b=1)
y = tf.placeholder(tf.float32)
# creating a noisy target; the true parameters are 2, -3
yd = 2*d - 3 + 0.2*np.random.rand(len(d))
#loss
loss = tf.reduce_sum(tf.squared_difference(y,model))
#train
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
with tf.Session() as ses:
ses.run(init)
for i in range(2000):
ses.run(train, feed_dict={x:d, y:yd})
cost = ses.run(loss, feed_dict={x:d, y:yd})
print('epoch=',i,cost,ses.run([w,b]))
<jupyter_output>epoch= 0 188.055 [array([ 2.95144129], dtype=float32), array([ 0.91110235], dtype=float32)]
epoch= 1 178.447 [array([ 2.90417242], dtype=float32), array([ 0.82449877], dtype=float32)]
epoch= 2 169.331 [array([ 2.85815978], dtype=float32), array([ 0.74012971], dtype=float32)]
epoch= 3 160.683 [array([ 2.8133707], dtype=float32), array([ 0.65793717], dtype=float32)]
epoch= 4 152.479 [array([ 2.76977348], dtype=float32), array([ 0.57786453], dtype=float32)]
epoch= 5 144.695 [array([ 2.72733712], dtype=float32), array([ 0.49985677], dtype=float32)]
epoch= 6 137.311 [array([ 2.68603134], dtype=float32), array([ 0.42386025], dtype=float32)]
epoch= 7 130.305 [array([ 2.64582658], dtype=float32), array([ 0.3498227], dtype=float32)]
epoch= 8 123.659 [array([ 2.60669446], dtype=float32), array([ 0.27769327], dtype=float32)]
epoch= 9 117.353 [array([ 2.56860685], dtype=float32), array([ 0.20742238], dtype=float32)]
epoch= 10 111.371 [array([ 2.53153682], dtype=float32), array([ 0.13896175], dtype[...]
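<jupyter_text>As a quick sanity check (not part of the original notebook), the closed-form least-squares fit can be compared with the parameters learned by gradient descent:<jupyter_code># np.polyfit returns [slope, intercept] for a degree-1 fit; it should land close to the
# true slope 2 and an intercept near -2.9 (the -3 offset plus the mean of the 0.2*rand noise)
print(np.polyfit(d, yd, 1))<jupyter_output><empty_output>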
<jupyter_start><jupyter_text># Training<jupyter_code>delays=[1,2,3,6,12]
neurons=[20,25,30,35,40,45,50]
batch_sizes=[100,125,150]
dropouts=[0.1,0.2,0.3]
epochs=100
file_mask=MODELS_DIR+'\\simple_fort_model'
simple_fort_model_result=pd.DataFrame()
for delay in delays:
for neuron in neurons:
for btch_sz in batch_sizes:
train_mse, test_mse, end=train(Fort, epochs, file_mask, delay=delay, neuron=neuron, btch_sz=btch_sz)
simple_fort_model_result=simple_fort_model_result.append([[delay, neuron, btch_sz, train_mse, test_mse, end]], ignore_index=True)
simple_fort_model_result.columns=['delay', 'neuron','btch_sz', 'train_mse', 'test_mse', 'time']
simple_fort_model_result=simple_fort_model_result.sort_values(['test_mse'])
simple_fort_model_result.to_excel('{}_result.xlsx'.format(file_mask), index=None)
epochs=100
file_mask=MODELS_DIR+'\\complex_fort_model'
complex_fort_model_result=pd.DataFrame()
for delay in delays:
for neuron in neurons:
for btch_sz in batch_sizes:
for drpout in dropouts:
train_mse, test_mse, end=train(Fort, epochs, file_mask, delay=delay, neuron=neuron, btch_sz=btch_sz, drpout=drpout)
complex_fort_model_result=complex_fort_model_result.append([[delay, neuron, btch_sz, drpout, train_mse, test_mse, end]], ignore_index=True)
complex_fort_model_result.columns=['delay', 'neuron','btch_sz', 'drpout', 'train_mse', 'test_mse', 'time']
complex_fort_model_result=complex_fort_model_result.sort_values(['test_mse'])
complex_fort_model_result.to_excel('{}_result.xlsx'.format(file_mask), index=None)
t = np.linspace(0, 1, 1024)
# linear frequency of the seasonal component
f = 10 #P = 1/10
S1 = np.sin(2*np.pi*2*f*t)
S2 = np.sin(2*np.pi*4*f*t)
S3 = np.sin(2*np.pi*8*f*t)
noise = 0.2*np.random.randn(len(t))
F_lab = S1 + S2 + S3 + noise
F_lab=F_lab.reshape(-1,1)
N = 1024
plt.figure(figsize = (15, 5))
plt.plot(t, F_lab, 'k', label = 'ts')
plt.plot(t, S1, 'b', label = 'S1')
plt.plot(t, S2, 'r', label = 'S2')
plt.plot(t, S3, 'g', label = 'S3')
plt.legend()
plt.show()
delays=[6,12]
neurons=[10,20,25,30,35,40]
batch_sizes=[800,850,900]
dropouts=[0.1,0.2,0.3]
epochs=100
file_mask=MODELS_DIR+'\\simple_lab_model'
simple_lab_model_result=pd.DataFrame()
for delay in delays:
for neuron in neurons:
for btch_sz in batch_sizes:
train_mse, test_mse, end=train(F_lab, epochs, file_mask, delay=delay, neuron=neuron, btch_sz=btch_sz)
simple_lab_model_result=simple_lab_model_result.append([[delay, neuron, btch_sz, train_mse, test_mse, end]], ignore_index=True)
simple_lab_model_result.columns=['delay', 'neuron','btch_sz', 'train_mse', 'test_mse', 'time']
simple_lab_model_result=simple_lab_model_result.sort_values(['test_mse'])
simple_lab_model_result.to_excel('{}_result.xlsx'.format(file_mask), index=None)
epochs=100
file_mask=MODELS_DIR+'\\complex_lab_model'
complex_lab_model_result=pd.DataFrame()
for delay in delays:
for neuron in neurons:
for btch_sz in batch_sizes:
for drpout in dropouts:
train_mse, test_mse, end=train(F_lab, epochs, file_mask, delay=delay, neuron=neuron, btch_sz=btch_sz, drpout=drpout)
complex_lab_model_result=complex_lab_model_result.append([[delay, neuron, btch_sz, drpout, train_mse, test_mse, end]], ignore_index=True)
complex_lab_model_result.columns=['delay', 'neuron','btch_sz', 'drpout', 'train_mse', 'test_mse', 'time']
complex_lab_model_result=complex_lab_model_result.sort_values(['test_mse'])
complex_lab_model_result.to_excel('{}_result.xlsx'.format(file_mask), index=None)
F_trendless=Fort.ravel()-[-9.8576*x + 3872.7 for x in range(174)]
F_trendless=F_trendless.reshape(-1,1)
plt.figure(figsize = (15, 5))
plt.plot(F_trendless, 'k')
plt.show()
delays=[1,2,3,6,12]
neurons=[10,20,25,30,35,40]
batch_sizes=[100,125,150]
dropouts=[0.1,0.2,0.3]
epochs=100
file_mask=MODELS_DIR+'\\simple_fort_trendless_model'
simple_fort_trendless_model_result=pd.DataFrame()
for delay in delays:
for neuron in neurons:
for btch_sz in batch_sizes:
train_mse, test_mse, end=train(F_trendless, epochs, file_mask, delay=delay, neuron=neuron, btch_sz=btch_sz)
simple_fort_trendless_model_result=simple_fort_trendless_model_result.append([[delay, neuron, btch_sz, train_mse, test_mse, end]], ignore_index=True)
simple_fort_trendless_model_result.columns=['delay', 'neuron','btch_sz', 'train_mse', 'test_mse', 'time']
simple_fort_trendless_model_result=simple_fort_trendless_model_result.sort_values(['test_mse'])
simple_fort_trendless_model_result.to_excel('{}_result.xlsx'.format(file_mask), index=None)
epochs=100
file_mask=MODELS_DIR+'\\complex_fort_trendless_model'
complex_fort_trendless_model_result=pd.DataFrame()
for delay in delays:
for neuron in neurons:
for btch_sz in batch_sizes:
for drpout in dropouts:
train_mse, test_mse, end=train(F_trendless, epochs, file_mask, delay=delay, neuron=neuron, btch_sz=btch_sz, drpout=drpout)
complex_fort_trendless_model_result=complex_fort_trendless_model_result.append([[delay, neuron, btch_sz, drpout, train_mse, test_mse, end]], ignore_index=True)
complex_fort_trendless_model_result.columns=['delay', 'neuron','btch_sz', 'drpout', 'train_mse', 'test_mse', 'time']
complex_fort_trendless_model_result=complex_fort_trendless_model_result.sort_values(['test_mse'])
complex_fort_trendless_model_result.to_excel('{}_result.xlsx'.format(file_mask), index=None)<jupyter_output>0:00:49.593663 delay=1 neuron=10 btch_sz=100 drpout=0.1 train_mse=0.042 test_mse=0.028
0:00:49.843521 delay=1 neuron=10 btch_sz=100 drpout=0.2 train_mse=0.041 test_mse=0.027
0:00:49.641635 delay=1 neuron=10 btch_sz=100 drpout=0.3 train_mse=0.042 test_mse=0.028
0:00:49.791550 delay=1 neuron=10 btch_sz=125 drpout=0.1 train_mse=0.041 test_mse=0.025
0:00:49.781555 delay=1 neuron=10 btch_sz=125 drpout=0.2 train_mse=0.040 test_mse=0.024
0:00:49.973446 delay=1 neuron=10 btch_sz=125 drpout=0.3 train_mse=0.040 test_mse=0.024
0:00:49.734582 delay=1 neuron=10 btch_sz=150 drpout=0.1 train_mse=0.037 test_mse=0.023
0:00:50.149346 delay=1 neuron=10 btch_sz=150 drpout=0.2 train_mse=0.038 test_mse=0.024
0:00:49.713595 delay=1 neuron=10 btch_sz=150 drpout=0.3 train_mse=0.038 test_mse=0.024
0:00:51.031842 delay=1 neuron=20 btch_sz=100 drpout=0.1 train_mse=0.042 test_mse=0.028
0:00:51.227729 delay=1 neuron=20 btch_sz=100 drpout=0.2 train_mse=0.042 test_mse=0.029
0:00:51.047832 delay=1 neuron=20 btch_sz=10[...]<jupyter_text># Torch results<jupyter_code>simple_fort_model_result.head()
complex_fort_model_result.head()
simple_lab_model_result.head()
complex_lab_model_result.head()
simple_fort_trendless_model_result.head()
complex_fort_trendless_model_result.head()<jupyter_output><empty_output><jupyter_text>## Fort simple model<jupyter_code>delay=simple_fort_model_result.head(1)['delay'].values[0]
neuron=simple_fort_model_result.head(1)['neuron'].values[0]
train_size=simple_fort_model_result.head(1)['btch_sz'].values[0]
checkpoint=torch.load(MODELS_DIR+'\\simple_fort_model_{}_{}_{}.t7'.format(delay,neuron,train_size))
net=Net(1, delay, neuron).to(device)
net.load_state_dict(checkpoint['state'])
net.eval()
train_mse = checkpoint['train_mse']
test_mse = checkpoint['test_mse']
scaler = MinMaxScaler(feature_range=(0, 1))
F_tr, F_test = split_signal(Fort, delay, train_size, scaler, True)
train_xx, train_yy = get_ts(F_tr, delay, train_size)
test_xx, test_yy = get_ts(F_test, delay, train_size)
pred_train=[]
for i in range(len(train_xx)):
out=net(train_xx[i].unsqueeze(-1).to(device))
pred_train.append(out.detach().cpu().item())
pred_train=np.array(pred_train)
trainPredict = scaler.inverse_transform(pred_train.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[delay:train_size],trainPredict, 'b')
plt.show()
pred_test=[]
for i in range(len(test_xx)):
out=net(test_xx[i].unsqueeze(-1).to(device))
pred_test.append(out.detach().cpu().item())
pred_test=np.array(pred_test)
testPredict = scaler.inverse_transform(pred_test.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[train_size:len(Fort)],testPredict, 'b')
plt.show()
pred_forecast=[]
inp=test_xx[-1].unsqueeze(-1).to(device)
for i in range(24):
out=net(inp)
out=out.detach().cpu().item()
pred_forecast.append(out)
inp[0,0,0]=out
inp=inp.roll(-1)
pred_forecast=np.array(pred_forecast)
forecastPredict = scaler.inverse_transform(pred_forecast.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[len(Fort)-1:len(Fort)+24-1],forecastPredict, 'b')
plt.show()
plt.figure(figsize = (10, 5))
plt.plot(train_mse, label='train')
plt.plot(test_mse, label='test')
plt.xlabel('epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_acf(test_yy.ravel()-pred_test, lags = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>## Fort complex model<jupyter_code>delay=complex_fort_model_result.head(1)['delay'].values[0]
neuron=complex_fort_model_result.head(1)['neuron'].values[0]
train_size=complex_fort_model_result.head(1)['btch_sz'].values[0]
drpout=complex_fort_model_result.head(1)['drpout'].values[0]
checkpoint=torch.load(MODELS_DIR+'\\complex_fort_model_{}_{}_{}_{}.t7'.format(delay, neuron, train_size, drpout))
net=Net(1, delay, neuron, drpout).to(device)
net.load_state_dict(checkpoint['state'])
net.eval()
train_mse = checkpoint['train_mse']
test_mse = checkpoint['test_mse']
scaler = MinMaxScaler(feature_range=(0, 1))
F_tr, F_test = split_signal(Fort, delay, train_size, scaler, True)
train_xx, train_yy = get_ts(F_tr, delay, train_size)
test_xx, test_yy = get_ts(F_test, delay, train_size)
pred_train=[]
for i in range(len(train_xx)):
out=net(train_xx[i].unsqueeze(-1).to(device))
pred_train.append(out.detach().cpu().item())
pred_train=np.array(pred_train)
trainPredict = scaler.inverse_transform(pred_train.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[delay:train_size],trainPredict, 'b')
plt.show()
pred_test=[]
for i in range(len(test_xx)):
out=net(test_xx[i].unsqueeze(-1).to(device))
pred_test.append(out.detach().cpu().item())
pred_test=np.array(pred_test)
testPredict = scaler.inverse_transform(pred_test.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[train_size:len(Fort)],testPredict, 'b')
plt.show()
pred_forecast=[]
inp=test_xx[-1].unsqueeze(-1).to(device)
for i in range(24):
out=net(inp)
out=out.detach().cpu().item()
pred_forecast.append(out)
inp[0,0,0]=out
inp=inp.roll(-1)
pred_forecast=np.array(pred_forecast)
forecastPredict = scaler.inverse_transform(pred_forecast.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[len(Fort)-1:len(Fort)+24-1],forecastPredict, 'b')
plt.show()
plt.figure(figsize = (10, 5))
plt.plot(train_mse, label='train')
plt.plot(test_mse, label='test')
plt.xlabel('epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_acf(test_yy.ravel()-pred_test, lags = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>## Fort trendless simple model<jupyter_code>delay=simple_fort_trendless_model_result.head(1)['delay'].values[0]
neuron=simple_fort_trendless_model_result.head(1)['neuron'].values[0]
train_size=simple_fort_trendless_model_result.head(1)['btch_sz'].values[0]
checkpoint=torch.load(MODELS_DIR+'\\simple_fort_trendless_model_{}_{}_{}.t7'.format(delay,neuron,train_size))
net=Net(1, delay, neuron).to(device)
net.load_state_dict(checkpoint['state'])
net.eval()
train_mse = checkpoint['train_mse']
test_mse = checkpoint['test_mse']
scaler = MinMaxScaler(feature_range=(0, 1))
F_tr, F_test = split_signal(F_trendless, delay, train_size, scaler, True)
train_xx, train_yy = get_ts(F_tr, delay, train_size)
test_xx, test_yy = get_ts(F_test, delay, train_size)
pred_train=[]
for i in range(len(train_xx)):
out=net(train_xx[i].unsqueeze(-1).to(device))
pred_train.append(out.detach().cpu().item())
pred_train=np.array(pred_train)
trainPredict = scaler.inverse_transform(pred_train.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_trendless, 'k')
plt.plot(np.r_[delay:train_size],trainPredict, 'b')
plt.show()
pred_test=[]
for i in range(len(test_xx)):
out=net(test_xx[i].unsqueeze(-1).to(device))
pred_test.append(out.detach().cpu().item())
pred_test=np.array(pred_test)
testPredict = scaler.inverse_transform(pred_test.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_trendless, 'k')
plt.plot(np.r_[train_size:len(F_trendless)],testPredict, 'b')
plt.show()
pred_forecast=[]
inp=test_xx[-1].unsqueeze(-1).to(device)
for i in range(24):
out=net(inp)
out=out.detach().cpu().item()
pred_forecast.append(out)
inp[0,0,0]=out
inp=inp.roll(-1)
pred_forecast=np.array(pred_forecast)
forecastPredict = scaler.inverse_transform(pred_forecast.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_trendless, 'k')
plt.plot(np.r_[len(F_trendless)-1:len(F_trendless)+24-1],forecastPredict, 'b')
plt.show()
plt.figure(figsize = (10, 5))
plt.plot(train_mse, label='train')
plt.plot(test_mse, label='test')
plt.xlabel('epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_acf(test_yy.ravel()-pred_test, lags = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>## Fort trendless complex model<jupyter_code>delay=complex_fort_trendless_model_result.head(1)['delay'].values[0]
neuron=complex_fort_trendless_model_result.head(1)['neuron'].values[0]
train_size=complex_fort_trendless_model_result.head(1)['btch_sz'].values[0]
drpout=complex_fort_trendless_model_result.head(1)['drpout'].values[0]
checkpoint=torch.load(MODELS_DIR+'\\complex_fort_trendless_model_{}_{}_{}_{}.t7'.format(delay,neuron,train_size,drpout))
net=Net(1, delay, neuron,drpout).to(device)
net.load_state_dict(checkpoint['state'])
net.eval()
train_mse = checkpoint['train_mse']
test_mse = checkpoint['test_mse']
scaler = MinMaxScaler(feature_range=(0, 1))
F_tr, F_test = split_signal(F_trendless, delay, train_size, scaler, True)
train_xx, train_yy = get_ts(F_tr, delay, train_size)
test_xx, test_yy = get_ts(F_test, delay, train_size)
pred_train=[]
for i in range(len(train_xx)):
out=net(train_xx[i].unsqueeze(-1).to(device))
pred_train.append(out.detach().cpu().item())
pred_train=np.array(pred_train)
trainPredict = scaler.inverse_transform(pred_train.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_trendless, 'k')
plt.plot(np.r_[delay:train_size],trainPredict, 'b')
plt.show()
pred_test=[]
for i in range(len(test_xx)):
out=net(test_xx[i].unsqueeze(-1).to(device))
pred_test.append(out.detach().cpu().item())
pred_test=np.array(pred_test)
testPredict = scaler.inverse_transform(pred_test.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_trendless, 'k')
plt.plot(np.r_[train_size:len(F_trendless)],testPredict, 'b')
plt.show()
pred_forecast=[]
inp=test_xx[-1].unsqueeze(-1).to(device)
for i in range(24):
out=net(inp)
out=out.detach().cpu().item()
pred_forecast.append(out)
inp[0,0,0]=out
inp=inp.roll(-1)
pred_forecast=np.array(pred_forecast)
forecastPredict = scaler.inverse_transform(pred_forecast.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_trendless, 'k')
plt.plot(np.r_[len(F_trendless)-1:len(F_trendless)+24-1],forecastPredict, 'b')
plt.show()
plt.figure(figsize = (10, 5))
plt.plot(train_mse, label='train')
plt.plot(test_mse, label='test')
plt.xlabel('epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_acf(test_yy.ravel()-pred_test, lags = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>## Lab simple model<jupyter_code>delay=simple_lab_model_result.head(1)['delay'].values[0]
neuron=simple_lab_model_result.head(1)['neuron'].values[0]
train_size=simple_lab_model_result.head(1)['btch_sz'].values[0]
checkpoint=torch.load(MODELS_DIR+'\\simple_lab_model_{}_{}_{}.t7'.format(delay,neuron,train_size))
net=Net(1, delay, neuron).to(device)
net.load_state_dict(checkpoint['state'])
net.eval()
train_mse = checkpoint['train_mse']
test_mse = checkpoint['test_mse']
scaler = MinMaxScaler(feature_range=(0, 1))
F_tr, F_test = split_signal(F_lab, delay, train_size, scaler, True)
train_xx, train_yy = get_ts(F_tr, delay, train_size)
test_xx, test_yy = get_ts(F_test, delay, train_size)
pred_train=[]
for i in range(len(train_xx)):
out=net(train_xx[i].unsqueeze(-1).to(device))
pred_train.append(out.detach().cpu().item())
pred_train=np.array(pred_train)
trainPredict = scaler.inverse_transform(pred_train.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_lab, 'k')
plt.plot(np.r_[delay:train_size],trainPredict, 'b')
plt.show()
pred_test=[]
for i in range(len(test_xx)):
out=net(test_xx[i].unsqueeze(-1).to(device))
pred_test.append(out.detach().cpu().item())
pred_test=np.array(pred_test)
testPredict = scaler.inverse_transform(pred_test.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_lab, 'k')
plt.plot(np.r_[train_size:len(F_lab)],testPredict, 'b')
plt.show()
for_forecast=96
pred_forecast=[]
inp=test_xx[-1].unsqueeze(-1).to(device)
for i in range(for_forecast):
out=net(inp)
out=out.detach().cpu().item()
pred_forecast.append(out)
inp[0,0,0]=out
inp=inp.roll(-1)
pred_forecast=np.array(pred_forecast)
forecastPredict = scaler.inverse_transform(pred_forecast.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_lab, 'k')
plt.plot(np.r_[len(F_lab)-1:len(F_lab)+for_forecast-1],forecastPredict, 'b')
plt.show()
plt.figure(figsize = (10, 5))
plt.plot(train_mse, label='train')
plt.plot(test_mse, label='test')
plt.xlabel('epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_acf(test_yy.ravel()-pred_test, lags = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>## Lab complex model<jupyter_code>delay=complex_lab_model_result.head(1)['delay'].values[0]
neuron=complex_lab_model_result.head(1)['neuron'].values[0]
train_size=complex_lab_model_result.head(1)['btch_sz'].values[0]
drpout=complex_lab_model_result.head(1)['drpout'].values[0]
checkpoint=torch.load(MODELS_DIR+'\\complex_lab_model_{}_{}_{}_{}.t7'.format(delay,neuron,train_size,drpout))
net=Net(1, delay, neuron, drpout).to(device)
net.load_state_dict(checkpoint['state'])
net.eval()
train_mse = checkpoint['train_mse']
test_mse = checkpoint['test_mse']
scaler = MinMaxScaler(feature_range=(0, 1))
F_tr, F_test = split_signal(F_lab, delay, train_size, scaler, True)
train_xx, train_yy = get_ts(F_tr, delay, train_size)
test_xx, test_yy = get_ts(F_test, delay, train_size)
pred_train=[]
for i in range(len(train_xx)):
out=net(train_xx[i].unsqueeze(-1).to(device))
pred_train.append(out.detach().cpu().item())
pred_train=np.array(pred_train)
trainPredict = scaler.inverse_transform(pred_train.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_lab, 'k')
plt.plot(np.r_[delay:train_size],trainPredict, 'b')
plt.show()
pred_test=[]
for i in range(len(test_xx)):
out=net(test_xx[i].unsqueeze(-1).to(device))
pred_test.append(out.detach().cpu().item())
pred_test=np.array(pred_test)
testPredict = scaler.inverse_transform(pred_test.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_lab, 'k')
plt.plot(np.r_[train_size:len(F_lab)],testPredict, 'b')
plt.show()
for_forecast=96
pred_forecast=[]
inp=test_xx[-1].unsqueeze(-1).to(device)
for i in range(for_forecast):
out=net(inp)
out=out.detach().cpu().item()
pred_forecast.append(out)
inp[0,0,0]=out
inp=inp.roll(-1)
pred_forecast=np.array(pred_forecast)
forecastPredict = scaler.inverse_transform(pred_forecast.reshape(-1,1))
plt.figure(figsize = (10, 5))
plt.plot(F_lab, 'k')
plt.plot(np.r_[len(F_lab)-1:len(F_lab)+for_forecast-1],forecastPredict, 'b')
plt.show()
plt.figure(figsize = (10, 5))
plt.plot(train_mse, label='train')
plt.plot(test_mse, label='test')
plt.xlabel('epochs')
plt.ylabel('mse')
plt.legend()
plt.show()
plot_acf(test_yy.ravel()-pred_test, lags = 20)
plt.show()<jupyter_output><empty_output><jupyter_text># Keras <jupyter_code>model = Sequential() # layers are connected sequentially
model.add(LSTM(units=20, input_shape=(1, 6))) # 20 neurons
model.add(Dense(units = 1)) # one-dimensional output
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
plot_model(model, to_file='model.png') # diagram of the resulting network
model.fit(xx, yy, epochs = 100) # 100 epochs over 144 points
trainPredict = model.predict(xx)
trainPredict = scaler.inverse_transform(trainPredict)
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[6:150],trainPredict, 'b')
plt.show()
testPredict = model.predict(xx_test)
testPredict = scaler.inverse_transform(testPredict)
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[150:174],testPredict, 'b')
plt.show()
model = Sequential()
model.add(LSTM(units=20, return_sequences=True, input_shape=(1, 6)))
model.add(Dropout(0.2))
model.add(LSTM(units=20, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=20))
model.add(Dense(units = 1))
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
plot_model(model, to_file='model.png')
model.fit(xx, yy, epochs = 100) # 100 epochs over 144 points
trainPredict = model.predict(xx)
trainPredict = scaler.inverse_transform(trainPredict)
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[6:150],trainPredict, 'b')
plt.show()
testPredict = model.predict(xx_test)
testPredict = scaler.inverse_transform(testPredict)
plt.figure(figsize = (10, 5))
plt.plot(Fort, 'k')
plt.plot(np.r_[150:174],testPredict, 'b')
plt.show()<jupyter_output><empty_output>
<jupyter_start><jupyter_text>Copyright (c) 2015, 2016
[Sebastian Raschka](http://sebastianraschka.com/)
[Li-Yi Wei](http://liyiwei.org/)
https://github.com/1iyiwei/pyml
[MIT License](https://github.com/1iyiwei/pyml/blob/master/LICENSE.txt)# Python Machine Learning - Code Examples# Chapter 3 - A Tour of Machine Learning Classifiers
* Logistic regression
* Binary and multiple classes
* Support vector machine
* Kernel trick
* Decision tree
* Random forest for ensemble learning
* K nearest neighborsNote that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).<jupyter_code>%load_ext watermark
%watermark -a '' -u -d -v -p numpy,pandas,matplotlib,scikit-learn<jupyter_output>
last updated: 2016-08-08
CPython 3.5.1
IPython 4.1.2
numpy 1.10.4
pandas 0.18.0
matplotlib 1.5.1
scikit-learn 0.17.1
<jupyter_text>*The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*### Overview- [Choosing a classification algorithm](#Choosing-a-classification-algorithm)
- [First steps with scikit-learn](#First-steps-with-scikit-learn)
- [Training a perceptron via scikit-learn](#Training-a-perceptron-via-scikit-learn)
- [Modeling class probabilities via logistic regression](#Modeling-class-probabilities-via-logistic-regression)
- [Logistic regression intuition and conditional probabilities](#Logistic-regression-intuition-and-conditional-probabilities)
- [Learning the weights of the logistic cost function](#Learning-the-weights-of-the-logistic-cost-function)
- [Handling multiple classes](#Handling-multiple-classes)
- [Training a logistic regression model with scikit-learn](#Training-a-logistic-regression-model-with-scikit-learn)
- [Tackling overfitting via regularization](#Tackling-overfitting-via-regularization)
- [Maximum margin classification with support vector machines](#Maximum-margin-classification-with-support-vector-machines)
- [Maximum margin intuition](#Maximum-margin-intuition)
- [Dealing with the nonlinearly separable case using slack variables](#Dealing-with-the-nonlinearly-separable-case-using-slack-variables)
- [Alternative implementations in scikit-learn](#Alternative-implementations-in-scikit-learn)
- [Solving nonlinear problems using a kernel SVM](#Solving-nonlinear-problems-using-a-kernel-SVM)
- [Using the kernel trick to find separating hyperplanes in higher dimensional space](#Using-the-kernel-trick-to-find-separating-hyperplanes-in-higher-dimensional-space)
- [Decision tree learning](#Decision-tree-learning)
- [Maximizing information gain – getting the most bang for the buck](#Maximizing-information-gain-–-getting-the-most-bang-for-the-buck)
- [Building a decision tree](#Building-a-decision-tree)
- [Combining weak to strong learners via random forests](#Combining-weak-to-strong-learners-via-random-forests)
- [K-nearest neighbors – a lazy learning algorithm](#K-nearest-neighbors-–-a-lazy-learning-algorithm)
- [Summary](#Summary)<jupyter_code>from IPython.display import Image
%matplotlib inline<jupyter_output><empty_output><jupyter_text># Choosing a classification algorithmThere is no free lunch; different algorithms are suitable for different data and applications.
# First steps with scikit-learn
Scikit-learn offers many library routines and datasets to use, as exemplified below for the main steps of a machine learning pipeline.## Iris dataset
Let's use this dataset for comparing machine learning methods
(Images of the three Iris species: Setosa, Versicolor, and Virginica.)
Loading the Iris dataset from scikit-learn. Here, the third column represents the petal length, and the fourth column the petal width of the flower samples. The classes are already converted to integer labels where 0=Iris-Setosa, 1=Iris-Versicolor, 2=Iris-Virginica.<jupyter_code>from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
print('Data set size: ' + str(iris.data.shape))
X = iris.data[:, [2, 3]]
y = iris.target
print('Class labels:', np.unique(y))
import pandas as pd
df = pd.DataFrame(iris.data)
df.tail()<jupyter_output><empty_output><jupyter_text>## Data sets: training versus test
Use different data sets for training and testing a model (generalization)<jupyter_code>from sklearn.cross_validation import train_test_split  # note: newer scikit-learn moved this to sklearn.model_selection
# splitting data into 70% training and 30% test data:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0)
num_training = y_train.shape[0]
num_test = y_test.shape[0]
print('training: ' + str(num_training) + ', test: ' + str(num_test))<jupyter_output>training: 105, test: 45
<jupyter_text>## Data scaling
It is better to scale the data so that different features/channels have similar mean/std.<jupyter_code>from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)<jupyter_output><empty_output><jupyter_text>## Training a perceptron via scikit-learnWe learned and coded perceptron in chapter 2.
Here we use the scikit-learn library version.
The perceptron only handles 2 classes for now.
We will discuss how to handle $N$ classes.
<jupyter_code>from sklearn.linear_model import Perceptron
ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0)
_ = ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
print('Misclassified samples: %d out of %d' % ((y_test != y_pred).sum(), y_test.shape[0]))
from sklearn.metrics import accuracy_score
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import warnings
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def plot_decision_regions(X, y, classifier, test_idx=None,
resolution=0.02, xlabel='', ylabel='', title=''):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
# plot all samples
if not versiontuple(np.__version__) >= versiontuple('1.9.0'):
X_test, y_test = X[list(test_idx), :], y[list(test_idx)]
warnings.warn('Please update to NumPy 1.9.0 or newer')
else:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
c='',
alpha=1.0,
linewidths=1,
marker='o',
s=55, label='test set')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()<jupyter_output><empty_output><jupyter_text>Training a perceptron model using the standardized training data:<jupyter_code>X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
test_idx = range(X_train_std.shape[0], X_combined_std.shape[0])
plot_decision_regions(X=X_combined_std, y=y_combined,
classifier=ppn, test_idx=test_idx,
xlabel='petal length [standardized]',
ylabel='petal width [standardized]')<jupyter_output><empty_output><jupyter_text># Modeling class probabilities via logistic regression
* $\mathbf{x}$: input
* $\mathbf{w}$: weights
* $z = \mathbf{w}^T \mathbf{x}$
* $\phi(z)$: transfer function
* $y$: predicted class
## Perceptron
$
y = \phi(z) =
\begin{cases}
1 \; z \geq 0 \\
-1 \; z < 0
\end{cases}
$
## Adaline
$
\begin{align}
\phi(z) &= z \\
y &=
\begin{cases}
1 \; \phi(z) \geq 0 \\
-1 \; \phi(z) < 0
\end{cases}
\end{align}
$
## Logistic regression
$
\begin{align}
\phi(z) &= \frac{1}{1 + e^{-z}} \\
y &=
\begin{cases}
1 \; \phi(z) \geq 0.5 \\
0 \; \phi(z) < 0.5
\end{cases}
\end{align}
$
Note: this is actually classification (discrete output) not regression (continuous output); the naming is historical.<jupyter_code>import matplotlib.pyplot as plt
import numpy as np
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
# y axis ticks and gridline
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.tight_layout()
# plt.savefig('./figures/sigmoid.png', dpi=300)
plt.show()<jupyter_output><empty_output><jupyter_text>### Logistic regression intuition and conditional probabilities
$\phi(z) = \frac{1}{1 + e^{-z}}$: sigmoid function
$\phi(z) \in [0, 1]$, so can be interpreted as probability: $P(y = 1 \; | \; \mathbf{x} ; \mathbf{w}) = \phi(\mathbf{w}^T \mathbf{x})$
We can then choose class by interpreting the probability:
$
\begin{align}
y &=
\begin{cases}
1 \; \phi(z) \geq 0.5 \\
0 \; \phi(z) < 0.5
\end{cases}
\end{align}
$
The probability information can be very useful for many applications
* knowing the confidence of a prediction in addition to the prediction itself
* e.g. weather forecast: tomorrow might rain versus tomorrow might rain with a 70% chance
(Architecture diagrams: logistic regression, Adaline, and perceptron.)
### Learning the weights of the logistic cost function
$J(\mathbf{w})$: cost function to minimize with parameters $\mathbf{w}$
$z = \mathbf{w}^T \mathbf{x}$
For Adaline, we minimize sum-of-squared-error:
$$
J(\mathbf{w}) = \frac{1}{2} \sum_i \left( y^{(i)} - t^{(i)}\right)^2
= \frac{1}{2} \sum_i \left( \phi\left(z^{(i)}\right) - t^{(i)}\right)^2
$$#### Maximum likelihood estimation (MLE)
For logistic regression, we take advantage of the probability interpretation to maximize the likelihood:
$$
L(\mathbf{w}) = P(t \; | \; \mathbf{x}; \mathbf{w}) = \prod_i P\left( t^{(i)} \; | \; \mathbf{x}^{(i)} ; \mathbf{w} \right) = \prod_i \phi\left(z^{(i)}\right)^{t^{(i)}} \left(1 - \phi\left(z^{(i)}\right)\right)^{1-t^{(i)}}
$$
This is equivalent to minimizing the negative log likelihood:
$$
J(\mathbf{w})
= -\log L(\mathbf{w})
= \sum_i -t^{(i)}\log\left(\phi\left(z^{(i)}\right)\right) - \left(1 - t^{(i)}\right) \log\left(1 - \phi\left(z^{(i)}\right) \right)
$$
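As a quick numeric illustration (a minimal sketch; `phi_z` and `t` are hypothetical arrays of predicted probabilities and 0/1 targets, not variables defined elsewhere in this notebook):

```python
import numpy as np

# negative log likelihood (cross-entropy) summed over the dataset
def neg_log_likelihood(phi_z, t):
    return np.sum(-t * np.log(phi_z) - (1 - t) * np.log(1 - phi_z))

phi_z = np.array([0.9, 0.2, 0.8])    # predicted P(y=1|x) for three samples
t = np.array([1, 0, 1])              # true labels
print(neg_log_likelihood(phi_z, t))  # small value -> predictions agree with targets
```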
Converting a product to a sum via log() is a common math trick for easier and more numerically stable computation.<jupyter_code>def cost_1(z):
return - np.log(sigmoid(z))
def cost_0(z):
return - np.log(1 - sigmoid(z))
z = np.arange(-10, 10, 0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z, c1, label='J(w) if t=1')
c0 = [cost_0(x) for x in z]
plt.plot(phi_z, c0, linestyle='--', label='J(w) if t=0')
plt.ylim(0.0, 5.1)
plt.xlim([0, 1])
plt.xlabel('$\phi$(z)')
plt.ylabel('J(w)')
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('./figures/log_cost.png', dpi=300)
plt.show()<jupyter_output><empty_output><jupyter_text>### Relationship to cross entropy### Optimizing for logistic regression
$$
J(\mathbf{w})
= \sum_i -t^{(i)}\log\left(\phi\left(z^{(i)}\right)\right) - \left(1 - t^{(i)}\right) \log\left(1 - \phi\left(z^{(i)}\right) \right)
$$
$$
\frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} =
\sum_i \left(\frac{-t^{(i)}}{\phi\left(z^{(i)}\right)} + \frac{1- t^{(i)}}{1 - \phi\left(z^{(i)}\right)} \right) \frac{\partial \phi \left(z^{(i)}\right)}{\partial \mathbf{w}}
$$For sigmoid
$
\frac{\partial \phi(z)}{\partial z} = \phi(z)\left(1-\phi(z)\right)
$
Thus
$$
\begin{align}
\delta J =
\frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} &=
\sum_i \left(\frac{-t^{(i)}}{\phi\left(z^{(i)}\right)} + \frac{1- t^{(i)}}{1 - \phi\left(z^{(i)}\right)} \right)
\phi\left(z^{(i)}\right)\left(1 - \phi\left(z^{(i)}\right) \right)
\frac{\partial z^{(i)}}{\partial \mathbf{w}} \\
&=
\sum_i \left( -t^{(i)}\left(1 - \phi\left(z^{(i)}\right)\right) + \left(1-t^{(i)}\right)\phi\left(z^{(i)}\right) \right) \mathbf{x}^{(i)} \\
&=
\sum_i \left( -t^{(i)} + \phi\left( z^{(i)} \right) \right) \mathbf{x}^{(i)}
\end{align}
$$
For gradient descent
$$
\begin{align}
\delta \mathbf{w} &= -\eta \delta J = \eta \sum_i \left( t^{(i)} - \phi\left( z^{(i)} \right) \right) \mathbf{x}^{(i)} \\
\mathbf{w} & \leftarrow \mathbf{w} + \delta \mathbf{w}
\end{align}
$$
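A minimal batch gradient-descent sketch of this update (assumptions: binary 0/1 targets `t`, a feature matrix `X`, and illustrative values for `eta` and `n_iter`; this is not the scikit-learn solver used later):

```python
import numpy as np

def fit_logistic_gd(X, t, eta=0.05, n_iter=100):
    X = np.hstack([np.ones((X.shape[0], 1)), X])   # prepend a column of 1s for w_0
    w = np.zeros(X.shape[1])
    for _ in range(n_iter):
        phi = 1.0 / (1.0 + np.exp(-X.dot(w)))      # phi(z) = sigmoid(w^T x)
        w += eta * X.T.dot(t - phi)                # w <- w + eta * sum_i (t_i - phi_i) x_i
    return w
```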
This is the same update rule we derived for optimization in chapter 2, with the sigmoid in place of the identity activation.
### Handling multiple classes
So far we have discussed only binary classifiers for 2 classes.
How about $K > 2$ classes, e.g. $K=3$ for the Iris dataset?#### Multiple binary classifiers
##### One versus one
Build $\frac{K(K-1)}{2}$ classifiers,
each separating a different pair of classes $C_i$ and $C_j$
##### One versus rest
Build $K$ binary classifiers,
each separating class $C_k$ from the rest
Both have ambiguous regions and incur excessive complexity/computation.#### One multi-class classifier
Multiple activation functions $\phi_k, k=1, 2, ... K$ each with different parameters
$$
\phi_k\left(\mathbf{x}\right) = \phi\left(\mathbf{x}, \mathbf{w_k}\right) = \phi\left(\mathbf{w}_k^T \mathbf{x} \right)
$$
We can then choose the class based on maximum activation:
$$
y = argmax_k \; \phi_k\left( \mathbf{x} \right)
$$For $\phi \geq 0$, we can normalize for probabilistic interpretation:
$$
P\left(k \; | \; \mathbf{x} ; \{\mathbf{w}_k\} \right) =
\frac{\phi_k\left(\mathbf{x}\right)}{\sum_{m=1}^K \phi_m\left(\mathbf{x}\right) }
$$Or use softmax (normalized exponential) for any activation $\phi$:
$$
P\left(k \; | \; \mathbf{x} ; \{\mathbf{w}_k\} \right) =
\frac{e^{\phi_k\left(\mathbf{x}\right)}}{\sum_{m=1}^K e^{\phi_m\left(\mathbf{x}\right)} }
$$
For example, if $\phi(z) = z$:
$$
P\left(k \; | \; \mathbf{x} ; \{\mathbf{w}_k\} \right) =
\frac{e^{\mathbf{w}_k^T\mathbf{x}}}{\sum_{m=1}^K e^{\mathbf{w}_m^T \mathbf{x}} }
$$
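A minimal numpy sketch of the softmax normalization (the max is subtracted only for numerical stability; it does not change the result):

```python
import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))   # z holds the K activations phi_k(x)
    return e / e.sum()

print(softmax(np.array([2.0, 1.0, 0.1])))  # probabilities that sum to 1
```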
For training, the model can be optimized via gradient descent.
The likelihood function (to maximize):
$$
L(\mathbf{w})
= P(t \; | \; \mathbf{x}; \mathbf{w})
= \prod_i P\left( t^{(i)} \; | \; \mathbf{x}^{(i)} ; \mathbf{w} \right)
$$
The loss function (to minimize):
$$
J(\mathbf{w})
= -\log{L(\mathbf{w})}
= -\sum_i \log{P\left( t^{(i)} \; | \; \mathbf{x}^{(i)} ; \mathbf{w} \right)}
$$### Training a logistic regression model with scikit-learn
The code is quite simple.<jupyter_code>from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=lr, test_idx=test_idx,
xlabel = 'petal length [standardized]',
ylabel='petal width [standardized]')
print(lr.predict_proba(X_test_std[0, :].reshape(1, -1)))<jupyter_output>[[ 2.05743774e-11 6.31620264e-02 9.36837974e-01]]
<jupyter_text>### Tackling overfitting via regularization
Recall our general representation of our modeling objective:
$$\Phi(\mathbf{X}, \mathbf{T}, \Theta) = L\left(\mathbf{X}, \mathbf{T}, \mathbf{Y}=f(\mathbf{X}, \Theta)\right) + P(\Theta)$$
* $L$ - loss/objective for data fitting
* $P$ - regularization to favor a simple model

Need to balance between accuracy/bias (L) and complexity/variance (P)
* If the model is too simple, it might be inaccurate (high bias)
* If the model is too complex, it might over-fit and be over-sensitive to the training data (high variance)
A well-trained model should
* fit the training data well (low bias)
* remain stable with different training data for good generalization (to unseen future data; low variance)
The following illustrates bias and variance for a potentially non-linear model
$L_2$ norm is a common form for regularization, e.g.
$
P = \lambda ||\mathbf{w}||^2
$
for the linear weights $\mathbf{w}$
$\lambda$ is a parameter to weigh between bias and variance
$C = \frac{1}{\lambda}$ for scikit-learn<jupyter_code>weights, params = [], []
for c in np.arange(-5, 5):
lr = LogisticRegression(C=10**c, random_state=0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(10**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0],
label='petal length')
plt.plot(params, weights[:, 1], linestyle='--',
label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
# plt.savefig('./figures/regression_path.png', dpi=300)
plt.show()<jupyter_output><empty_output><jupyter_text># Reading
* PML Chapter 3

# Maximum margin classification with support vector machines
Another popular type of machine learning algorithm
Basic version for linear classification
* decision boundary
$
\mathbf{w}^T \mathbf{x}
\begin{cases}
\geq 0 \; class +1 \\
< 0 \; class -1
\end{cases}
$
* similar to perceptron
* based on different criteria
Perceptron
* minimize misclassification error
* more sensitive to outliers
* incremental learning
SVM
* maximize margins to nearest samples (called support vectors)
* more robust against outliers
* batch learning

## Maximum margin intuition
Maximize the margins of support vectors to the decision plane $\rightarrow$ more robust classification for future samples (that may lie close to the decision plane).

Let us start with the simple case of two classes with labels +1 and -1.
(We choose this particular combination of labeling for numerical simplicity, as follows.)
Let the training dataset be $\{\mathbf{x}^{(i)}, y^{(i)}\}$, $i=1$ to $N$.
The goal is to find hyper-plane parameters $\mathbf{w}$ and $w_0$ so that
$$y^{(i)} \left( \mathbf{w}^T\mathbf{x}^{(i)} + w_0\right) \geq 1, \; \forall i$$.
Note that $y^{(i)} = \pm1$ above.
We use t or y for target labels depending on the context
We separate out $w_0$ from the rest of
$
\mathbf{w} =
\begin{bmatrix}
w_1 \\
w_2 \\
\vdots \\
w_n
\end{bmatrix}
$ for math derivation below
For the purpose of optimization, we can cast the problem as maximize $\rho$ for:
$$\frac{y^{(i)} \left( \mathbf{w}^T\mathbf{x}^{(i)} + w_0\right)}{||\mathbf{w}||} \geq \rho, \; \forall i$$
; note that the left-hand side can be interpreted as the distance from $\mathbf{x}^{(i)}$ to the hyper-plane.
To optimize the above, we can fix $\rho ||\mathbf{w}|| = 1$ and minimize $||\mathbf{w}||$, i.e.:
min $\frac{1}{2} ||\mathbf{w}||^2$ subject to $y^{(i)}\left( \mathbf{w}^T \mathbf{x}^{(i)} + w_0\right) \geq 1, \; \forall i$
We can use Lagrangian multipliers $\alpha^{(i)}$ for this constrained optimization problem:
$$L(\mathbf{w}, w_0, \alpha) = \frac{1}{2} ||\mathbf{w}||^2 - \sum_i \alpha^{(i)} y^{(i)} \left( \mathbf{w}^T \mathbf{x}^{(i)} + w_0\right) + \sum_i \alpha^{(i)}$$
(The last term above is for $\alpha^{(i)} \geq 0$.)
With some calculus/algebraic manipulations:
$$\frac{\partial L}{\partial \mathbf{w}} = 0 \Rightarrow \mathbf{w} = \sum_i \alpha^{(i)} y^{(i)} \mathbf{x}^{(i)}$$
$$\frac{\partial L}{\partial w_0} = 0 \Rightarrow \sum_i \alpha^{(i)} y^{(i)} = 0$$
Plug the above two into $L$ above, we have:
$$
\begin{align}
L(\mathbf{w}, w_0, \alpha) &= \frac{1}{2} \mathbf{w}^T \mathbf{w} - \mathbf{w}^T \sum_i \alpha^{(i)}y^{(i)}\mathbf{x}^{(i)} - w_0 \sum_i \alpha^{(i)} y^{(i)} + \sum_i \alpha^{(i)} \\
&= -\frac{1}{2} \mathbf{w}^T \mathbf{w} + \sum_i \alpha^{(i)} \\
&= -\frac{1}{2} \sum_i \sum_j \alpha^{(i)} \alpha^{(j)} y^{(i)} y^{(j)} \left( \mathbf{x}^{(i)}\right)^T \mathbf{x}^{(j)} + \sum_i \alpha^{(i)}
\end{align}
$$
, which can be maximized, via quadratic optimization, with respect to $\alpha^{(i)}$ only, subject to the constraints $\sum_i \alpha^{(i)} y^{(i)} = 0$ and $\alpha^{(i)} \geq 0, \; \forall i$

Once we solve for $\{ \alpha^{(i)} \}$ we will see that most of them are $0$, with a few $> 0$. The latter are the samples that lie on the decision boundaries and are thus called support vectors:
$$y^{(i)} \left( \mathbf{w}^T \mathbf{x}^{(i)} + w_0\right) = 1$$
from which we can calculate $w_0$.

## Dealing with the nonlinearly separable case using slack variables
Soft margin classification
Some datasets are not linearly separable
Avoid thin margins for linearly separable cases
* bias variance tradeoff

For datasets that are not linearly separable, we can introduce slack variables $\{\xi^{(i)}\}$ as follows:
$$y^{(i)} \left( \mathbf{w}^T \mathbf{x}^{(i)} + w_0\right) \geq 1 - \xi^{(i)}, \; \forall i$$
* If $\xi^{(i)} = 0$, it is just like the original case without slack variables.
* If $0 < \xi^{(i)} < 1$, $\mathbf{x}^{(i)}$ is correctly classified but lies within the margin.
* If $\xi^{(i)} \geq 1$, $\mathbf{x}^{(i)}$ is mis-classified.
For optimization, the goal is to minimize
$$\frac{1}{2} ||\mathbf{w}||^2 + C \sum_i \xi^{(i)}$$
, where $C$ is the strength of the penalty factor (like in regularization).
Using the Lagrangian multipliers $\{\alpha^{(i)}, \mu^{(i)} \}$ with constraints we have:
$$L = \frac{1}{2} ||\mathbf{w}||^2 + C \sum_i \xi^{(i)} - \sum_i \alpha^{(i)} \left( y^{(i)} \left( \mathbf{w}^T\mathbf{x}^{(i)} + w_0\right) - 1 + \xi^{(i)}\right) - \sum_i \mu^{(i)} \xi^{(i)}$$
, which can be solved via a similar process as in the original case without slack variables.<jupyter_code>from sklearn.svm import SVC
svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=test_idx,
xlabel='petal length [standardized]', ylabel='petal width [standardized]')<jupyter_output><empty_output><jupyter_text>## Alternative implementations in scikit-learn# Solving non-linear problems using a kernel SVM
SVM can be extended for non-linear classification
This is called kernel SVM
* will explain what kernel means
* and introduce kernel tricks :-)## Intuition
The following 2D circularly distributed data sets are not linearly separable.
However, we can elevate them to a higher dimensional space where they become linearly separable:
$
\phi(x_1, x_2) = (x_1, x_2, x_1^2 + x_2^2)
$
,
where $\phi$ is the mapping function.
<jupyter_code>from IPython.display import YouTubeVideo
YouTubeVideo('3liCbRZPrZA')
YouTubeVideo('9NrALgHFwTo')<jupyter_output><empty_output><jupyter_text>## Using the kernel trick to find separating hyperplanes in higher dimensional spaceFor datasets that are not linearly separable, we can map them into a higher dimensional space and make them linearly separable.
Let $\phi$ be this mapping:
$\mathbf{z} = \phi(\mathbf{x})$
And we perform the linear decision in the $\mathbf{z}$ instead of the original $\mathbf{x}$ space:
$$y^{(i)} \left( \mathbf{w}^T \mathbf{z}^{(i)} + w_0\right) \geq 1 - \xi^{(i)}$$

Following similar Lagrangian multiplier optimization as above, we eventually want to optimize:
$$
\begin{align}
L &= -\frac{1}{2} \sum_i \sum_j \alpha^{(i)} \alpha^{(j)} y^{(i)} y^{(j)} \left(\mathbf{z}^{(i)}\right)^T \mathbf{z}^{(j)} + \sum_i \alpha^{(i)} \\
&= -\frac{1}{2} \sum_i \sum_j \alpha^{(i)} \alpha^{(j)} y^{(i)} y^{(j)} \phi\left(\mathbf{x}^{(i)}\right)^T \phi\left(\mathbf{x}^{(j)}\right) + \sum_i \alpha^{(i)}
\end{align}
$$
The key idea behind kernel trick, and kernel machines in general, is to represent the high dimensional dot product by a kernel function:
$$K\left(\mathbf{x}^{(i)}, \mathbf{x}^{(j)}\right) = \phi\left(\mathbf{x}^{(i)}\right)^T \phi\left(\mathbf{x}^{(j)}\right)$$
Intuitively, the data points become more likely to be linearly separable in a higher dimensional space.

## Kernel trick for evaluation
Recall from part of our derivation above:
$$
\frac{\partial L}{\partial \mathbf{w}} = 0 \Rightarrow \mathbf{w} = \sum_i \alpha^{(i)} y^{(i)} \mathbf{z}^{(i)}
$$
Which allows us to compute the discriminant via kernel trick as well:
$$
\begin{align}
\mathbf{w}^T \mathbf{z}
&=
\sum_i \alpha^{(i)} y^{(i)} \left(\mathbf{z}^{(i)}\right)^T \mathbf{z}
\\
&=
\sum_i \alpha^{(i)} y^{(i)} \phi\left(\mathbf{x}^{(i)}\right)^T \phi(\mathbf{x})
\\
&=
\sum_i \alpha^{(i)} y^{(i)} K\left(\mathbf{x}^{(i)}, \mathbf{x}\right)
\end{align}
$$

## Non-linear classification example

| x | y | xor(x, y) |
|---|---|-----------|
| 0 | 0 | 0 |
| 0 | 1 | 1 |
| 1 | 0 | 1 |
| 1 | 1 | 0 |
XOR is not linearly separable
* math proof left as exercise

Random point sets classified via XOR based on the signs of 2D coordinates:<jupyter_code>import matplotlib.pyplot as plt
import numpy as np
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0,
X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor == 1, 0],
X_xor[y_xor == 1, 1],
c='b', marker='x',
label='1')
plt.scatter(X_xor[y_xor == -1, 0],
X_xor[y_xor == -1, 1],
c='r',
marker='s',
label='-1')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('./figures/xor.png', dpi=300)
plt.show()<jupyter_output><empty_output><jupyter_text>This is the classification result using a rbf (radial basis function) kernel
Notice the non-linear decision boundaries<jupyter_code>svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor,
classifier=svm)<jupyter_output><empty_output><jupyter_text>## Types of kernels
A variety of kernel functions can be used.
The only requirement is that the kernel function behaves like an inner product;
larger $K(\mathbf{x}, \mathbf{y})$ for more similar $\mathbf{x}$ and $\mathbf{y}$
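As a quick numerical check of this "kernel = inner product in a mapped space" requirement, the following minimal sketch compares the degree-2 polynomial kernel with the explicit mapping $\phi$ derived in the polynomial subsection below (the random 2D points are illustrative):

```python
import numpy as np

def poly_kernel(x, y, q=2):
    return (np.dot(x, y) + 1.0) ** q          # K(x, y) = (x^T y + 1)^q

def poly_map(x):
    x1, x2 = x                                # explicit phi for d=2, q=2
    return np.array([1.0, np.sqrt(2) * x1, np.sqrt(2) * x2,
                     np.sqrt(2) * x1 * x2, x1 ** 2, x2 ** 2])

rng = np.random.RandomState(1)
x, y = rng.randn(2), rng.randn(2)
print(poly_kernel(x, y), np.dot(poly_map(x), poly_map(y)))  # identical values
```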
### Linear
$
K\left(\mathbf{x}, \mathbf{y}\right) = \mathbf{x}^T \mathbf{y}
$### Polynomials of degree $q$
$
K\left(\mathbf{x}, \mathbf{y}\right) =
(\mathbf{x}^T\mathbf{y} + 1)^q
$
Example for $d=2$ and $q=2$
$$
\begin{align}
K\left(\mathbf{x}, \mathbf{y}\right) &= \left( x_1y_1 + x_2y_2 + 1 \right)^2 \\
&= 1 + 2x_1y_1 + 2x_2y_2 + 2x_1x_2y_1y_2 + x_1^2y_1^2 + x_2^2y_2^2
\end{align}
$$
, which corresponds to the following mapping function:
$$
\phi(x_1, x_2) = \left[1, \sqrt{2}x_1, \sqrt{2}x_2, \sqrt{2}x_1x_2, x_1^2, x_2^2 \right]^T
$$### Radial basis function (RBF)
Scalar variance:
$$
K\left(\mathbf{x}, \mathbf{y} \right) = e^{-\frac{\left|\mathbf{x} - \mathbf{y}\right|^2}{2s^2}}
$$
General co-variance matrix:
$$
K\left(\mathbf{x}, \mathbf{y} \right) = e^{-\frac{1}{2} \left(\mathbf{x}-\mathbf{y}\right)^T \mathbf{S}^{-1} \left(\mathbf{x} - \mathbf{y}\right)}
$$
General distance function $D\left(\mathbf{x}, \mathbf{y}\right)$:
$$
K\left(\mathbf{x}, \mathbf{y} \right) = e^{-\frac{D\left(\mathbf{x}, \mathbf{y} \right)}{2s^2}}
$$
RBF essentially projects to an infinite dimensional space.### Sigmoid
$$
K\left(\mathbf{x}, \mathbf{y} \right) = \tanh\left(2\mathbf{x}^T\mathbf{y} + 1\right)
$$## Kernel SVM for the Iris dataset
Let's apply RBF kernel
The kernel width is controlled by a gamma $\gamma$ parameter for kernel influence
$
K\left(\mathbf{x}, \mathbf{y} \right) =
e^{-\gamma D\left(\mathbf{x}, \mathbf{y} \right)}
$
and $C$ for regularization<jupyter_code>from sklearn.svm import SVC
svm = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=test_idx,
xlabel = 'petal length [standardized]',
ylabel = 'petal width [standardized]')
svm = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=test_idx,
xlabel = 'petal length [standardized]',
ylabel='petal width [standardized]')<jupyter_output><empty_output><jupyter_text># Reading
* PML Chapter 3
* IML Chapter 13-1 to 13.7
* [The kernel trick](http://www.eric-kim.net/eric-kim-net/posts/1/kernel_trick.html)# Decision tree learning
Machine learning can be like black box/magic; the model/method works after tuning parameters and such, but how and why?
Decision tree shows you how it makes decision, e.g. classification.## Example decision tree
* analogous to flow charts for designing algorithms
* every internal node can be based on some if-statement
* automatically learned from data, not manually programmed by human
## Decision tree learning
1. Start with a single node that contains all data
2. Select a node and split it via some criterion to optimize some objective, usually information/impurity $I$
3. Repeat until convergence:
   * good enough classification measured by $I$;
   * complex enough model (overfitting)
4. Each leaf node belongs to one class
   * Multiple leaf nodes can be of the same class
   * Each leaf node can have misclassified samples - majority voting

Splits are:
* usually along one dimension/feature
* a finite number of choices from the boundaries of sample classes

## Maximizing information gain - getting the most bang for the buck
$I(D)$ information/impurity for a tree node with dataset $D$
Maximize information gain $IG$ for splitting each (parent) node $D_p$ into $m$ child nodes $j$:
$$
IG = I(D_p) - \sum_{j=1}^m \frac{N_j}{N_p} I(D_j)
$$
Usually $m=2$ for simplicity (binary split)
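A minimal sketch of the information gain for a binary split, using entropy (defined just below) as the impurity measure $I$; the toy labels are illustrative only:

```python
import numpy as np

def node_entropy(labels):
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))             # I_H(t)

def information_gain(parent, left, right):
    n = len(parent)                            # IG = I(D_p) - sum_j (N_j/N_p) I(D_j)
    return (node_entropy(parent)
            - len(left) / n * node_entropy(left)
            - len(right) / n * node_entropy(right))

parent = np.array([0, 0, 0, 0, 1, 1, 1, 1])
print(information_gain(parent, parent[:4], parent[4:]))  # perfect split -> IG = 1.0
```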
Commonly used impurity measures $I$
$p(i|t)$ - probability/proportion of the dataset in node $t$ that belongs to class $i$

### Entropy
$$
I_H(t) = - \sum_{i=1}^c p(i|t) \log_2 p(i|t)
$$
* $0$ if all samples belong to the same class
* $1$ if uniform distribution
$
0.5 = p(0|t) = p(1|t)
$Entropy (information theory)
Random variable $X$ with probability mass/density function $P(X)$
Information content
$
I(X) = -\log_b\left(P(X)\right)
$
Entropy is the expectation of information
$$
H(X) = E(I(X)) = E(-\log_b(P(X)))
$$
log base $b$ can be $2$, $e$, $10$
Continuous $X$:
$$
H(X) = \int P(x) I(x) \; dx = -\int P(x) \log_b P(x) \;dx
$$
Discrete $X$:
$$
H(X) = \sum_i P(x_i) I(x_i) = -\sum_i P(x_i) \log_b P(x_i)
$$
$-\log_b P(x)$ - number of bits needed to represent $P(x)$
* the rarer the event $\rightarrow$ the smaller $P(x)$ $\rightarrow$ the more bits

### Gini index
Minimize expected value of misclassification
$$
I_G(t) = \sum_{i=1}^c p(i|t) \left( 1 - p(i|t) \right) = 1 - \sum_{i=1}^c p(i|t)^2
$$
* $p(i|t)$ - probability of class $i$
* $1-p(i|t)$ - probability of misclassification, i.e. $t$ is not class $i$
Similar to entropy
* expected value of information: $-\log_2 p(i|t)$
* information and classification probability: both larger for lower $p(i|t)$### Classification error
$$
I_e(t) = 1 - \max_i p(i|t)
$$
$
argmax_i \; p(i|t)
$
as the class label for node $t$
## Compare different information measures
via a 2-class case
Entropy and Gini index are similar, and tend to behave better than classification error
* curves below
* example in the PML textbook<jupyter_code>import matplotlib.pyplot as plt
import numpy as np
def gini(p):
return p * (1 - p) + (1 - p) * (1 - (1 - p))
def entropy(p):
return - p * np.log2(p) - (1 - p) * np.log2((1 - p))
def error(p):
return 1 - np.max([p, 1 - p])
x = np.arange(0.0, 1.0, 0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
for i, lab, ls, c, in zip([ent, sc_ent, gini(x), err],
['Entropy', 'Entropy (scaled)',
'Gini Impurity', 'Misclassification Error'],
['-', '-', '--', '-.'],
['black', 'lightgray', 'red', 'green', 'cyan']):
line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
ncol=3, fancybox=True, shadow=False)
ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim([0, 1.1])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity Index')
plt.tight_layout()
#plt.savefig('./figures/impurity.png', dpi=300, bbox_inches='tight')
plt.show()<jupyter_output><empty_output><jupyter_text>## Building a decision tree<jupyter_code>from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
tree.fit(X_train, y_train)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined,
classifier=tree, test_idx=test_idx,
xlabel='petal length [cm]',ylabel='petal width [cm]')<jupyter_output><empty_output><jupyter_text>## Visualize the decision tree<jupyter_code>from sklearn.tree import export_graphviz
export_graphviz(tree,
out_file='tree.dot',
feature_names=['petal length', 'petal width'])<jupyter_output><empty_output><jupyter_text>Install [Graphviz](http://www.graphviz.org/)
<!--
-->
dot -Tsvg tree.dot -o tree.svg
## Pruning a decision tree
Split until all leaf nodes are pure?
* not always a good idea due to potential over-fitting

Simplify the tree via pruning
Pre-pruning
* stop splitting a node if the contained data size is below some threshold (e.g. 5% of all data)
Post-pruning
* build a tree first, and remove excessive branches
* reserve a pruning subset separate from the training data
* for each sub-tree (top-down or bottom-up), replace it with a leaf node labeled with the majority vote if doing so does not worsen performance on the pruning subset
Pre-pruning is simpler, post-pruning works better

## Combining weak to strong learners via random forests
Forest = collection of trees
An example of ensemble learning (more about this later)
* combine multiple weak learners to build a strong learner
* better generalization, less overfitting
Less interpretable than a single tree### Random forest algorithm
Decide how many trees to build
To train each tree:
* Draw a random subset of samples (e.g. random sample with replacement of all samples)
* Split each node via a random subset of features (e.g. $d = \sqrt{m}$ of the original dimensionality)
(randomization is key)
Majority vote from all trees### Code example<jupyter_code>from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion='entropy',
n_estimators=10,
random_state=1,
n_jobs=2)
forest.fit(X_train, y_train)
plot_decision_regions(X_combined, y_combined,
classifier=forest, test_idx=test_idx,
xlabel = 'petal length [cm]', ylabel = 'petal width [cm]')<jupyter_output><empty_output><jupyter_text># Reading
* PML Chapter 3
* IML Chapter 9# Parametric versus non-parametric models
* (fixed) number of parameters trained and retained
* amount of data retained
* trade-off between training and evaluation time
## Example
Linear classifiers (SVM, perceptron)
* parameters: $\mathbf{w}$
* data thrown away after training
* extreme end of parametric
Kernel SVM
* depends on the type of kernel used (exercise)
Decision tree
* parameters: decision boundaries at all nodes
* number of parameters varies depending on the training data
* data thrown away after training
* less parametric than SVM
Personal take:
Parametric versus non-parametric is more of a continuous spectrum than a binary decision.
Many algorithms lie somewhere in between.

# K-nearest neighbors - a lazy learning algorithm
KNN keeps all data and has no trained parameters
* extreme end of non-parametric

How it works:
* Choose the number $k$ of neighbors and a distance measure
* For each sample to classify, find the $k$ nearest neighbors in the dataset
* Assign class label via majority vote
$k$ is a hyper-parameter (picked by a human), not an (ordinary) parameter (trained from data by machines)
Pro:
* zero training time
* very simple
Con:
* need to keep all data
* evaluation time linearly proportional to data size
* vulnerable to curse of dimensionality

## Practical usage
[Minkowski distance](https://en.wikipedia.org/wiki/Minkowski_distance) of order $p$:
$
d(\mathbf{x}, \mathbf{y}) = \sqrt[p]{\sum_k |\mathbf{x}_k - \mathbf{y}_k|^p}
$
* $p = 2$, Euclidean distance
* $p = 1$, Manhattan distance
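A minimal sketch of the Minkowski distance (the two sample points are illustrative):

```python
import numpy as np

def minkowski(x, y, p=2):
    return np.sum(np.abs(x - y) ** p) ** (1.0 / p)

x, y = np.array([1.0, 2.0]), np.array([4.0, 6.0])
print(minkowski(x, y, p=2), minkowski(x, y, p=1))  # 5.0 (Euclidean), 7.0 (Manhattan)
```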
Number of neighbors $k$ trade-off between bias and variance
* too small $k$ - low bias, high variance<jupyter_code>from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=knn, test_idx=test_idx,
xlabel='petal length [standardized]', ylabel='petal width [standardized]')<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Lesson 8. Pipe Networks
Network Hydraulics
Examples
```{note}
This lesson introduces specialized software. The portions below on installation are architecture specific.
```
## Pipeline Network Simulation
```{note}
The network simulator uses input files to convey the network to the program. These files are listed below.
```
> PipeNetwork.txt
```
4
6
200 200 200 200 200
1.00 0.67 0.67 0.67 0.67 0.5
800 800 700 700 800 600
0.00001 0.00001 0.00001 0.00001 0.00001 0.00001
0.000011
1 1 1 1 1 1
1 -1 0 -1 0 0
0 1 -1 0 0 1
0 0 0 1 -1 -1
0 0 1 0 1 0
0 4 3 1 -100 0 0 0 0 0
```
### Background: Pipelines and Networks
Pipe networks are analyzed for head losses in order to size pumps, determine demand management strategies, and predict minimum pressures in the system.
### Pipe Networks -- Topology
Network topology refers to the layout and connections.
Networks are built of nodes (junctions) and arcs (links).
### Continuity (at a node)
Water is considered incompressible in steady flow in pipelines and pipe networks, and the
conservation of mass reduces to the volumetric flow rate, $Q$,
$Q = AV$
where $A$ is the cross-sectional area of the pipe, and $V$ is the mean section velocity. Typical units
for discharge are liters per second (lps), gallons per minute (gpm), cubic meters per second
(cms), cubic feet per second (cfs), and million gallons per day (mgd).
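As a small worked example of $Q = AV$ (a sketch; the 1 ft diameter and 3 ft/s velocity are illustrative values, not data from this lesson):

```python
import math

def pipe_discharge(diameter, velocity):
    area = 0.25 * math.pi * diameter ** 2   # circular cross section
    return area * velocity                  # Q = A V

print(round(pipe_discharge(1.0, 3.0), 3))   # about 2.356 cfs for a 1 ft pipe at 3 ft/s
```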
The continuity equation in two cross-sections of a pipe as depicted in {numref}`continuity-across-sections` is
$ A_1V_1 = A_2V_2 $
Junctions (nodes) are where two or more pipes join together.
A three-pipe junction node with constant external demand is shown in {numref}`continuity-at-node`.
The continuity equation for the
junction node is
\begin{equation}
Q_1 - Q_2 - Q_3 - D = 0
\end{equation}
```{figure} continuity-across-sections.png
---
width: 400px
name: continuity-across-sections
---
Continuity of mass (discharge) across a change in cross section
```
```{figure} continuity-at-node.png
---
width: 400px
name: continuity-at-node
---
Continuity of mass (discharge) across a node (junction)
```
In pipe network analysis, all demands on the system are stipulated to be located at junctions (nodes), and the flow connecting junctions is assumed to be uniform across the cross sections (so that mean velocities apply). If a substantial demand is located between nodes, then an additional node is established at the demand location.
### Energy Loss (along a link)
The equation below is the one-dimensional steady flow form of the energy equation typically applied for pressurized conduit hydraulics.
$
\begin{equation}
\frac{p_1}{\rho g}+\alpha_1 \frac{V_1^2}{2g} + z_1 + h_p =
\frac{p_2}{\rho g}+\alpha_2 \frac{V_2^2}{2g} + z_2 + h_t + h_l
\label{eqn:closed-conduit-energy-equation}
\end{equation}
$
where $\frac{p}{\rho g}$ is the pressure head at a location, $\alpha \frac{V^2}{2g}$ is the velocity head at a location, $z$ is the elevation, $h_p$ is the added head from a pump, $h_t$ is the added head extracted by a turbine, and $h_l$ is the head loss between sections 1 and 2. {numref}`closed-conduit-energy` is a sketch that illustrates the various components in the energy equation.
```{figure} closed-conduit-energy.png
---
width: 400px
name: closed-conduit-energy
---
Definition sketch for energy equation
```
In network analysis this energy equation is applied to a link that joins two nodes.
Pumps and turbines would be treated as separate components (links) and their hydraulic behavior must be supplied using their respective pump/turbine curves.
### Velocity Head
The velocity in $\alpha \frac{V^2}{2g}$ is the mean section velocity and is the ratio of discharge to flow area. The kinetic energy correction coefficient is
$\begin{equation}
\alpha=\frac{\int_A u^3 dA}{V^3 A}
\label{eqn:kinetic-energy-correction}
\end{equation}
$
where $u$ is the point velocity in the cross section (usually measured relative to the centerline or the pipe wall; axial symmetry is assumed). Generally values of $\alpha$ are 2.0 if the flow is laminar, and approach unity (1.0) for turbulent flow. In most water distribution systems the flow is usually turbulent so $\alpha$ is assumed to be unity and the velocity head is simply $\frac{V^2}{2g}$.
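As a minimal sketch, the velocity head for a circular pipe can be computed from the discharge and diameter, taking $\alpha = 1$ (the 3 cfs discharge is an illustrative value):

```python
import math

def velocity_head(Q, D, g=32.2):
    V = Q / (0.25 * math.pi * D ** 2)   # mean section velocity
    return V ** 2 / (2.0 * g)           # alpha assumed 1.0 (turbulent flow)

print(round(velocity_head(3.0, 1.0), 3))  # feet of head
```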
### Added Head --- Pumps
The head supplied by a pump is related to the mechanical power supplied to the flow. Equation \ref{eqn:pump-power} is the relationship of mechanical power to added pump head.
$
\begin{equation}
\eta P=Q\rho g h_p
\label{eqn:pump-power}
\end{equation}
$
where the power supplied to the motor is $P$ and the "wire-to-water" efficiency is $\eta$.
If the relationship is re-written in terms of added head (a negative head loss!) the pump curve is
$
\begin{equation}
h_p = \frac{\eta P}{Q\rho g}
\label{eqn:pump-curve}
\end{equation}
$
This relationship illustrates that as discharge increases (for a fixed power) the added head decreases.
Power scales at about the cube of discharge, so pump curves for computational application typically have a mathematical structure like
$
\begin{equation}
h_p = H_{\text{shutoff}} - K_{\text{pump}}Q^{\text{exponent}}
\label{eqn:pump-curve-2}
\end{equation}
$
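A minimal sketch of such a pump curve (the shutoff head, coefficient, and exponent below are illustrative numbers, not data from this lesson):

```python
def pump_head(Q, H_shutoff=150.0, K_pump=0.5, exponent=2.0):
    # h_p = H_shutoff - K_pump * Q**exponent
    return H_shutoff - K_pump * Q ** exponent

print(pump_head(0.0), pump_head(10.0))  # added head at shutoff and at Q = 10
```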
### Extracted Head --- Turbines
The head recovered by a turbine is also an "added head" but appears on the loss side of the equation.
The power that can be recovered by a turbine (again using the concept of "water-to-wire" efficiency) is
$
\begin{equation}
P=\eta Q\rho g h_t
\label{eqn:turbine-power}
\end{equation}
$
### Pipe Head Loss Models
The Darcy-Weisbach, Chezy-Manning, and Hazen-Williams formulas are relationships between physical pipe characteristics, flow parameters, and head loss. The Darcy-Weisbach formula is the most consistent with the energy equation formulation, being derivable (in structural form) from elementary principles (continuity and linear momentum), whereas the other two are empirical (despite the empirical nature of these two models, all three are of practical use, and given a choice select your favorite!)
$
\begin{equation}
h_{L_f}=f \frac{L}{D} \frac{V^2}{2g}
\label{eqn:dw-headloss}
\end{equation}
$
where $h_{L_f}$ is the head loss from pipe friction, $f$ is a dimensionless friction factor, $L$ is the pipe length, $D$ is the pipe characteristic diameter, $V$ is the mean section velocity, and $g$ is the gravitational acceleration.
The friction factor, $f$, is a function of Reynolds number $Re_D$ and the roughness ratio $\frac{k_s}{D}$.
$
\begin{equation}
f=\sigma(Re_D,\frac{k_s}{D})
\label{eqn:friction-factor-dimensionless}
\end{equation}
$
The structure of $\sigma$ is determined experimentally. Over the last century the structure is generally accepted to be one of the following depending on flow conditions and pipe properties
#### Laminar flow (Eqn 2.36, pg. 17, Chin 2006):
$\begin{equation}
f=\frac{64}{Re_D}
\label{eqn:friction-factor-laminar}
\end{equation}
$
#### Hydraulically Smooth Pipes (Eqn 2.34, pg. 16, Chin 2006):
$\begin{equation}
\frac{1}{\sqrt{f}}=-2 log_{10} (\frac{2.51}{Re_d \sqrt{f} })
\label{eqn:friction-factor-smooth}
\end{equation}
$
#### Hydraulically Rough Pipes (Eqn 2.34, pg. 16, Chin 2006):
$
\begin{equation}
\frac{1}{\sqrt{f}}=-2 log_{10} (\frac{\frac{k_e}{D}} {3.7})
\label{eqn:friction-factor-rough}
\end{equation}
$
#### Transitional Pipes (Colebrook-White Formula) (Eqn 2.35, pg. 17, Chin 2006):
$
\begin{equation}
\frac{1}{\sqrt{f}}=-2 log_{10} (\frac{\frac{k_e}{D}} {3.7} + \frac{2.51}{Re_d \sqrt{f} } )
\label{eqn:friction-factor-CW}
\end{equation}
$
#### Transitional Pipes (Jain Formula) (Eqn 2.39, pg. 19, Chin 2006):
$\begin{equation}
f=\frac{0.25}{[log_{10} (\frac{\frac{k_e}{D}} {3.7} + \frac{5.74}{Re_d^{0.9} } )] ^2}
\label{eqn:friction-factor-Jain}
\end{equation}
$
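A quick numerical check of the Jain formula together with the Darcy-Weisbach head loss, using pipe properties from the example network in this lesson and an assumed discharge of 3 cfs (these same relations are coded as reusable functions later in the lesson):

```python
import math

def jain_f(ks, D, Re):
    return 0.25 / (math.log10(ks / (3.7 * D) + 5.74 / Re ** 0.9)) ** 2

D, L, ks = 1.0, 800.0, 0.00001        # pipe P1 of the example network, feet
nu, g, Q = 0.000011, 32.2, 3.0        # kinematic viscosity, gravity, assumed discharge
V = Q / (0.25 * math.pi * D ** 2)
Re = V * D / nu
f = jain_f(ks, D, Re)
h_L = f * (L / D) * V ** 2 / (2.0 * g)
print(round(Re), round(f, 4), round(h_L, 2))   # Reynolds number, friction factor, head loss (ft)
```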
### Pipe Networks Solution Methods
Several methods are used to produce solutions (estimates of discharge, head loss, and pressure) in a network.
An early one, that only involves analysis of loops is the Hardy-Cross method.
A later one, more efficient, is a Newton-Raphson method that uses node equations to balance discharges and demands, and loop equations to balance head losses.
However, a rather ingenious method developed by Haman (1971) exists, where the flow distribution and head values are determined simultaneously. The task here is to outline the Haman (1971) method on the problem below -- first some necessary definitions and analysis.
The fundamental procedure is:
- Continuity is written at nodes (node equations).
- Energy loss (gain) is written along links (pipe equations).
- The entire set of equations is solved simultaneously.
### Network Analysis Example
```{figure} pipe-net-hybrid.png
---
width: 400px
name: pipe-net-hybrid
---
Pipe network for illustrative example with supply and demands identified. Pipe dimensions and diameters are also depicted.
```
{numref}`pipe-net-hybrid` is a sketch of the problem that will be used.
The network supply is the fixed-grade node in the upper left hand corner of the drawing.
The remaining nodes (N1 -- N4) have demands specified as the purple outflow arrows.
The pipes are labeled (P1 -- P6), and the red arrows indicate a positive flow direction, that is, if the flow is in the indicated direction, the numerical value of flow (or velocity) in that link would be a positive number.
Define the flows in each pipe and the total head at each node as $Q_i$ and $H_i$ where the subscript indicates the particular component identification. Expressed as a vector, these unknowns are:
$
\begin{matrix}
[Q_1, & Q_2, & Q_3, & Q_4, & Q_5, & Q_6, & H_1, & H_2, & H_3, &H_4 ]& = & \textbf{x} \\
\end{matrix}
$
If we analyze continuity for each node we will have 4 equations (corresponding to each node) for continuity; for instance, for Node N2 the equation is
$
\begin{matrix}
~& Q_2 & -Q_3 & ~ & ~ & Q_6 & ~ & ~ & ~ &~ & = & 4\\
\end{matrix}
$
Similarly, if we define the head loss in any pipe as $\Delta H_i = f \frac{8 L_i}{\pi^2 g D_i^5} |Q_i| Q_i$ or $\Delta H_i = L_i Q_i$, where $L_i = f \frac{8 L_i}{\pi^2 g D_i^5} |Q_i|$, then we have 6 equations (corresponding to each pipe) for energy, for instance for Pipe (P2) the equation is:
$
\begin{matrix}
~& -L_2Q_2& ~ & ~ & ~ & ~& H_1 & -H_2 & ~ & ~ & = & 0\\
\end{matrix}
$
If we now write all the node equations then all the pipe equations we could construct the following coefficient matrix below
```{note}
The horizontal lines divide the node and the pipe equations.
```
The upper partition are the node equations in Q and H, the lower partition are the pipe equations in Q and H}
$
\begin{matrix}
\hline
~1&-1 & 0 & -1 & 0 & 0 & 0 & 0 & 0 &0 \\
0&~1 & -1 & 0 & 0 &~1 & 0 & 0 & 0 &0 \\
0& 0 & 0 &~1 & -1 & -1 & 0 & 0 & 0 &0 \\
0& 0 &~1 & 0 &~1 & 0 & 0 & 0 & 0 &0 \\
\hline
-L_1& 0& 0 & 0 & 0 & 0 & -1 & 0 & 0 &0 \\
0& -L_2& 0 & 0 & 0 & 0&~1 & -1 & 0 &0 \\
0& 0& -L_3 & 0 & 0 & 0& 0 &~1 & 0 & -1 \\
0& 0& 0 & -L_4 & 0 & 0&~1 & 0 & -1 & 0 \\
0& 0& 0 & 0 & -L_5 & 0& 0 & 0 &~1 & -1 \\
0& 0& 0 &0 & 0 & -L_6& 0 & -1 &~1 & 0 \\
\hline
\end{matrix}
$
Declare the name of this matrix $\textbf{A(x)}$, where $\textbf{x}$ denotes the unknown vector of Q augmented by H as above. Next consider the right-hand-side at the correct solution (as of yet still unknown!) as
$
\begin{matrix}
[0, & 4, & 3, & 1, & -100 , & 0, & 0, & 0, & 0, &0 ] = \textbf{b}\\
\end{matrix}
$
So if the coefficient matrix is correct then the following system would result:
$
\mathbf{A(x)} \cdot \mathbf{x} = \mathbf{b}
$
which would look like
```{figure} VM-system.png
---
width: 600px
name: VM-system
---
Pipe network illustrative example as a Matrix-Vector system of equations.
```
Observe, the system is non-linear because the coefficient matrix depends on the current values of $Q_i$ for the $L_i$ terms.
However, the system is full-rank (rows == columns) so it is a candidate for Newton-Raphson.
Further observe that the upper partition from column 6 and smaller is simply the node-arc incidence matrix, and the lower partition for the same columns only contains $L_i$ terms on its diagonal, the remainder is zero.
Next observe that the partition associated with heads in the node equations is the zero-matrix.
Lastly (and this is important!) the lower right partition is the transpose of the node-arc incidence matrix subjected to scalar multiplication of $-1$.
The importance is that all the information needed to find a solution is contained in the node-arc incidence matrix and the right-hand-side -- the engineer does not need to identify closed loops (nor does the computer need to find closed loops).
The trade-off is a much larger system of equations; however, solving large systems is far easier than searching a directed graph to identify closed loops, and furthermore we obtain the heads as part of the solution process.
#### Script Structure
The script will need to accomplish several tasks, including reading the node-arc incidence matrix supplied as a file and converting the strings into numeric values. The script will also need some support functions defined before constructing the matrix. First the file for the example is:
> PipeNetwork.txt
```
4
6
200.0 200.0 200.0 200.0
1.00 0.67 0.67 0.67 0.67 0.5
800 800 700 700 800 600
0.00001 0.00001 0.00001 0.00001 0.00001 0.00001
0.000011
1 1 1 1 1 1
1 -1 0 -1 0 0
0 1 -1 0 0 1
0 0 0 1 -1 -1
0 0 1 0 1 0
0 4 3 1 -000 0 0 0 0 0
```
The rows of the input file are:
- The node count.
- The pipe count.
- Node elevations, in feet.
- Pipe diameters, in feet.
- Pipe lengths, in feet.
- Pipe roughness heights, in feet.
- Kinematic viscosity in feet$^2$/second.
- Initial guess of flow rates (unbalanced OK, non-zero vital!)
- The next four rows are the node-arc incidence matrix.
- The last row is the demand (and fixed-grade node total head) vector.
#### Support Functions
The Reynolds number will need to be calculated for each pipe at each iteration of the solution, so a Reynolds number function will be useful. For circular pipes, the following equation should work,
$Re_D=\frac{V_i \cdot D_i }{\mu}$, where $\mu$ here denotes the kinematic viscosity supplied in the input file.
The Jain equation (Jain, 1976) that directly computes friction factor from Reynolds number, diameter, and roughness is
$f= \frac{0.25}{(log_{10}(\frac{\epsilon}{3.7D}+\frac{5.74}{Re_D^{0.9}}))^2}$
Once you have the Reynolds number for a pipe, and the friction factor, then the head loss factor that will be used in the coefficient matrix (and the Jacobian) is
$k_i = \frac{8 \cdot L_i}{\pi^2 g D_i^5}$
We will also find it handy to be able to compute velocity heads from discharge and pipe diameters so we can have a velocity function as
$V_i = \frac{Q_i}{0.25 \cdot \pi D_i^2}$
These support functions are coded below (in a code cell) as:<jupyter_code># hydraulic elements prototype functions
# Jain Friction Factor Function -- Tested OK 23SEP16
import math # This will import math module
def friction_factor(roughness,diameter,reynolds):
temp1 = roughness/(3.7*diameter)
temp2 = 5.74/(reynolds**(0.9))
temp3 = math.log10(temp1+temp2)
temp3 = temp3**2
friction_factor = 0.25/temp3
return(friction_factor)
# Velocity Function
def velocity(diameter,discharge):
velocity=discharge/(0.25*math.pi*diameter**2)
return(velocity)
# Reynolds Number Function
def reynolds_number(velocity,diameter,mu):
reynolds_number = abs(velocity)*diameter/mu
return(reynolds_number)
# Geometric factor function
def k_factor(howlong,diameter,gravity):
k_factor = (16*howlong)/(2.0*gravity*math.pi**2*diameter**5)
return(k_factor)<jupyter_output><empty_output><jupyter_text>We will need our linear solver:<jupyter_code># SolveLinearSystem.py
# Code to read A and b
# Then solve Ax = b for x by Gaussian elimination with back substitution
#
##########
def linearsolver(A,b):
n = len(A)
# M = A #this is object to object equivalence
# copy A into M element by element - to operate on M without destroying A
M=[[0.0 for jcol in range(n)]for irow in range(n)]
for irow in range(n):
for jcol in range(n):
M[irow][jcol]=A[irow][jcol]
#
i = 0
for x in M:
x.append(b[i])
i += 1
for k in range(n):
for i in range(k,n):
if abs(M[i][k]) > abs(M[k][k]):
M[k], M[i] = M[i],M[k]
else:
pass
for j in range(k+1,n):
q = float(M[j][k]) / M[k][k]
for m in range(k, n+1):
M[j][m] -= q * M[k][m]
x = [0 for i in range(n)]
x[n-1] =float(M[n-1][n])/M[n-1][n-1]
for i in range (n-1,-1,-1):
z = 0
for j in range(i+1,n):
z = z + float(M[i][j])*x[j]
x[i] = float(M[i][n] - z)/M[i][i]
# print (x)
return(x)
#<jupyter_output><empty_output><jupyter_text>We will also find some vector-matrix manipulation functions handy<jupyter_code>def writeM(M,ir,jc,label):
print ("------",label,"------")
for i in range(0,ir,1):
print (M[i][0:jc])
print ("-----------------------------")
return()
def writeV(V,ir,label):
print ("------",label,"------")
for i in range(0,ir,1):
print (V[i])
print ("-----------------------------")
return()
def matrixmatrixmult(amatrix,bmatrix,rowNumA,colNumA,rowNumB,colNumB):
AB =[[0.0 for j in range(colNumB)] for i in range(rowNumA)]
for i in range(0,rowNumA):
for j in range(0,colNumB):
for k in range(0,colNumA):
AB[i][j]=AB[i][j]+amatrix[i][k]*bmatrix[k][j]
return(AB)
def matrixvectormult(amatrix,xvector,rowNumA,colNumA):
bvector=[0.0 for i in range(rowNumA)]
for i in range(0,rowNumA):
for j in range(0,1):
for k in range(0,colNumA):
bvector[i]=bvector[i]+amatrix[i][k]*xvector[k]
return(bvector)
def vectoradd(avector,bvector,length):
cvector=[]
for i in range(length):
cvector.append(avector[i]+bvector[i])
return(cvector)
def vectorsub(avector,bvector,length):
cvector=[]
for i in range(length):
cvector.append(avector[i]-bvector[i])
return(cvector)
def vdotv(avector,bvector,length):
adotb=0.0
for i in range(length):
adotb=adotb+avector[i]*bvector[i]
return(adotb)<jupyter_output><empty_output><jupyter_text>#### Augmented and Jacobian Matrices
The $\textbf{A(x)}$ is built using the node-arc incidence matrix (which does not change), and the current values of $L_i$.
We also need to build the Jacobian of $\textbf{A(x)}$ to implement the update as-per Newton-Raphson.
A brief review; at the solution we can write
$\begin{equation}
[\mathbf{A}(\mathbf{x})] \cdot \mathbf{x} - \mathbf{b} = \mathbf{f}(\mathbf{x}) = \mathbf{0}
\end{equation}
$
Lets assume we are not at the solution, so we need a way to update the current value of $\textbf{x}$.
Recall from Newton's method (for univariate cases) that the update formula is
$
\begin{equation}
x_{k+1}=x_{k} - (\frac{df}{dx}\mid_{x_k})^{-1} f(x_k)
\end{equation}
$
The Jacobian will play the role of the derivative, and $\textbf{x}$ is now a vector (instead of a single variable).
Division is not defined for matrices, but the multiplicative inverse is (the inverse matrix), and plays the role of division.
Hence, the extension to the pipeline case is
$
\begin{equation}
\mathbf{x}_{k+1}=\mathbf{x}_{k} - [\mathbf{J}(\mathbf{x}_{k})]^{-1} \mathbf{f}(\mathbf{x}_k)
\end{equation}
$
where $\mathbf{J}(\mathbf{x}_{k})$ is the Jacobian of the coefficient matrix $\mathbf{A}$ evaluated at $\mathbf{x}_{k}$.
Although a bit cluttered, here is the formula for a single update step, with the matrix, demand vector, and the solution vector in their proper places.
$
\begin{equation}
\mathbf{x}_{k+1}=\mathbf{x}_{k} - [\mathbf{J}(\mathbf{x}_{k})]^{-1} \{[\mathbf{A}(\mathbf{x}_k)] \cdot \mathbf{x}_k - \mathbf{b}\}
\end{equation}
$
As a practical matter we actually never invert the Jacobian, instead we solve the related Linear system of
$
[\mathbf{J}(\mathbf{x}_{k})] \cdot \Delta \mathbf{x} = \{[\mathbf{A}(\mathbf{x}_k)] \cdot \mathbf{x}_k - \mathbf{b}\}
$
for $\Delta\textbf{x}$, then perform the update as $\textbf{x}_{k+1} = \textbf{x}_{k} - \Delta\textbf{x}$
```{note}
Inverting the matrix every step is computationally inefficient, and unnecessary. As an example, solving the system in this case would at worst take 10 row operations each step, but nearly 100 row operations to invert at each step -- to accomplish the same result, generate an update. Now imagine when there are hundreds of nodes and pipes!
```
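In compact form, one update step looks like the sketch below. It reuses the `linearsolver`, `matrixvectormult`, and `vectorsub` helpers defined above; `A_of` and `jacobian_of` are hypothetical callables that would build $\mathbf{A}(\mathbf{x})$ and $\mathbf{J}(\mathbf{x})$, which the full script below constructs inline instead.

```python
# one Newton-Raphson update: solve J(x) dx = A(x) x - b, then x <- x - dx
def newton_step(x, b, A_of, jacobian_of):
    n = len(x)
    gq = vectorsub(matrixvectormult(A_of(x), x, n, n), b, n)   # f(x) = A(x) x - b
    dx = linearsolver(jacobian_of(x), gq)                      # J(x) dx = f(x)
    return vectorsub(x, dx, n)                                 # x_new = x - dx
```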
The Jacobian of the pipeline model is a matrix with the following properties:
- The partition of the matrix that corresponds to the node formulas (upper left partition) is identical to the original coefficient matrix --- it will be comprised of $0~\text{or}~\pm~1$ in the same pattern at the equivalent partition of the $\mathbf{A}$ matrix.
- The partition of the matrix that corresponds to the pipe head loss terms (lower left partition) will consist of values that are twice the values of the coefficients in the original coefficient matrix (at any supplied value of $\mathbf{x}_k$).
- The partition of the matrix that corresponds to the head terms (lower right partition), will consist of values that are identical to the original matrix.
- The partition of the matrix that corresponds to the head coefficients in the node equations (upper right partition) will also remain unchanged.
We will want to take advantage of problem structure to build the Jacobian (you could just finite-difference the coefficient matrix to approximate the partial derivatives, but that is terribly inefficient if you already know the structure).
So now lets code reading the input file and building at least the starting instance of the matrix equations
First allocate some memory<jupyter_code>bvector = []
rowNumA = 0
colNumA = 0
rowNumB = 0
verbose = 'false' # set to true for in-class demonstration
#############################################
elevation = [] # null list node elevations
diameter = [] # null list pipe diameters
distance = [] # null list pipe lengths
roughness = [] # null list pipe roughness
flowguess = [] # null list pipe flow rates
nodearcs = [] # node-arc incidence matrix
rhs_true = [] # null list for nodal demands
tempvect = []<jupyter_output><empty_output><jupyter_text>Now read in the data from a file (useful to expand problem scale to many,many pipes and nodes).<jupyter_code>##############################################
# connect and read file for Pipeline Network #
##############################################
afile = open("PipeNetwork.txt","r")
nnodes = int(afile.readline())
npipes = int(afile.readline())
# read elevation vector
tempvect.append([float(n) for n in afile.readline().strip().split()])
for i in range(0,nnodes,1):
elevation.append(float(tempvect[0][i]))
tempvect = [] # reset vector
# read diameter vector
tempvect.append([float(n) for n in afile.readline().strip().split()])
for i in range(0,npipes,1):
diameter.append(float(tempvect[0][i]))
tempvect = [] # reset vector
# read length vector
tempvect.append([float(n) for n in afile.readline().strip().split()])
for i in range(0,npipes,1):
distance.append(float(tempvect[0][i]))
tempvect = [] # reset vector
# read roughness vector
tempvect.append([float(n) for n in afile.readline().strip().split()])
for i in range(0,npipes,1):
roughness.append(float(tempvect[0][i]))
tempvect = [] # reset vector
# read viscosity (scalar)
viscosity = float(afile.readline())
# read current flow guess
tempvect.append([float(n) for n in afile.readline().strip().split()])
for i in range(0,npipes,1):
flowguess.append(float(tempvect[0][i]))
tempvect = [] # reset vector
# read nodearc incidence matrix
## future revisions read directly into augmented matrix, or find way to release nodearc from stack
for irow in range(0,nnodes,1): # then read each row
nodearcs.append([float(n) for n in afile.readline().strip().split()])
# read demands guess
tempvect.append([float(n) for n in afile.readline().strip().split()])
for i in range(0,nnodes+npipes,1):
rhs_true.append(float(tempvect[0][i]))
tempvect = [] # reset vector
######################################
# end file read ,disconnect file #
######################################
afile.close() # Disconnect the file<jupyter_output><empty_output><jupyter_text>Echo the data just read, could put into a conditional and choose not to display except for debugging.<jupyter_code>######################################
# echo the input in human readable #
######################################
print('number of nodes : ',nnodes)
print('number of pipes : ',npipes)
print('viscosity : ',viscosity)
print ("-----------------------------")
for irow in range(0,nnodes):
print('node id:',irow, ', elevation :',elevation[irow],' head :',rhs_true[irow+npipes])
print ("-----------------------------")
for jcol in range(0,npipes):
print('pipe id:',jcol,', diameter : ' ,diameter[jcol],', distance : ',distance[jcol],
', roughness : ',roughness[jcol],', flow : ',flowguess[jcol])
print ("-----------------------------")
##for jcol in range(0,nnodes+npipes):
## print('irow :',jcol,' RHS True :',rhs_true[jcol])
##print ("-----------------------------")
print("node-arc incidence matrix")
for i in range(0,nnodes,1):
print (nodearcs[i][0:npipes])
print ("-----------------------------")<jupyter_output>number of nodes : 4
number of pipes : 6
viscosity : 1.1e-05
-----------------------------
node id: 0 , elevation : 200.0 head : 0.0
node id: 1 , elevation : 200.0 head : 0.0
node id: 2 , elevation : 200.0 head : 0.0
node id: 3 , elevation : 200.0 head : 0.0
-----------------------------
pipe id: 0 , diameter : 1.0 , distance : 800.0 , roughness : 1e-05 , flow : 1.0
pipe id: 1 , diameter : 0.67 , distance : 800.0 , roughness : 1e-05 , flow : 1.0
pipe id: 2 , diameter : 0.67 , distance : 700.0 , roughness : 1e-05 , flow : 1.0
pipe id: 3 , diameter : 0.67 , distance : 700.0 , roughness : 1e-05 , flow : 1.0
pipe id: 4 , diameter : 0.67 , distance : 800.0 , roughness : 1e-05 , flow : 1.0
pipe id: 5 , diameter : 0.5 , distance : 600.0 , roughness : 1e-05 , flow : 1.0
-----------------------------
node-arc incidence matrix
[1.0, -1.0, 0.0, -1.0, 0.0, 0.0]
[0.0, 1.0, -1.0, 0.0, 0.0, 1.0]
[0.0, 0.0, 0.0, 1.0, -1.0, -1.0]
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0]
[...]<jupyter_text>Now create the augmented matrix using the structure of the problem.<jupyter_code># create augmented matrix
colNumA = npipes+nnodes
rowNumA = nnodes+npipes
augmentedMat = [] # null list to store augmented matrix
#######################################################################################
augmentedMat = [[0.0 for j in range(colNumA)]for i in range(rowNumA)] #fill with zeroes
#build upper left partition -- from nodearcs
for ir in range(0,nnodes):
for jc in range (0,npipes):
augmentedMat[ir][jc] = nodearcs[ir][jc]
istart=nnodes
iend=nnodes+npipes
jstart=npipes
jend=npipes+nnodes
for ir in range(istart,iend):
for jc in range (jstart,jend):
augmentedMat[ir][jc] = -1.0*nodearcs[jc-jstart][ir-istart] + 0.0
if verbose == 'true' :
print("augmented matrix before loss factors")
writeM(augmentedMat,rowNumA,colNumA,"augmented matrix")<jupyter_output><empty_output><jupyter_text>#### Stopping Criteria, and Solution Report}
You will need some way to stop the process -- the three most obvious (borrowed from Newton's method) are:
- Approaching the correct solution (e.g. $[\mathbf{A}(\mathbf{x})] \cdot \mathbf{x} - \mathbf{b} = \mathbf{f}(\mathbf{x}) = \mathbf{0}$).
- Update vector is not changing (e.g. $\mathbf{x}_{k+1}=\mathbf{x}_{k}$), so either have an answer, or the algorithm is stuck.
- You have done a lot of iterations (say 100).
We want to have the script determine when to stop and report the conditions (which stopping criterion was used), and the values of flows and heads in the system.
First let's set some tolerances and an iteration limit, as well as allocate memory to store the auxiliary function results<jupyter_code>#######################################################################################
howmany=50 #iterations max
tolerance1 = 1e-24
tolerance2 = 1e-24
velocity_pipe = [0 for i in range(npipes)] # null list velocities
reynolds = [0 for i in range(npipes)] # null list reynolds numbers
friction = [0 for i in range(npipes)] # null list friction
geometry = [0 for i in range(npipes)] # null list geometry
lossfactor = [0 for i in range(npipes)] # null list loss
jacbMat = [] # null list to store jacobian matrix
jacbMat = [[0.0 for j in range(colNumA)]for i in range(rowNumA)] #fill with zeroes
solvecguess =[ 0.0 for i in range(rowNumA)]
solvecnew =[ 0.0 for i in range(rowNumA)]
for i in range(0,npipes,1):
solvecguess[i] = flowguess[i]
geometry[i] = k_factor(distance[i],diameter[i],32.2)
#solvecguess is a current guess -- wonder if more pythonic way for this assignment
## print('irow :',i,' Geometry Factor :',geometry[i])
##print ("-----------------------------")<jupyter_output><empty_output><jupyter_text>And finally the money shot; where we wrap everything into a for loop to iteratively find a solution<jupyter_code>###############################################################
## ITERATION LOOP #
###############################################################
for iteration in range(howmany): # iteration outer loop
if verbose == 'true' :
print("solutions at begin of iteration",iteration)
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' solvecnew :',solvecnew[jcol]," solvecguess ",solvecguess[jcol])
print ("-----------------------------")
for i in range(0,npipes,1):
velocity_pipe[i] = velocity(diameter[i],flowguess[i])
reynolds[i]=reynolds_number(velocity_pipe[i],diameter[i],viscosity)
friction[i]=friction_factor(roughness[i],diameter[i],reynolds[i])
lossfactor[i]=friction[i]*geometry[i]*abs(flowguess[i])
if verbose == 'true' :
for jcol in range(0,npipes):
print('pipe id:',jcol,', velocity : ' ,velocity_pipe[jcol],', reynolds : ',reynolds[jcol],
', friction : ',friction[jcol],', loss factor : ',lossfactor[jcol],'flow guess',flowguess[jcol])
################################################################
# BUILD AUGMENTED MATRIX CURRENT Q+H SOLUTION #
################################################################
augmentedMat = [[0.0 for j in range(colNumA)]for i in range(rowNumA)] #fill with zeroes
#build upper left partition -- from nodearcs
for ir in range(0,nnodes):
for jc in range (0,npipes):
augmentedMat[ir][jc] = nodearcs[ir][jc]
#build lower right == transpose of upper left
istart=nnodes
iend=nnodes+npipes
jstart=npipes
jend=npipes+nnodes
for ir in range(istart,iend):
for jc in range (jstart,jend):
augmentedMat[ir][jc] = -1.0*nodearcs[jc-jstart][ir-istart] + 0.0
# build lower left partition of the matrix
istart = nnodes
iend = nnodes+npipes
jstart = 0
jend = npipes
for i in range(istart,iend ):
for j in range(jstart,jend ):
# print('i =',i,'j=',j)
if (i-istart) == j :
# print('i =',i,'j=',j)
augmentedMat[i][j] = -1.0*lossfactor[j] + 0.0
if verbose == 'true' :
print("updated augmented matrix in iteration",iteration)
writeM(augmentedMat,rowNumA,colNumA,"augmented matrix")
################################################################
# BUILD JACOBIAN MATRIX CURRENT Q+H SOLUTION #
################################################################
# now build current jacobian
for i in range(rowNumA):
for j in range(colNumA):
jacbMat[i][j] = augmentedMat[i][j]
# modify lower left partition
istart = nnodes
iend = nnodes+npipes
jstart = 0
jend = npipes
for i in range(istart,iend ):
for j in range(jstart,jend ):
# print('i =',i,'j=',j)
if (i-istart) == j :
# print('i =',i,'j=',j)
jacbMat[i][j] = 2.0*jacbMat[i][j]
## for jcol in range(0,nnodes+npipes):
## print('irow :',jcol,' solvecnew :',solvecnew[jcol]," solvecguess ",solvecguess[jcol])
## print ("-----------------------------")
# matrix multiply augmentedMat*solvecguess to get current g(Q)
# gq = [0.0 for i in range(rowNumA)] # zero gradient vector
## if verbose == 'true' :
## print("augmented matrix in iteration",iteration)
## writeM(augmentedMat,rowNumA,colNumA,"augmented matrix before mmult")
gq = matrixvectormult(augmentedMat,solvecguess,rowNumA,colNumA)
## if verbose == 'true' :
## writeV(gq,rowNumA,"gq vectorbefore subtract rhs_true")
# subtract rhs
# for i in range(rowNumA):
gq = vectorsub(gq,rhs_true,rowNumA)#vector subtract
if verbose == 'true' :
print("computed g(q) in iteration",iteration)
writeV(gq,rowNumA,"gq vector")
print("compare current and new guess")
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' solvecnew :',solvecnew[jcol]," solvecguess ",solvecguess[jcol])
print ("-----------------------------")
dq = [0.0 for i in range(rowNumA)] # zero update vector
if verbose == 'true' :
writeV(dq,rowNumA,"dq vector before linear solve")
if verbose == 'true' :
print("jacobian before linearsolve in iteration",iteration)
        writeM(jacbMat,rowNumA,colNumA,"jacobian matrix")
dq = linearsolver(jacbMat,gq) # memory leak after this call - linearsolve clobbers input lists
# dq = np.linalg.solve(jacbMat,gq)
if verbose == 'true' :
print("jacobian after linearsolve in iteration",iteration)
        writeM(jacbMat,rowNumA,colNumA,"jacobian matrix")
if verbose == 'true' :
writeV(dq,rowNumA,"dq vector -after linear solve")
solvecnew = vectorsub(solvecguess,dq,rowNumA)#vector subtract
if verbose == 'true' :
print("Q_new = Q_old - DQ")
writeV(solvecnew,rowNumA,"new guess vector")
# tempvect =[ 0.0 for i in range(rowNumA)]
## tempvect = matrixvectormult(jacbMat,dq,rowNumA,colNumA)
## writeV(tempvect,rowNumA,"J*dq vector")
## tempvect = vectorsub(tempvect,gq,rowNumA)
## writeV(tempvect,rowNumA,"J*dq - gq vector")
print("just after computing new guess, should be different")
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' solvecnew :',solvecnew[jcol]," solvecguess ",solvecguess[jcol])
print ("-----------------------------")
#test for stopping
tempvect =[ 0.0 for i in range(rowNumA)]
for i in range(rowNumA):
tempvect[i] = abs(solvecnew[i] - solvecguess[i])
test1 = vdotv(tempvect,tempvect,rowNumA)
if verbose == 'true' :
print('test1',test1)
tempvect =[ 0.0 for i in range(rowNumA)]
for i in range(rowNumA):
tempvect[i] = abs(gq[i])
test2 = vdotv(tempvect,tempvect,rowNumA)
if verbose == 'true' :
print('test2',test2)
if test1 < tolerance1 :
print("update not changing --exit and report current update")
print("iteration",iteration)
# update guess
solvecguess[:] = solvecnew[:]
for i in range(0,npipes,1):
flowguess[i] = solvecguess[i]
break
if test2 < tolerance2 :
print("gradient near zero --exit and report current update")
print("iteration",iteration)
# update guess
solvecguess[:] = solvecnew[:]
for i in range(0,npipes,1):
flowguess[i] = solvecguess[i]
break
if verbose == 'true' :
print("solution continuing")
print("iteration",iteration)
# update guess
solvecguess[:] = solvecnew[:]
if verbose == 'true' :
for i in range(0,npipes,1):
flowguess[i] = solvecguess[i]
## Write Current State ######################
gq = matrixvectormult(augmentedMat,solvecguess,rowNumA,colNumA)
print('number of nodes : ',nnodes)
print('number of pipes : ',npipes)
print('viscosity : ',viscosity)
print ("-----------------------------")
for irow in range(0,nnodes):
print('node id:',irow, ', elevation :',elevation[irow])
print ("-----------------------------")
for jcol in range(0,npipes):
print('pipe id:',jcol,', diameter : ' ,diameter[jcol],', distance : ',distance[jcol],
', roughness : ',roughness[jcol],', flow guess : ',round(flowguess[jcol],3))
print ("-----------------------------")
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' RHS True :',rhs_true[jcol],"RHS Current",round(gq[jcol],3))
print ("-----------------------------")
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' solvecnew :',solvecnew[jcol]," solvecguess ",solvecguess[jcol])
print ("-----------------------------")
################################################
# end of outer loop<jupyter_output>update not changing --exit and report current update
iteration 39
<jupyter_text>Finally write the results and exit the process<jupyter_code>print("results at iteration = :",iteration)
for i in range(0,npipes,1):
flowguess[i] = solvecguess[i]
print('number of nodes : ',nnodes)
print('number of pipes : ',npipes)
print('viscosity : ',viscosity)
print ("-----------------------------")
istart = int(npipes)
for irow in range(0,nnodes):
print('node id:',irow, ', elevation :',elevation[irow],' head :',round(solvecnew[irow+npipes],3))
print ("-----------------------------")
for jcol in range(0,npipes):
print('pipe id:',jcol,', diameter : ' ,diameter[jcol],', distance : ',distance[jcol],
', roughness : ',roughness[jcol],', flow : ',round(flowguess[jcol],3))
print ("-----------------------------")
if verbose == 'true' :
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' RHS True :',rhs_true[jcol],"RHS Current",gq[jcol])
print ("-----------------------------")
for jcol in range(0,nnodes+npipes):
print('irow :',jcol,' solvecnew :',solvecnew[jcol]," solvecguess ",solvecguess[jcol])
print ("-----------------------------")
<jupyter_output>results at iteration = : 39
number of nodes : 4
number of pipes : 6
viscosity : 1.1e-05
-----------------------------
node id: 0 , elevation : 200.0 head : 297.197
node id: 1 , elevation : 200.0 head : 287.489
node id: 2 , elevation : 200.0 head : 288.871
node id: 3 , elevation : 200.0 head : 287.013
-----------------------------
pipe id: 0 , diameter : 1.0 , distance : 800.0 , roughness : 1e-05 , flow : 8.0
pipe id: 1 , diameter : 0.67 , distance : 800.0 , roughness : 1e-05 , flow : 4.04
pipe id: 2 , diameter : 0.67 , distance : 700.0 , roughness : 1e-05 , flow : 0.227
pipe id: 3 , diameter : 0.67 , distance : 700.0 , roughness : 1e-05 , flow : 3.96
pipe id: 4 , diameter : 0.67 , distance : 800.0 , roughness : 1e-05 , flow : 0.773
pipe id: 5 , diameter : 0.5 , distance : 600.0 , roughness : 1e-05 , flow : 0.187
-----------------------------
|
no_license
|
/ce3372-jb/ce3372jb/_build/html/_sources/lessons/lesson08/EPA-RD-5.ipynb
|
dustykat/ce-3372-psuedo-course
| 10 |
<jupyter_start><jupyter_text># Python Basics Regular Course: Session 1 Review
## Data types
### 1.1 boolean
### 1.2 Numbers vs. strings
### 1.3 Variables
### 1.4 Strings
### 1.5 Lists
### 1.6 Dictionaries
### 1.7 Control statements
### 1.8 Functions
## How to use pandas
### pandas usage and applications<jupyter_code># boolean = Boolean data type == logical data type
True==1
False==0
True & False
True | False
# True = 1 / False = 0
# With &, the result is True only if both are True; otherwise False
# With |, the result is True if either one is True; it is False only if both are False
print(type(1), type("1"))
#Numbers entered between quotation marks become strings, so check carefully which type you intend before writing the data
a = 1
a = a+1
a
#After assigning a variable you can keep operating on the same variable, but its value keeps changing with every run,
#so where possible it is safer to assign results to differently named variables to avoid problems
til= "Today I learned"
til
til.capitalize() # only the first character uppercase, the rest lowercase
til.lower() # convert all characters to lowercase
til.upper() # convert all characters to uppercase
address = " 서울시 강남구 역삼동 강남대로94길 15 S2빌딩 3층 "
address
address = address.strip() # strip leading/trailing whitespace, special characters, etc.
address
address[1] # indexing
len(address) # check the string length
address[0:10] # slicing
medicine = ["소화제","감기약","비타민"] # create the data as a list
medicine
medicine.append("마그네슘") # add an element
medicine
medicine.remove("마그네슘") # remove an element
medicine
medicine[-1] # list indexing
medicine[0:2] # list slicing
# string indexing
address[0:7]
address.startswith("서울시 강남구")
# check whether the string starts with a specific substring
# useful in practice when adding an administrative-district column to the data
"서울" in address
# check whether a specific substring is contained
address_list = address.split()
address_list
# join the split strings back together
" ".join(address_list) # the character inside the quotes is used as the separator when joining the list items
# dictionary type
phone = {'서울' : '02',
'경기': '031',
'인천': '032',
'대전': '042',
'부산': '051',
'울산': '052',
'대구': '053',
'광주': '062'}
phone
# get the area code for Seoul
phone["서울"]
# add Jeju to the dictionary
phone["제주"] = "063"
phone
# control statements
for i in phone:
print(f'{i} : {phone[i]}')
# while loop
i = 0
while i <= 10:
print(f'i = {i**2}')
i = i+1
store = ["서울역점", "강남점", "마포점", "여의도점"]
store
# print store together with the index numbers
for loc in enumerate(store):
print(loc)
# store the index number and the element separately from enumerate and print them
for i,loc in enumerate(store):
print(i,loc)
phone = ">경기 031 >강원 033 >충남 041 >충북 043 >경북 054 >경남 055 >전남 061 >전북 063"
phone_split = phone.split(">")
phone_split<jupyter_output><empty_output><jupyter_text># Assignment<jupyter_code># build it as a dictionary
phone_dic = {}
for i in phone_split:
key_value=i.split()
if len(key_value) >=2:
phone_dic[key_value[0]] = key_value[1]
phone_dic
date = "2020.02.02"
def parse_month(date):
    """Function that returns the month"""
month = date.split(".")[1]
return month
parse_month(date)
import pandas as pd
import numpy as np
df = pd.DataFrame()
df["약품명"]= ["소화제", "감기약", "비타민",
"digestive", "Omega3", "오메가3",
"vitamin", "Vitamin"]
df
df["가격"] = 3500
df
df["가격"].tolist() # convert to a list
df["가격"] = [3500, 3200, 4000, 3200, 3700, np.nan, 2000, 1000]
df
type(np.nan)
df["지역"] = "서울"
df["지역2"] = "서울"
df
df = df.drop(columns = "지역2")
#df.drop("지역2", axis = 1) both are equivalent
df
df.info()
df.describe()
df.describe(include = np.object)
df.loc[1,"약품명"]
#%timeit df.loc[1,"약품명"] # check execution time
# fetch only the rows containing specific characters
df[df["약품명"].str.contains("소화|vit")]
df["약품명_lower"] = df["약품명"].str.lower() # in a DataFrame, case conversion is possible after converting the data to strings
df
df[df["약품명_lower"].str.contains("소화|vit")]
df[df["가격"]>3500]
df.sort_values(["가격","약품명"],ascending = [False,True])<jupyter_output><empty_output>
|
no_license
|
/python/정규강의) python 기초/1회차/210202_python basic.ipynb
|
changyong93/nanodegree
| 2 |
<jupyter_start><jupyter_text># Bag of Words for Spam/Ham<jupyter_code>import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
import csv
import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
save_file_name = os.path.join('..','..','datasets', 'spam_data.csv')
if os.path.isfile(save_file_name):
    # the file exists and we only need to load it
text_data = []
with open(save_file_name, 'r') as temp_output_file:
reader = csv.reader(temp_output_file)
for row in reader:
text_data.append(row)
else:
    ## the file does not exist and we must download it
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip"
req = requests.get(url)
z = ZipFile(io.BytesIO(req.content))
file = z.read('SMSSpamCollection')
    ## Convert the binary file to text
text_data = file.decode()
text_data = text_data.encode('ascii', errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x)>=1]
    ## Save the data as CSV
with open(save_file_name, 'w') as temp_output_file:
writer = csv.writer(temp_output_file)
writer.writerows(text_data)
text_data
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
target = [1 if x == 'spam' else 0 for x in target]
texts
target<jupyter_output><empty_output><jupyter_text>### Text preprocessing<jupyter_code>texts = [x.lower() for x in texts] # Convert the texts to lowercase
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts] # Remove punctuation marks
texts = [''.join(c for c in x if c not in '0123456789') for x in texts] # Remove the numbers
texts = [' '.join(x.split()) for x in texts] # Remove extra whitespace and separators
texts_lengths = [len(x.split()) for x in texts]
texts_lengths = [x for x in texts_lengths if x < 50]
plt.hist(texts_lengths, bins=25)
plt.title("Histograma del número de palabras por frase")
plt.show()
sentence_size = 40
min_word_freq = 3
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
vocab_processor.fit_transform(texts)
transformed_texts = np.array([x for x in vocab_processor.transform(texts)])
embedding_size = len(np.unique(transformed_texts))
embedding_size
transformed_texts
texts
train_idx = np.random.choice(len(texts), round(len(texts)*0.8), replace=False)
test_idx = np.array(list(set(range(len(texts)))-set(train_idx)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_idx]
texts_test = [x for ix, x in enumerate(texts) if ix in test_idx]
target_train = [x for ix, x in enumerate(target) if ix in train_idx ]
target_test = [x for ix, x in enumerate(target) if ix in test_idx]
identity_matrix = tf.diag(tf.ones(shape = [embedding_size]))
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape = [1,1], dtype = tf.float32)
x_embed = tf.nn.embedding_lookup(identity_matrix, x_data)
x_col_sums = tf.reduce_sum(x_embed,0)
x_col_sum_2D = tf.expand_dims(x_col_sums,0)
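# Note on the graph above: embedding_lookup on the identity matrix turns each word index
# into a one-hot row, reduce_sum over the sentence collapses those rows into a bag-of-words
# count vector, and expand_dims reshapes it to [1, embedding_size] so the linear model below
# (sigmoid cross-entropy on x*A + b) acts as logistic regression on word counts.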
model_output = tf.add(tf.matmul(x_col_sum_2D, A), b)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
prediction = tf.sigmoid(model_output)
my_optim = tf.train.GradientDescentOptimizer(0.001)
train_step = my_optim.minimize(loss)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
loss_vect = []
train_acc_all = []
train_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_train)):
y_data = [[target_train[ix]]]
session.run(train_step, feed_dict={x_data:t, y_target:y_data})
temp_loss = session.run(loss, feed_dict={x_data:t, y_target:y_data})
loss_vect.append(temp_loss)
if(ix+1)%10==0:
print("Observación de entrenamiento #{}, Perdidas = {}".format(ix+1, temp_loss))
[[temp_pred]] = session.run(prediction, feed_dict={x_data:t, y_target:y_data})
train_acc_temp = target_train[ix]==np.round(temp_pred)
train_acc_all.append(train_acc_temp)
if len(train_acc_all)>=50:
train_acc_avg.append(np.mean(train_acc_all[-50:]))
train_acc_all = []
train_acc_all
plt.plot(train_acc_avg, 'k-', label='Precisión')
plt.ylim([0,1])
test_acc_all = []
test_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_test)):
y_data = [[target_test[ix]]]
[[temp_pred]] = session.run(prediction, feed_dict={x_data:t, y_target:y_data})
test_acc_temp = target_test[ix]==np.round(temp_pred)
test_acc_all.append(test_acc_temp)
if len(test_acc_all)>=50:
test_acc_avg.append(np.mean(test_acc_all[-50:]))
test_acc_all = []
if(ix+1)%50==0:
print("Observación de entrenamiento #{}".format(ix+1))
print("Frase: {}".format(t))
print("Es {} y la predicción dice: {}".format(y_data, temp_pred))
print("Eficacia de predicción globale en test {}".format(np.mean(test_acc_all)))
plt.plot(test_acc_all, 'k-', label = "Eficacia en la predicción")<jupyter_output><empty_output>
|
no_license
|
/2020-06-TensorFlow/scripts/tema08/01-bag-of-words.ipynb
|
ncostamagna/tensorflow
| 2 |
<jupyter_start><jupyter_text>## Course Similarity Inspector
This document is used to inspect which courses have been flagged as being similar to each other in the Orange and Udacity
course catalogues.<jupyter_code>import pandas as pd
import numpy as np
from ipywidgets import interact
import numpy as np
from ipywidgets import widgets
from IPython.display import *
course_query = %sql SELECT * FROM course_description_catalog;
udacity_course_query = %sql SELECT * FROM udacity_course_list;
course_similarity_query = %sql SELECT * FROM course_similarity_table;
udacity_similarity_query = %sql SELECT * FROM udacity_course_similarity_table;
df_courses = course_query.DataFrame()
df_udacity_courses = udacity_course_query.DataFrame()
df_course_similarity = course_similarity_query.DataFrame()
df_udacity_similarity = udacity_similarity_query.DataFrame()
def GetSimilarCourses(row, df, return_limit = None):
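    '''Return the courses similar to the course in `row`: its id is matched against both columns of the pairwise similarity table `df`, and the matches are returned sorted by descending similarity (optionally truncated to return_limit).'''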
course_id = row['course#']
ii = np.where(df['course_number_1'] == course_id)[0]
jj = np.where(df['course_number_2'] == course_id)[0]
similar_course_1_id = df['course_number_2'].values[ii]
similar_scores_1 = df['similiarity'].values[ii]
similar_course_2_id = df['course_number_1'].values[jj]
similar_scores_2 = df['similiarity'].values[jj]
df_result = pd.DataFrame({'course_id': np.append(similar_course_1_id, similar_course_2_id),
'similarity': np.append(similar_scores_1, similar_scores_2)})
df_result.sort_values('similarity', ascending=False, inplace=True)
if return_limit is not None:
df_result = df_result[0:return_limit].copy()
return(df_result)
def GetSimilarUdacityCourses(row, df, return_limit = None):
course_id = row['course#']
ii = np.where(df['orange_course_id'] == course_id)[0]
similar_course_id = df['udacity_course_id'].values[ii]
similar_scores = df['similiarity'].values[ii]
df_result = pd.DataFrame({'course_id': similar_course_id,
'similarity': similar_scores})
df_result.sort_values('similarity', ascending=False, inplace=True)
if return_limit is not None:
df_result = df_result[0:return_limit].copy()
return(df_result)
def GetAllSimilarCourses(row, df_orange, df_udacity, return_limit = None):
df_result = GetSimilarCourses(row, df_orange, return_limit=return_limit)
df_udacity_result = GetSimilarUdacityCourses(row, df_udacity_similarity, return_limit=return_limit)
df_result = pd.merge(df_result, df_courses, left_on = 'course_id', right_on = 'course#', how='left')[['course#', 'series', 'course title', 'similarity']]
df_udacity_result = pd.merge(df_udacity_result, df_udacity_courses, left_on = 'course_id', right_on = 'key', how='left')[['course_id', 'title', 'similarity']]
return((df_result, df_udacity_result))
df_courses_grp = df_courses.groupby("series")
def print_course(course):
df_sub = df_courses_grp.get_group(select_series_widget.value)
row = df_sub[df_sub['course title'] == course].iloc[0]
df_similiar_sub = GetSimilarCourses(row, df_course_similarity, return_limit=5)
df_courses_aug = pd.merge(df_courses, df_similiar_sub, left_on = 'course#', right_on='course_id', how='inner')
df_courses_aug = df_courses_aug[['course#', 'series', 'course title', 'similarity']].copy()
df_courses_aug.sort_values("similarity", inplace=True, ascending=False)
return(HTML(df_courses_aug.to_html()))
def select_series(series):
select_course_widget.options = df_courses_grp.get_group(series)['course title'].sort_values().tolist()
select_series_widget = widgets.Select(options=df_courses.series.drop_duplicates().sort_values().tolist())
init = select_series_widget.value
select_course_widget = widgets.Select(options=df_courses_grp.get_group(init)['course title'].tolist())
j = widgets.interactive(print_course, course=select_course_widget)
i = widgets.interactive(select_series, series=select_series_widget)
display(i)
display(j)
def print_udacity_course(course):
df_sub = df_courses_grp.get_group(select_udacity_series_widget.value)
row = df_sub[df_sub['course title'] == course].iloc[0]
df_similiar_sub = GetSimilarUdacityCourses(row, df_udacity_similarity, return_limit=5)
df_courses_aug = pd.merge(df_udacity_courses, df_similiar_sub, left_on = 'key', right_on='course_id', how='inner')
df_courses_aug = df_courses_aug[['key', 'title', 'short_summary', 'similarity']].copy()
df_courses_aug.sort_values("similarity", inplace=True, ascending=False)
return(HTML(df_courses_aug.to_html()))
def select_udacity_series(series):
select_udacity_course_widget.options = df_courses_grp.get_group(series)['course title'].sort_values().tolist()
select_udacity_series_widget = widgets.Select(options=df_courses.series.drop_duplicates().sort_values().tolist())
init = select_udacity_series_widget.value
select_udacity_course_widget = widgets.Select(options=df_courses_grp.get_group(init)['course title'].tolist())
j_udacity = widgets.interactive(print_udacity_course, course=select_udacity_course_widget)
i_udacity = widgets.interactive(select_udacity_series, series=select_udacity_series_widget)
display(i_udacity)
display(j_udacity)<jupyter_output><empty_output>
|
no_license
|
/orange_project/Course+Similarity+Inspector.ipynb
|
justwjr/Recommendation-Engine
| 1 |
<jupyter_start><jupyter_text># How to save a LightCurve in FITS format?
Once you have detrended or altered a lightcurve in some way, you may want to save it as a FITS file. This allows you to easily share the file with your collaborators or submit your lightcurves as a [MAST High Level Science Product](https://archive.stsci.edu/hlsp/hlsp_guidelines.html) (HLSP). Lightkurve provides a `to_fits()` method which will easily convert your `LightCurve` object into a fits file.
Below is a brief demonstration showing how `to_fits()` works.
Note: if you are considering contributing a HLSP you may want to read the [guidelines](https://archive.stsci.edu/hlsp/hlsp_guidelines_timeseries.html) for contributing fits files. These include which fits headers are required/suggested for your HLSP to be accepted.## Example: editing and writing a lightcurve
First we'll obtain a random Kepler lightcurve from MAST.<jupyter_code>from lightkurve import search_lightcurve
lc = search_lightcurve('KIC 757076', author="Kepler", quarter=3).download()<jupyter_output><empty_output><jupyter_text>Now we'll make some edits to the lightcurve. Below we use the PDCSAP flux from MAST, remove NaN values and clip out any outliers.<jupyter_code>lc = lc.remove_nans().remove_outliers()
lc.scatter();<jupyter_output><empty_output><jupyter_text>Now we can use the `to_fits` method to save the lightcurve to a file called *output.fits*.<jupyter_code>lc.to_fits(path='demo-lightcurve.fits', overwrite=True)<jupyter_output><empty_output><jupyter_text>Let's take a look at the file and check that it behaved as we expect<jupyter_code>from astropy.io import fits
hdu = fits.open('demo-lightcurve.fits')
type(hdu)
hdu.info()<jupyter_output><empty_output><jupyter_text>`hdu` is a set of astropy.io.fits objects, which is what we would expect. Lets take a look at the header of the first extension.<jupyter_code>hdu[0].header<jupyter_output><empty_output><jupyter_text>Looks like it has all the correct information about the target. What about the second extension?<jupyter_code>hdu[1].header<jupyter_output><empty_output><jupyter_text>This extension has 7 columns, `TIME`, `FLUX`, `FLUX_ERR`, `SAP_QUALITY`, `CADENCENO`, `MOM_CENTR1`, and `MOM_CENTR2`. What if we wanted to add new keywords to our fits file? HLSP products require some extra keywords. Let's add some keywords to explain who made the data, and what our HLSP is. <jupyter_code>lc.to_fits(path='demo-lightcurve.fits',
overwrite=True,
HLSPLEAD='Kepler/K2 GO office',
HLSPNAME='TUTORIAL',
CITATION='HEDGES2018')
hdu = fits.open('demo-lightcurve.fits')
hdu[0].header<jupyter_output><empty_output><jupyter_text>Now our new keywords are included in the primary header! What about if we want to add more **data columns** to our fits file? We can simply add data columns in the same way. Let's add the data quality to our fits file.<jupyter_code>demo_vector = lc.fold(period=1.23456789).phase
demo_vector
lc.to_fits(path='demo-lightcurve.fits',
overwrite=True,
HLSPLEAD='Kepler/K2 GO office',
HLSPNAME='TUTORIAL',
CITATION='HEDGES2018',
DEMO_COLUMN=demo_vector)
hdu = fits.open('demo-lightcurve.fits')
hdu[1].data.columns<jupyter_output><empty_output>
|
permissive
|
/docs/source/tutorials/2-creating-light-curves/2-1-saving-a-light-curve.ipynb
|
burke86/lightkurve
| 8 |
<jupyter_start><jupyter_text>#**Assignment 4**<jupyter_code>#imports
import matplotlib.pyplot as plt
import random as rn
import imageio
import os
import math
import numpy as np
import networkx as nx<jupyter_output><empty_output><jupyter_text>##1st task<jupyter_code>def RandomWalk(N,position_x=[0],position_y=[0],boundary_x=[-10,10],boundary_y=[-10,10]):
    '''Function generates a random walk on the lattice. It returns a list of positions on the x axis and a list of positions on the y axis.
    Keyword arguments:
    N -- number of steps
    position_x -- list with start position on x axis
    position_y -- list with start position on y axis
    boundary_x -- list with left and right border on x axis
    boundary_y -- list with left and right border on y axis
'''
i = 0 #iterator of steps
while i<N:
#each of 4 possible steps has probability p=0.25
r = rn.random()
if r<=.25:
#check if location is in borders
if position_x[-1]+1>boundary_x[1]:
pass
else:
#add a new position: one step right
position_x.append(position_x[-1]+1)
position_y.append(position_y[-1])
i+=1
elif .25<r<=.5:
#check if location is in borders
if position_y[-1]+1>boundary_y[1]:
pass
else:
#add a new position: one step up
position_x.append(position_x[-1])
position_y.append(position_y[-1]+1)
i+=1
if .5<r<=.75:
#check if location is in borders
if position_x[-1]-1<boundary_x[0]:
pass
else:
#add a new position: one step left
position_x.append(position_x[-1]-1)
position_y.append(position_y[-1])
i+=1
elif .75<r<=1:
#check if location is in borders
if position_y[-1]-1<boundary_y[0]:
pass
else:
#add a new position: one step down
position_x.append(position_x[-1])
position_y.append(position_y[-1]-1)
i+=1
return position_x, position_y
def GifGenerator(name,position_x,position_y,boundary_x=[-10,10],boundary_y=[-10,10]):
    '''Function generates a gif file.
    Keyword arguments:
    name -- name of the file
    position_x -- list with positions of the agent on the x axis
    position_y -- list with positions of the agent on the y axis
    boundary_x -- list with left and right border on x axis
    boundary_y -- list with left and right border on y axis
'''
path = os.getcwd() #check path
if 'img' not in os.listdir(path):
        #create folder for plots if it does not exist
os.mkdir(path+"/img/")
images = [] #list for snapshot
#prepare lattice
plt.xlim(boundary_x[0]-1,boundary_x[1]+1)
plt.ylim(boundary_y[0]-1,boundary_y[1]+1)
plt.title('Random Walk')
plt.xlabel('x')
plt.ylabel('y')
for i in range(1,len(position_x)):
#draw step
plt.plot(position_x[:i+1],position_y[:i+1],'b')
plt.plot(position_x[:i],position_y[:i],'b.')
plt.plot(position_x[i],position_y[i],'r.')
#save figure in folder as "step"+number
plt.savefig(os.path.join(path+"/img/","step{i}.png".format(i=i)))
#add to images
images.append(imageio.imread(path+"/img/step{i}.png".format(i=i)))
#make a gif of images
imageio.mimsave(name+'.gif', images, duration = 0.8)
return 'Gif file is saved'
rw=RandomWalk(50)
GifGenerator('movie',rw[0],rw[1])<jupyter_output><empty_output><jupyter_text>##2nd task<jupyter_code>def PearsonWalk(N,position_x=[0],position_y=[0],boundary_x = [-10,10],boundary_y = [-10,10]):
    '''Function generates a Pearson random walk on the plane. It returns the fraction of time steps when the walker is in the right half plane (x > 0),
    the fraction of time the walker is in the first quadrant, the list of positions on the x axis and the list of positions on the y axis.
    Keyword arguments:
    N -- number of steps
    position_x -- list with start position on x axis
    position_y -- list with start position on y axis
    boundary_x -- list with left and right border on x axis
    boundary_y -- list with left and right border on y axis
'''
i=0
An=0
Bn=0
while i<N:
#step = 1, so delta_x=cos(phi) and delta_y=sin(phi)
#phi is between 0 and 2*pi
#r*2*pi gives all possible angles
r = rn.random()
        #check if the new location is within the boundaries
if position_x[-1]+math.cos(r*2*math.pi)>boundary_x[1] or position_x[-1]+math.cos(r*2*math.pi)<boundary_x[0] or position_y[-1]+math.sin(r*2*math.pi)>boundary_y[1] or position_y[-1]+math.sin(r*2*math.pi)<boundary_y[0]:
pass
else:
if position_x[-1]+math.cos(r*2*math.pi)>0:
#check if agent is in right half of plane and add 1 to iterator
An+=1
if position_y[-1]+math.sin(r*2*math.pi)>0:
#check if agent is in first quadrant of plane and add 1 to iterator
Bn+=1
#new position of agent
position_x.append(position_x[-1]+math.cos(r*2*math.pi))
position_y.append(position_y[-1]+math.sin(r*2*math.pi))
i+=1
return An/N,Bn/N,position_x,position_y
pw=PearsonWalk(50)
GifGenerator('movie2',pw[2],pw[3])
An=[]
Bn=[]
for _ in range(1000):
An.append(PearsonWalk(1000,position_x=[0],position_y=[0],boundary_x = [-10,10],boundary_y = [-10,10])[0])
Bn.append(PearsonWalk(1000,position_x=[0],position_y=[0],boundary_x = [-10,10],boundary_y = [-10,10])[1])
np.mean(An)
np.mean(Bn)
plt.hist(An,density=True,bins=30)
plt.title('Density of An')
plt.hist(Bn,density=True,bins=30)
plt.title('Density of Bn')<jupyter_output><empty_output><jupyter_text>##3rd task<jupyter_code>def WalkOnGraph(start_node,graph,N):
    '''Function generates a random walk on the graph. It returns the list of visited nodes.
Keyword arguments:
start_node -- start position of walk
graph -- generated graph that the walk will be simulated on
N -- number of steps
'''
    #list of visited nodes, starting with the start node
steps=[start_node]
for i in range(N):
        #choose a random neighbor of the last visited node
steps.append(rn.choice(list(graph.neighbors(steps[-1]))))
return steps
nodes=20 #number of nodes
N=20 #number of steps
#generating walks for different graphs
G1 = nx.barabasi_albert_graph(nodes,4)
ba_walk = WalkOnGraph(0,G1,N)
G2 = nx.watts_strogatz_graph(nodes,2,.4)
ws_walk = WalkOnGraph(0,G2,N)
G3 = nx.gnp_random_graph(nodes,.4)
random_walk = WalkOnGraph(0,G3,N)
def GraphAnimation(G,steps,name='graph_gif'):
'''Function generate a gif of a random walk on the graph.
Keyword arguments:
G -- generated graph that the walk will be simulated on
steps -- list of steps
name -- name of gif file
'''
path = os.getcwd() #check path
if 'graph' not in os.listdir(path):
        #create folder for plots if it does not exist
os.mkdir(path+"/graph/")
#list of images
images = []
f = plt.figure()
plt.title('Random walk on graph')
#nodes will be randomly situated
pos=nx.random_layout(G)
#draw edges
nx.draw_networkx_edges(G,pos)
for i in steps:
#for every step I create a list of hitted node
red_node = [i]
#draw nodes
nx.draw_networkx_nodes(G,pos,node_color='b')
#draw hitted node on red
nx.draw_networkx_nodes(G,pos,nodelist=red_node,node_color='r')
#save plot
f.savefig(os.path.join(path+"/graph/","graph{i}.png".format(i=i)))
#add to list of images
images.append(imageio.imread(path+"/graph/graph{i}.png".format(i=i)))
    #create the gif from the saved images
imageio.mimsave(name+'.gif', images, duration = 0.8)
GraphAnimation(G3,random_walk)
G1 = nx.barabasi_albert_graph(15,4)
ba_walk = WalkOnGraph(7,G1,100)
G2 = nx.watts_strogatz_graph(15,2,.4)
ws_walk = WalkOnGraph(7,G2,100)
G3 = nx.gnp_random_graph(15,.4)
random_walk = WalkOnGraph(7,G3,100)
def AverageHitting(number_of_nodes,start_node,graph,N,mc):
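    '''Estimate, for each node id, the average number of times the walk visits it across mc simulated walks of N steps starting from start_node.'''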
av_hit = {}
for i in range(number_of_nodes):
av_hit['{i}'.format(i=i)] = 0
for _ in range(mc):
steps = WalkOnGraph(start_node,graph,N)
for i in range(number_of_nodes):
av_hit['{i}'.format(i=i)] += np.count_nonzero(np.array(steps) == i)/mc
return av_hit
AverageHitting(15,7,G3,100,100)
AverageHitting(15,7,G2,100,100)
AverageHitting(15,7,G1,100,100)
<jupyter_output><empty_output>
|
no_license
|
/Diff_List_4_234875_Studzienna_Patrycja.ipynb
|
PatrycjaStu/DPOCN
| 4 |
<jupyter_start><jupyter_text># Total Outstanding Cases
> The Total Number of Outstanding Cases and Predicted Recoveries by Geographic Area.
- comments: true
- author: Adrian Turcato
- categories: [growth, compare, interactive]
- hide: false
- image: images/covid-outstanding-cases.png
- permalink: /outstanding_cases/
- toc: true> Note: This dashboard contains the results of a predictive model. The author has tried to make it as accurate as possible. But the COVID-19 situation is changing quickly, and these models inevitably include some level of speculation.## Outstanding Cases by Geography
The chart below shows the total predicted number of outstanding cases, i.e. number of individuals who are still currently ill.
The chart also represents the reported case fatality rate (CFR) via the color of the country, **which is heavily biased by the amount of testing which is performed in each country**.
> Tip: Change the scale of the y axis with the toggle button and hover over chart areas for more details.<jupyter_code>#hide
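# Sketch of how the charted quantities relate (hypothetical example numbers, for orientation):
#   outstanding = confirmed - deaths - recovered
#   CFR         = deaths / confirmed  (equivalently deaths / (outstanding + deaths + recovered))
# e.g. confirmed=1000, deaths=30, recovered=570  ->  outstanding=400, CFR=3%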
from IPython.display import HTML, Javascript, display
from string import Template
import numpy as np
import pandas as pd
import json
import math
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import time
#hide
mpl.rcParams['figure.dpi'] = 100
display(Javascript("require.config({paths: {d3: 'https://d3js.org/d3.v2.min.js'}});"))
#hide
def get_frame(name):
url = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
f'csse_covid_19_time_series/time_series_19-covid-{name}.csv')
df = pd.read_csv(url)
return df
def get_frameNew(name):
url = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
f'csse_covid_19_time_series/time_series_covid19_{name}_global.csv')
df = pd.read_csv(url)
return df
def get_frameArchived(name):
url = (f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/archived_data/archived_time_series/time_series_19-covid-{name}_archived_0325.csv')
df = pd.read_csv(url)
return df
def flatten(df):
array = []
d = df.to_dict()
for v in d.items():
day = v[0]
for c in v[1].items():
country = c[0]
value = c[1]
array.append({"key":country,"value":value,"date":day})
return array
def combine(cx,dx,rx):
all = []
for i, v in enumerate(cx):
v['confirmed'] = v['value']
v['deaths'] = dx[i]['value']
v['recovered'] = rx[i]['value']
if cx[i]['key'] != rx[i]['key']:
print("error: country mismatch")
if cx[i]['date'] != rx[i]['date']:
print("error: date mismatch")
v['value'] = v['value'] - dx[i]['value'] - rx[i]['value']
if v['value'] < 0:
print("error:", cx[i]['date'], cx[i]['key'], v['value'], v['confirmed'], dx[i]['value'], rx[i]['value'])
v['value'] = 0
#calculate CFR
fr = 0
if rx[i]['value'] + dx[i]['value'] + v['value'] > 0:
fr = dx[i]['value'] / (rx[i]['value'] + dx[i]['value'] + v['value'])
v['fatality'] = fr
all.append(v)
return all;
def makeJSVar(array,varname):
json_string = json.dumps(array)
pre = "var " + varname + " ="
post = ";" #"; get" + varname + " = function (){ return " + varname + "};})();"
jsVar = pre + json_string + post
return jsVar
#hide
alternateNames = [
[["East Timor"],"Timor-Leste"]
,[["Taiwan*"],"Taiwan"]
,[["Korea, South"],"South Korea"]
,[["The Bahamas","Bahamas, The"],"Bahamas"]
,[["The Gambia","Gambia, The"],"Gambia"]
,[["Cape Verde"],"Cabo Verde"]
,[["French Guiana","Guadeloupe","Martinique","Mayotte","Reunion"],"France"]
,[["Guam","Puerto Rico"],"US"]
,[["Greenland"],"Denmark"]
,[["Cruise Ship","Diamond Princess"],"International Conveyance"]
,[["Guernsey","Jersey"],"United Kingdom"]
,[["Republic of the Congo"],"Congo (Brazzaville)"]
]
def replaceCountries(y):
for countries in alternateNames:
if y in countries[0]:
return countries[1]
return y
def extractGlobal(df):
df = df.drop(columns=['Province/State', 'Lat', 'Long'])
df['Country/Region'] = df['Country/Region'].apply(lambda x: replaceCountries(x))
df = df.groupby('Country/Region').sum()
return df
abbreviations=[{"name":"Alabama","abbreviation":"AL"},{"name":"Alaska","abbreviation":"AK"},{"name":"American Samoa","abbreviation":"AS"},{"name":"Arizona","abbreviation":"AZ"},{"name":"Arkansas","abbreviation":"AR"},{"name":"California","abbreviation":"CA"},{"name":"Colorado","abbreviation":"CO"},{"name":"Connecticut","abbreviation":"CT"},{"name":"Delaware","abbreviation":"DE"},{"name":"District of Columbia","abbreviation":"D.C."},{"name":"Federated States Of Micronesia","abbreviation":"FM"},{"name":"Florida","abbreviation":"FL"},{"name":"Georgia","abbreviation":"GA"},{"name":"Guam","abbreviation":"GU"},{"name":"Hawaii","abbreviation":"HI"},{"name":"Idaho","abbreviation":"ID"},{"name":"Illinois","abbreviation":"IL"},{"name":"Indiana","abbreviation":"IN"},{"name":"Iowa","abbreviation":"IA"},{"name":"Kansas","abbreviation":"KS"},{"name":"Kentucky","abbreviation":"KY"},{"name":"Louisiana","abbreviation":"LA"},{"name":"Maine","abbreviation":"ME"},{"name":"Marshall Islands","abbreviation":"MH"},{"name":"Maryland","abbreviation":"MD"},{"name":"Massachusetts","abbreviation":"MA"},{"name":"Michigan","abbreviation":"MI"},{"name":"Minnesota","abbreviation":"MN"},{"name":"Mississippi","abbreviation":"MS"},{"name":"Missouri","abbreviation":"MO"},{"name":"Montana","abbreviation":"MT"},{"name":"Nebraska","abbreviation":"NE"},{"name":"Nevada","abbreviation":"NV"},{"name":"New Hampshire","abbreviation":"NH"},{"name":"New Jersey","abbreviation":"NJ"},{"name":"New Mexico","abbreviation":"NM"},{"name":"New York","abbreviation":"NY"},{"name":"North Carolina","abbreviation":"NC"},{"name":"North Dakota","abbreviation":"ND"},{"name":"Northern Mariana Islands","abbreviation":"MP"},{"name":"Ohio","abbreviation":"OH"},{"name":"Oklahoma","abbreviation":"OK"},{"name":"Oregon","abbreviation":"OR"},{"name":"Palau","abbreviation":"PW"},{"name":"Pennsylvania","abbreviation":"PA"},{"name":"Puerto Rico","abbreviation":"PR"},{"name":"Rhode Island","abbreviation":"RI"},{"name":"South Carolina","abbreviation":"SC"},{"name":"South Dakota","abbreviation":"SD"},{"name":"Tennessee","abbreviation":"TN"},{"name":"Texas","abbreviation":"TX"},{"name":"Utah","abbreviation":"UT"},{"name":"Vermont","abbreviation":"VT"},{"name":"Virgin Islands","abbreviation":"VI"},{"name":"Virginia","abbreviation":"VA"},{"name":"Washington","abbreviation":"WA"},{"name":"West Virginia","abbreviation":"WV"},{"name":"Wisconsin","abbreviation":"WI"},{"name":"Wyoming","abbreviation":"WY"}]
def replaceStates(y):
print(y)
x = y.split(",")
if len(x) > 1:
for i, e in enumerate(abbreviations):
if e['abbreviation'].strip() == x[1].strip():
return e['name']
if y == "United States Virgin Islands":
return "Virgin Islands"
return y
def extractUSA(df):
df = df[df['Country/Region'] == "US"]
df = df.drop(columns=['Country/Region', 'Lat', 'Long'])
df = df[~df['Province/State'].isin(["US"])]
df['Province/State'] = df['Province/State'].apply(lambda x: replaceStates(x))
df = df.groupby('Province/State').sum()
dates = df.columns[:32].values
df = df.drop(columns=dates)
return df
def extractCanada(df):
df = df[df['Country/Region'] == "Canada"]
df = df.drop(columns=['Country/Region', 'Lat', 'Long'])
df = df.groupby('Province/State').sum()
dates = df.columns[:32].values
df = df.drop(columns=dates)
return df
eu = ['Germany','Finland','Italy','Spain','Belgium','Switzerland','Austria','Greece','Norway','Romania','Estonia','San Marino','Belarus','Iceland','Lithuania','Ireland','Luxembourg','Monaco','Portugal','Andorra','Latvia','Ukraine' 'Hungary','Liechtenstein','Poland','Bosnia and Herzegovina','Slovenia','Serbia','Slovakia','Bulgaria','Albania','Holy See','France','Denmark','Czechia','Moldova','United Kingdom','Kosovo','Netherlands','Montenegro']
def extractEurope(df):
df = df[df['Country/Region'].isin(eu)]
df = df.drop(columns=['Province/State', 'Lat', 'Long'])
df = df.groupby('Country/Region').sum()
datesEU = df.columns[:32].values
df = df.drop(columns=datesEU)
return df
#hide
confirmedRaw = get_frameNew('confirmed')
deathsRaw = get_frameNew('deaths')
recoveredRaw = get_frameArchived('Recovered')
#hide
def predictRecovered(data):
c = data["confirmed"]
d = data['deaths']
r = data['recovered']
rp = data['recovered_predicted']
###Add columns
for col in c.columns:
if col not in rp.columns:
rp[col] = float('nan')
#Replace column values
for i, row in rp.iterrows():
for j, v in enumerate(row.values):
if math.isnan(v):
date = pd.to_datetime(rp.columns[j])
dateMinus14 = date - pd.to_timedelta('9 days')
colMinus14 = dateMinus14.strftime('%-m/%-d/%y')
lastr = rp.loc[i,rp.columns[j-1]]
rp.loc[i,rp.columns[j]] = round(lastr + (c.loc[i,colMinus14]-lastr)*0.07)
return {"confirmed":c,"deaths":d,"recovered_predicted":rp,"recovered":r}
def collect(extract):
c = extract(confirmedRaw)
r = extract(recoveredRaw)
d = extract(deathsRaw)
notInC = [x for x in r.index if x not in c.index]
notInR = [x for x in c.index if x not in r.index]
if len(notInC) > 0:
print("country mismatch- missing in new data:",notInC)
for country in notInR:
r.loc[country] = 0
r = r.sort_index();
data = predictRecovered({"confirmed":c,"deaths":d,"recovered_predicted":r,"recovered":r})
return data
#hide
globalData = collect(extractGlobal)
#hide
def prepData(data,varname):
c = flatten(data["confirmed"])
r = flatten(data["recovered_predicted"])
d = flatten(data["deaths"])
all = combine(c,d,r)
return makeJSVar(all,varname)
#hide
getGlobalDataJson = prepData(globalData,"globalData")
# getUsaDataJson = prepData(extractUSA,"usaData")
# getCanadaDataJson = prepData(extractCanada,"canadaData")
# getEuropeDataJson = prepData(extractEurope,"europeData")
#hide
countries = ["China","Italy","France","Germany","Spain","US","Korea, South","Iran","Switzerland","United Kingdom","Austria","Netherlands","Belgium"]
def topCountries(data,varname):
confirmed = data['confirmed']
recovered = data['recovered_predicted']
deaths = data['deaths']
df = confirmed - recovered - deaths
all = []
l = len(df.columns.values)
for i in range(0,l):
day = []
col = df.columns[i:i+1].values
sumA = sum(confirmed[col[0]].values)
#top cases
nonzero = df[df.index.isin(countries)]
nonzero = nonzero[nonzero[col[0]] / sumA > 0.0025]
onecol = nonzero[col[0]]
onecol = onecol.sort_values(ascending=False)
topten = onecol #.head(10)
sumB = sum(topten.values)
d = topten.to_dict()
for j, c in enumerate(d.items()):
day.append({"rank":j,"place":c[0],"count":c[1], "pct":c[1]/sumA, "date":col[0]})
#all deaths and recoveries
sumD = sum(deaths[col[0]])
sumR = sum(recovered[col[0]])
#all other
if sumA - (sumB + sumD + sumR) > 0:
n = int(sumA - (sumB + sumD + sumR))
m = float(n/sumA)
day.append({"rank":100,"place":"All Other","count":n, "pct":m, "date":col[0]})
#add
day.append({"rank":101,"place":"Recoveries","count":sumR, "pct":sumR/sumA, "date":col[0]})
day.append({"rank":102,"place":"Deaths","count":sumD, "pct":sumD/sumA, "date":col[0]})
all.append(day)
return makeJSVar(all,varname)
#hide
getTopGlobalDataJson = topCountries(globalData,"topGlobalData")
#hide
html_temp = Template('''
<script src="https://d3js.org/d3.v2.min.js"></script>
<style scoped>
$css_text
</style>
<div id="streamgraph">
<button onclick="toggleScale()" id="toggle">Toggle Scale</button>
<div class="chart">
</div>
</div>
<script>
$getGlobal
$d3_script
</script>
''')
#hide
css_text = '''
.chart, .bump-chart {
font: 10px sans-serif;
background: #fff;
height: 575px;
}
.stream-tooltip, .bump-tooltip {
font: 10px sans-serif;
}
.axis path, .axis line {
fill: none;
stroke: #000;
stroke-width: 1px;
shape-rendering: crispEdges;
}
.buttons {
font: 11px sans-serif;
position: relative;
left: 50px;
top: 10px;
}
#toggle {
font: 11px sans-serif;
position: relative;
left: 660px;
top: 10px;
}'''
#hide
d3_script = '''
drawStream();
function drawStream(){
var margin, width, height;
var svg, tooltip, area, blank;
var y, x, yAxis, xAxis;
var data, layers, logScale;
var usaLayers, globalLayers, euLayers;
var dataClass, prevClass;
margin = {top: 20, right: 70, bottom: 100, left: 30};
width = 750 - margin.left - margin.right;
height = 550 - margin.top - margin.bottom;
svg = d3.select(".chart").append("svg")
.attr("id","stream-svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
var cs = d3.scale.linear()
.domain([0,0.5,1])
.interpolate(d3.interpolateRgb)
.range([d3.rgb('#cc0000'),d3.rgb(249, 247, 174),d3.rgb(0, 104, 55)]);
var format = d3.time.format("%m/%d/%y");
function formatDate (d) {
d.date = format.parse(d.date);
d.value = +d.value;
}
// axis
x = d3.time.scale()
.range([0, width])
.clamp(true);
xAxis = d3.svg.axis()
.orient("bottom")
.ticks(d3.time.weeks);
// .tickFormat(d3.time.format("%b %d"));
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")");
yAxis = d3.svg.axis()
.orient("right")
.tickFormat(d3.format(","));
svg.append("g")
.attr("class", "y axis")
.attr("transform", `translate(${width + 10}, 0)`);
// clipping paths
svg.append("defs")
.append("clipPath")
.attr("id","clip")
.append("rect")
.attr("width",width)
.attr("height",height-10)
.attr("x",0)
.attr("y",0);
svg.append("text")
.attr("text-anchor", "middle")
.attr("transform", "translate("+ (width + margin.right*0.8) +","+(height/2)+")rotate(90)")
.text("Current Outstanding Cases");
svg.append("text")
.attr("text-anchor", "middle")
.attr("transform", "translate("+ (width/2) +","+(height + 40)+")")
.text("Date");
area = d3.svg.area()
.interpolate("cardinal")
.x(d => x(d.date));
blank = d3.svg.area()
.interpolate("cardinal")
.x(d => x(d.date));
//usaData.forEach(formatDate);
//usaLayers = prepareData(usaData);
globalData.forEach(formatDate);
globalLayers = prepareData(globalData);
//europeData.forEach(formatDate);
//euLayers = prepareData(europeData);
//canadaData.forEach(formatDate);
//canadaLayers = prepareData(canadaData);
dataClass = "globalData";
prevClass = "usaData";
data = globalData;
layers = globalLayers;
logScale = true;
drawScale();
drawLegend();
draw();
toggleScale = function (){
logScale = !logScale;
drawScale();
}
toggleUSA = function(){
prevClass = dataClass;
dataClass = "usaData";
data = usaData;
layers = usaLayers;
drawScale();
draw();
}
toggleGlobal = function (){
prevClass = dataClass;
dataClass = "globalData";
data = globalData;
layers = globalLayers;
drawScale();
draw();
}
toggleEU = function(){
prevClass = dataClass;
dataClass = "europeData";
data = europeData;
layers = euLayers;
drawScale();
draw();
}
toggleCanada = function(){
prevClass = dataClass;
dataClass = "canadaData";
data = canadaData;
layers = canadaLayers;
drawScale();
draw();
}
function drawScale(){
area.y0(d => {return y(d.y0 + 1)})
.y1(d => {return y(d.y0 + d.y + 1)});
blank.y0(d => height)
.y1(d => height);
var yExt = d3.extent(data, d => {return d.y0 + d.y});
var logMin = 1000;
if(dataClass == "usaData") logMin = 300;
if(dataClass == "canadaData") logMin = 10;
y = d3.scale.log()
.range([height-10, 0])
.domain([yExt[1]/200,yExt[1]])
.clamp(true);
if(!logScale){
y = d3.scale.linear()
.range([height-10, 0])
.domain(yExt)
.clamp(true);
}
yAxis.scale(y);
x.domain(d3.extent(data, d => d.date));
xAxis.scale(x);
svg.selectAll(".y")
.transition("axis")
.duration(1000)
.call(yAxis);
svg.selectAll(".x")
.transition("axis")
.duration(1000)
.call(xAxis);
// .selectAll("text")
// .style("text-anchor", "end")
// .attr("dx", "-.8em")
// .attr("dy", ".15em")
// .attr("transform", "rotate(-65)" );
svg.selectAll(".layer")
.transition("axis")
.duration(1000)
.attr("d", d => area(d.values))
}
function draw(){
var leaving = svg.selectAll(`.${prevClass}`);
leaving.transition("load")
.duration(1000)
.attr("d", d => blank(d.values))
.remove();
var countries = svg.selectAll(`.${dataClass}`)
.data(layers);
countries.enter()
.append("path")
.attr("d", d => blank(d.values))
.on("mousemove", d => updateTooltip(d))
.on("mouseout", function(d, i) {
tooltip.style("visibility", "hidden");
})
.transition("load")
.duration(1000)
.attr("d", d => blank(d.values))
.attr("class", `layer ${dataClass}`)
.attr("d", d => area(d.values))
.attr("fill", d => {return `url(#${d.gradient})`})
.attr("clip-path","url(#clip)")
.attr("opacity", 1);
}
function drawLegend(){
var defs = svg.append("defs");
var gradient = defs.append("linearGradient")
.attr("id", "legend-grad")
.attr("x1", "0%")
.attr("x2", "100%")
.attr("y1", "0%")
.attr("y2", "0%");
for(var i = 0; i <= 10; i++){
gradient.append("stop")
.attr("offset", `${i*10}%`)
.attr("stop-color", cs(1-i/10)) // d3.interpolateRdYlGn(1-i/10))
.attr("stop-opacity", 1);
}
var legend = svg.append("g")
.attr("id","legend")
.attr("transform", `translate(${(width/2)-150},${height+65})`)
legend.append("rect")
.attr("width",300)
.attr("height",15)
.attr("fill", "url(#legend-grad)");
legend.append("text")
.attr("text-anchor", "end")
.attr("transform", "translate("+ -10 +","+10+")")
.text("Case Fatality Rate (CFR)");
var lScale = d3.scale.linear()
.range([0, 300])
.domain([0.0,0.1]);
var lAxis = d3.svg.axis()
.orient("bottom")
.scale(lScale)
.tickSize(18,0)
.tickFormat(d3.format(".1%"));
var l2 = svg.append("g")
.attr("class", "legend-axis axis")
.attr("transform", `translate(${(width/2)-150},${height+65})`)
.call(lAxis);
l2.selectAll("path")
.remove();
}
function prepareData(data){
//manipulate data
var nest = d3.nest()
.key(function(d) { return d.key; });
var alphabetic = nest.entries(data);
var l = alphabetic.length;
//order data
for(var i = 0; i < l; i++){
var vals = alphabetic[i].values;
var lv = vals.length;
var current = vals[lv-1];
alphabetic[i].fatality = current.fatality;
alphabetic[i].current = current.value;
alphabetic[i].index = i
}
alphabetic.sort((a,b) => (a.current - b.current));
var stack = d3.layout.stack()
.offset("zero")
.order(() => alphabetic.map(e => e.index))
.values(d => d.values)
.x(d => d.date)
.y(d => d.value);
//add color gradients
for(var i = 0; i < l; i++){
var name = alphabetic[i].key.replace(/\s/g, '')
var id = `gradient-${name}`;
alphabetic[i].gradient = id;
var defs = svg.append("defs");
var gradient = defs.append("linearGradient")
.attr("id", id)
.attr("x1", "0%")
.attr("x2", "100%")
.attr("y1", "0%")
.attr("y2", "0%");
var values = alphabetic[i].values;
var lv = alphabetic[i].values.length;
for(var j = 0; j < lv; j++){
var offset = `${Math.round(j*100/(lv-1))}%`;
var fr = values[j].fatality/0.1;
var col = cs(1-fr); //d3.interpolateRdYlGn(1-fr);
gradient.append("stop")
.attr("offset", offset)
.attr("stop-color", col)
.attr("stop-opacity", 1);
}
}
var layers = stack(nest.entries(data));
for(var i = 0; i < l; i++){
var name = layers[i].key.replace(/\s/g, '')
var id = `gradient-${name}`;
layers[i].gradient = id;
}
return layers;
}
function updateTooltip(d){
var leftOffset = document.getElementById("stream-svg").getBoundingClientRect().left;
var date = roundDate(x.invert(event.clientX - leftOffset - margin.left));
var val = d.values[d.values.length-1];
d.values.forEach(e => {
if(e.date.getTime()==date.getTime()){
val = e;
}
})
var current = d3.format(",")(val.value)
var deaths = d3.format(",")(val.deaths)
var recoveries = d3.format(",")(val.recovered)
var fatality = d3.format(".1%")(val.fatality)
var day = d3.time.format("%m/%d/%y")(val.date)
var n = d.key;
tooltip = svg.selectAll(".stream-tooltip")
.data([n,day,`est. cases: ${current}`,`deaths: ${deaths}`,`recoveries: ${recoveries}`,`CFR: ${fatality}`]);
tooltip.style("visibility", "visible")
.attr("opacity",1)
.text(d => d);
tooltip.enter()
.append("text")
.attr("class", "stream-tooltip")
.attr("x","0")
.attr("y",(d,i) => `${i}em`)
.attr("font-weight", (d,i) => i == 0 ? "bold" : "normal")
.attr("opacity",1)
.text(d => d);
}
function roundDate(timeStamp){
var d = new Date(timeStamp);
d.setHours(0);
d.setMinutes(0);
d.setSeconds(0);
d.setMilliseconds(0);
return d;
}
}
'''
#hide
html_text = html_temp.substitute({
'css_text':css_text
,'d3_script':d3_script
,"getGlobal":getGlobalDataJson
# ,"getUSA":getUsaDataJson
# ,"getEU":getEuropeDataJson
# ,"getCanada":getCanadaDataJson
})
#hide_input
HTML(html_text)<jupyter_output><empty_output><jupyter_text>The table below shows summary statistics for the last 7 days. $Outstanding = Confirmed - Deaths - Recovered$.<jupyter_code>#hide_input
totalConfirmed = globalData['confirmed'].sum(axis=0)
totalDeaths = globalData['deaths'].sum(axis=0)
totalRecovered = pd.to_numeric(globalData['recovered'].sum(axis=0), downcast='integer')
frame = {'Confirmed': totalConfirmed, 'Deaths': totalDeaths, 'Est. Recoveries': totalRecovered, 'Outstanding':totalConfirmed-totalDeaths-totalRecovered}
result = pd.DataFrame(frame)
result = result.set_index(pd.to_datetime(result.index)).sort_index()
result.tail(7)<jupyter_output><empty_output><jupyter_text>## Percent of Global Total
This next chart shows the number of outstanding cases as a percent of the total confirmed global cases. Only countries representing a significant contribution to global totals are shown.
> Tip: Hover over chart areas for more details.<jupyter_code>#hide
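# Sketch of the percentages plotted below (hypothetical numbers, for orientation):
# each country's band = its outstanding cases / total confirmed cases on that day, with the
# remainder split into "All Other", "Recoveries" and "Deaths" so the bands sum to 100%.
# e.g. total confirmed = 500,000 and a country's outstanding = 50,000  ->  that band = 10%.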
areabump_temp = Template('''
<script src="https://d3js.org/d3.v2.min.js"></script>
<style scoped>
$css_text
</style>
<div id="bumpchart">
<div class="bump-chart">
</div>
</div>
<script>
$getTopGlobal
$d3_bump_script
</script>
''')
#hide
d3_bump_script = '''
drawBump();
function drawBump(){
var colors = {
"Korea, South":"darkred"
,"China":"pink"
,"US":"blue"
,"United Kingdom":"orange"
,"France":"lightblue"
,"Germany":"yellow"
,"Italy":"tan"
,"Spain":"purple"
,"Switzerland":"violet"
,"Iran":"green"
,"Netherlands":"cyan"
,"Austria":"teal"
,"Belgium":"brown"
,"All Other":"lightgray"
,"Recoveries":"lightgreen"
,"Deaths":"#ff590d"
};
var margin, width, height;
var svg, tooltip, area, blank;
var x, y, h, yAxis, xAxis;
var data, layers, logScale, numDays;
var dataClass = "globalData", prevClass;
var format = d3.time.format("%m/%d/%y");
var nest = d3.nest()
.key(function(d) { return d.place; });
margin = {top: 20, right: 60, bottom: 40, left: 30};
width = 750 - margin.left - margin.right;
height = 500 - margin.top - margin.bottom;
svg = d3.select(".bump-chart").append("svg")
.attr("id","bump-svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
// axis
x = d3.time.scale()
.range([0, width])
.clamp(true);
xAxis = d3.svg.axis()
.orient("bottom")
.ticks(d3.time.weeks);
svg.append("g")
.attr("class", "x-bump axis")
.attr("transform", `translate(0,${height-5})`);
yAxis = d3.svg.axis()
.orient("right")
.tickFormat(d3.format("%"));
svg.append("g")
.attr("class", "y-bump axis")
.attr("transform", `translate(${width+10}, 0)`);
// clipping paths
svg.append("defs")
.append("clipPath")
.attr("id","clip-bump")
.append("rect")
.attr("width", width)
.attr("height", height-10)
.attr("x",0)
.attr("y",0);
svg.append("text")
.attr("text-anchor", "middle")
.attr("transform", "translate("+ (width + margin.right*0.8) +","+(height/2)+")rotate(90)")
.text("% of Total Cases");
svg.append("text")
.attr("text-anchor", "middle")
.attr("transform", "translate("+ (width/2) +","+(height + 40)+")")
.text("Date");
area = d3.svg.area()
.interpolate("basis")
.x(d => x(d.date));
blank = d3.svg.area()
.interpolate("basis")
.x(d => x(d.date));
globalTuple = prepareData(topGlobalData);
data = globalTuple.data;
numDays = topGlobalData.length;
layers = globalTuple.layers;
logScale = false;
drawScale();
draw();
this.toggleBumpScale = function (){
logScale = !logScale;
drawScale();
}
function drawScale(){
area.y0(d => {return y(d.y0)})
.y1(d => {return y(d.y0 + d.y)});
blank.y0(d => height)
.y1(d => height);
y = d3.scale.linear()
.range([height-10, 0])
.domain([0,1]);
yAxis.scale(y);
x.domain(d3.extent(data, d => d.date));
xAxis.scale(x);
svg.selectAll(".y-bump")
.transition("axis")
.duration(1000)
.call(yAxis);
svg.selectAll(".x-bump")
.transition("axis")
.duration(1000)
.call(xAxis);
svg.selectAll(".layer-bump")
.transition("axis")
.duration(1000)
.attr("d", d => area(d.values))
}
function draw(){
var countries = svg.selectAll(`.${dataClass}`)
.data(layers);
countries.enter()
.append("path")
.attr("d", d => blank(d.values))
.on("mousemove", d => updateBumpTooltip(d))
.on("mouseout", function(d, i) {
tooltip.style("visibility", "hidden");
})
.transition("load")
.duration(1000)
.attr("d", d => blank(d.pct))
.attr("class", `layer-bump ${dataClass}`)
.attr("d", d => area(d.values))
.attr("fill", (d,i) => colors[d.key]) //d => {return `url(#${d.gradient})`})
.attr("stroke","black")
.attr("opacity", 1);
}
function updateBumpTooltip(d){
var leftOffset = document.getElementById("bump-svg").getBoundingClientRect().left;
var date = roundDate(x.invert(event.clientX - leftOffset - margin.left));
var val = d.values[d.values.length-1];
d.values.forEach(e => {
if(e.date.getTime()==date.getTime()){
val = e;
}
})
var current = d3.format(",")(val.count)
var pct = d3.format("%")(val.pct)
var day = d3.time.format("%m/%d/%y")(val.date)
var n = d.key;
tooltip = svg.selectAll(".bump-tooltip")
.data([n,day,`est. cases: ${current}`,`percent: ${pct}`]);
tooltip.style("visibility", "visible")
.text(d => d);
tooltip.enter()
.append("text")
.attr("class", "bump-tooltip")
.attr("x","10")
.attr("y",(d,i) => `${2+i}em`)
.attr("font-weight", (d,i) => i == 0 ? "bold" : "normal")
.text(d => d);
}
function prepareData(data){
//fill in zeros
/*var ld = data.length;
for(var i = 0; i < ld; i++){
var b = data[i].map(x => x.place);
var lb = b.length;
if(i-1 >= 0) var a = data[i-1].map(x => x.place);
if(i+1 < ld) var c = data[i+1].map(x => x.place);
for(var j = 0; j < lb; j++){
if(i-1 >= 0){
if(a.indexOf(b[j]) == -1){
data[i-1].push({"rank":11,"place":data[i][j].place,"count":0,"pct":0,"date":data[i][j].date});
}
}
if(i+1 < ld){
if(c.indexOf(b[j]) == -1){
data[i+1].push({"rank":11,"place":data[i][j].place,"count":0,"pct":0,"date":data[i][j].date});
}
}
}
}*/
//find a y and y0 for each data point
var ld = data.length;
var flat = [];
for(var i = 0; i < ld; i++){
var sum = 0;
var padding = 0;
var lid = data[i].length;
while(lid--){
data[i][lid]["date"] = format.parse(data[i][lid]["date"]);
data[i][lid]["y0"] = sum;
data[i][lid]["y"] = data[i][lid]["pct"];
flat.push(data[i][lid])
sum += data[i][lid]["pct"] + padding;
}
}
var layers = nest.entries(flat);
layers = layers.sort(
(a,b) => b.values[b.values.length-1].rank - a.values[a.values.length-1].rank
);
return {data:flat, layers:layers}
}
function roundDate(timeStamp){
var d = new Date(timeStamp);
d.setHours(0);
d.setMinutes(0);
d.setSeconds(0);
d.setMilliseconds(0);
return d;
}
}
'''
#hide
areabump_text = areabump_temp.substitute({
'css_text':css_text
,'d3_bump_script':d3_bump_script
,'getTopGlobal':getTopGlobalDataJson
})
#hide_input
HTML(areabump_text)<jupyter_output><empty_output><jupyter_text>## Appendix: Methodology of Predicting Recovered Cases
[Johns Hopkins University's (JHU) dataset](https://systems.jhu.edu/research/public-health/ncov/) initially reported recovered cases but has since discontinued this; however, estimating the recovery duration and extrapolating for current cases should be possible from this original data.
For the time being (I hope to draw from other discussions of this topic), I will use an empirically derived formula from the limited data available from JHU:
$$R_{n} = R_{n-1} + (C_{n-9} - R_{n-1}) \cdot 0.07$$
Where $R_{n}$ is the total number of recovered cases on day $n$, and $C_{n}$ is the total number of confirmed cases on day $n$.
What it implies is that, on any given day, 7% of the still-unresolved cases that were first reported 9 days previously are assumed to have either recovered or passed away. Because the 7% applies to the remaining unresolved cases each day, the resolved share compounds rather than growing linearly: roughly 44% of a cohort would have recovered or passed away after 16 days, and roughly 66% after 23 days.
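To make the recurrence concrete, it can be wrapped in a small helper that walks forward through a confirmed-case series. This is a minimal sketch; the function and argument names are placeholders, and the same logic is applied to the China and global series in the hidden cells below.

```python
import numpy as np

def predict_recoveries(confirmed, recovered_seed, rate=0.07, lag=9):
    # Keep the known recovered counts for the first `lag` days, then extrapolate:
    # each day, `rate` of the cases confirmed `lag` days earlier that have not yet
    # resolved are assumed to have recovered (or passed away).
    predicted = list(recovered_seed[:lag])
    for n in range(lag, len(confirmed)):
        prev = predicted[n - 1]
        predicted.append(prev + (confirmed[n - lag] - prev) * rate)
    return np.array(predicted)
```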
This formula is only used to predict the number of recoveries for the period in which JHU's data is no longer available. We can compare the results of this formula with the existing data from JHU to show how well it fits. This can be seen in the following 2 graphs.<jupyter_code>#hide
totalConfirmedChina = globalData['confirmed'].loc["China"]
tcvC = totalConfirmedChina.values
totalRecoveredC = pd.to_numeric(globalData['recovered'].loc["China"], downcast='integer')
trvC = totalRecoveredC.values
tprvC = trvC[:9]
#calculate
for n in range(9,len(tcvC)):
v = tprvC[n-1] + (tcvC[n-9] - tprvC[n-1])*0.07
tprvC = np.append(tprvC,v)
frameC = {"Predicted Recoveries":tprvC,'Recoveries': totalRecoveredC}
resultC = pd.DataFrame(frameC)
resultC = resultC.set_index(pd.to_datetime(resultC.index)).sort_index()
#hide
totalConfirmedGlobal = globalData['confirmed'].sum(axis=0)
tcvG = totalConfirmedGlobal.values
totalRecoveredGlobal = pd.to_numeric(globalData['recovered'].sum(axis=0), downcast='integer')
trvG = totalRecoveredGlobal.values
tprvG = trvG[:9]
#calculate
for n in range(9,len(tcvG)):
v = tprvG[n-1] + (tcvG[n-9] - tprvG[n-1])*0.07
tprvG = np.append(tprvG,v)
frameG = {"Predicted Recoveries":tprvG,'Recoveries': totalRecoveredGlobal}
resultG = pd.DataFrame(frameG)
resultG = resultG.set_index(pd.to_datetime(resultG.index)).sort_index()
#hide_input
# create a subplot figure with two side-by-side plots
fig, axes = plt.subplots(nrows=1,ncols=2,figsize=(12,6))
axes[0].yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
axes[1].yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
axes[0].set_xlabel('Date')
axes[1].set_xlabel('Date')
axes[0].set_ylabel('Cases')
axes[1].set_ylabel('Cases')
axes[0].set_title('Global: Predicted Recoveries')
axes[1].set_title("China: Predicted Recoveries")
resultG.plot(ax = axes[0])
resultC.plot(ax = axes[1])
plt.show()<jupyter_output><empty_output>
|
permissive
|
/_notebooks/2020-03-23-cumulative-outstanding.ipynb
|
hanhabesha/covid19-dashboard
| 4 |
<jupyter_start><jupyter_text># How to create README.md<jupyter_code>#!/usr/bin/env python
from sys import version_info
if version_info[0] < 3:
from urllib import quote
else:
from urllib.request import quote
from glob import glob
import json
import os
header = '''
Private notebook files
--------------------------------------------------
Welcome, this is morishin's web page
File List
--------------------------------------------------
'''
footer = '''
--------------------------------------------------
Thanks!
'''
#http://192.168.98.100:8081/url/192.168.99.100:8090/a8838/NoteBook/raw/master/index.ipynb
format_item = '* [{name}]({url})'.format
#bb_url = u'192.168.99.100:8090/a8838/NoteBook/raw/master/{}'.format
bb_url = u'192.168.99.100:8090/a8838/NoteBook/raw/master/'
def notebooks():
for root, dirs, files in os.walk(os.getcwd()):
for filename in files:
if filename.lower().endswith('.' + 'ipynb'):
#print (os.path.join(root, filename))
if "ipynb_checkpoints" in os.path.join(root, filename):
#print (os.path.join(root, filename))
pass
else:
yield os.path.join(root, filename)
def get_filename(filename):
#with open(filename) as fo:
# return json.load(fo)['metadata']['name']
#print (os.path.relpath(filename,os.getcwd()))
return (os.path.relpath(filename,os.getcwd()))
#return os.path.basename(filename)
def nb_url(filename):
# The double quote is not an error
#raw_url = bb_url(quote(quote(filename)))
#raw_url = bb_url+os.path.basename(filename)
raw_url = bb_url+(os.path.relpath(filename,os.getcwd()))
return u'http://192.168.98.100:8081/url/{}'.format(raw_url)
def write_readme(nblist, fo):
fo.write('{}\n'.format(header))
for nb in nblist:
name = get_filename(nb)
url = nb_url(nb)
fo.write('{}\n'.format(format_item(name=name, url=url)))
fo.write('{}\n'.format(footer))
def main():
nblist = notebooks()
with open('README.md', 'w') as fo:
write_readme(nblist, fo)
if __name__ == '__main__':
main()<jupyter_output><empty_output>
|
no_license
|
/01_README.md作成方法.ipynb
|
morishin8838/nbviewer
| 1 |
<jupyter_start><jupyter_text><jupyter_code><jupyter_output><empty_output>
|
no_license
|
/Regression.ipynb
|
LongntLe/CS4780
| 1 |
<jupyter_start><jupyter_text># Sample NER Workflow for FromThePage
Read data from the FromThePage (FTP) XML file and pass it through the spaCy NER pipeline <jupyter_code>import spacy
import pandas as pd
import utils
%load_ext autoreload
%autoreload 2
# download the spacy models we need
model = 'en_core_web_md'
#spacy.cli.download(model)
nlp = spacy.load(model)<jupyter_output><empty_output><jupyter_text>We first read the data from the tei.xml file exported from FromThePage.<jupyter_code>texts = utils.read_ftp_xml('data/tei.xml')
texts.head()<jupyter_output><empty_output><jupyter_text>## NER
We now perform NER on the text using the spaCy library. For each document we generate a list of location entities and, for each entity, record a snippet of text around the occurrence. The result is a DataFrame containing the placename, the context and the document number - really the row number in the original spreadsheet.<jupyter_code>locations = utils.apply_ner(texts, text='text', ident='id', keep_entities=['GPE'])
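# (Added sketch) `utils.apply_ner` is a project-specific helper; conceptually it does
# something like the loop below. Treat this as an illustration only: the 'id' and 'text'
# column names are taken from the arguments above, and the 40-character context window
# is an arbitrary assumption rather than the helper's actual behaviour.
sketch_rows = []
for ident, text in zip(texts['id'], texts['text']):
    for ent in nlp(text).ents:
        if ent.label_ == 'GPE':  # keep place-name entities only
            context = text[max(0, ent.start_char - 40):ent.end_char + 40]
            sketch_rows.append({'placename': ent.text, 'context': context, 'doc': ident})
sketch_locations = pd.DataFrame(sketch_rows)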
locations.head()<jupyter_output><empty_output><jupyter_text>## Visualisation
spaCy can be used to visualise the NER results in the notebook. This might not be too useful but illustrates what is possible. <jupyter_code>from spacy import displacy
from IPython.core.display import display, HTML
doc = nlp(texts['text'][4])
display(HTML(displacy.render(doc, style='ent')))<jupyter_output><empty_output><jupyter_text>## Geocoding
We can use the `geocoder` module to submit these place names to a geocoding service. Here we use the Geonames service and make a new table with the results.<jupyter_code>locations = utils.geolocate_locations(locations)
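# (Added note) utils.geolocate_locations wraps the geocoding step. With the `geocoder`
# package and the GeoNames service, the pattern is roughly as shown below. It is left
# as a comment because it needs a registered GeoNames username -- "your_username" is a
# placeholder -- and the attribute names are an assumption based on the geocoder docs:
#
#   import geocoder
#   g = geocoder.geonames("Edinburgh", key="your_username")
#   print(g.lat, g.lng)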
locations<jupyter_output><empty_output>
|
no_license
|
/FromThePage NER.ipynb
|
HASSCloud/hassdevl-samples
| 5 |
<jupyter_start><jupyter_text>
$$
{\LARGE S_t = S_{t-1} \mathbin{\cdot} e^{((r - \frac{1}{2} \cdot stdev^2) \mathbin{\cdot} \delta_t + stdev \mathbin{\cdot} \sqrt{\delta_t} \mathbin{\cdot} Z_t)} }
$$
<jupyter_code>log_returns.tail()
data.plot(figsize=(10, 6));
r = 0.025  # risk-free rate (TLR)
stdev = log_returns.std() * 250 ** 0.5
stdev
type(stdev)
stdev = stdev.values
stdev
T = 1.0
t_intervals = 250
delta_t = T / t_intervals
iterations = 10000
Z = np.random.standard_normal((t_intervals + 1, iterations))
S = np.zeros_like(Z)
S0 = data.iloc[-1]
S[0] = S0
for t in range(1, t_intervals + 1):
S[t] = S[t-1] * np.exp((r - 0.5 * stdev ** 2) * delta_t + stdev * delta_t ** 0.5 * Z[t])
S
S.shape
plt.figure(figsize=(10, 6))
plt.plot(S[:, :10]);<jupyter_output><empty_output><jupyter_text>******<jupyter_code>p = np.maximum(S[-1] - 110, 0)
p
p.shape<jupyter_output><empty_output><jupyter_text>Use the following formula to forecast the price of a stock option.
$$
C = \frac{\exp(-r \cdot T) \cdot \sum_{i}{p_i}}{iterations}
$$<jupyter_code>C = np.exp(-r * T) * np.sum(p) / iterations
C <jupyter_output><empty_output>
|
no_license
|
/Codigo/16 Monte Carlos Euler Discretization.ipynb
|
MateoHeras77/Python-for-Finance-Investment-Fundamentals-Data-Analytics
| 3 |
<jupyter_start><jupyter_text># Nearest neighbor for spine injury classificationIn this homework notebook we use **nearest neighbor classification** to classify back injuries for patients in a hospital, based on measurements of the shape and orientation of their pelvis and spine.
The data set contains information from **310** patients. For each patient, there are six measurements (the x) and a label (the y). The label has **3** possible values, `’NO’` (normal), `’DH’` (herniated disk), or `’SL’` (spondylolisthesis). **Note:** Before attempting this homework, please go through the *Nearest neighbor for handwritten digit recognition* notebook.# 1. Setup notebookWe import all necessary packages for the homework. Notice that we do **NOT** import any of the `sklearn` packages. This is because we want you to implement a nearest neighbor classifier **manually**, as in the *Nearest neighbor for handwritten digit recognition* notebook.
<jupyter_code>import numpy as np<jupyter_output><empty_output><jupyter_text>We now load the dataset. We divide the data into a training set of 248 patients and a separate test set of 62 patients. The following arrays are created:
* **`trainx`** : The training data's features, one point per row.
* **`trainy`** : The training data's labels.
* **`testx`** : The test data's features, one point per row.
* **`testy`** : The test data's labels.
We will use the training set (`trainx` and `trainy`), with nearest neighbor classification, to predict labels for the test data (`testx`). We will then compare these predictions with the correct labels, `testy`.Notice that we code the three labels as `0. = ’NO’, 1. = ’DH’, 2. = ’SL’`.<jupyter_code># Load data set and code labels as 0 = ’NO’, 1 = ’DH’, 2 = ’SL’
labels = [b'NO', b'DH', b'SL']
data = np.loadtxt('/ml/NN_spine/column_3C.dat', converters={6: lambda s: labels.index(s)} )
# Separate features from labels
x = data[:,0:6]
y = data[:,6]
# Divide into training and test set
training_indices = list(range(0,20)) + list(range(40,188)) + list(range(230,310))
test_indices = list(range(20,40)) + list(range(188,230))
trainx = x[training_indices,:]
trainy = y[training_indices]
testx = x[test_indices,:]
testy = y[test_indices]<jupyter_output><empty_output><jupyter_text>## 2. Nearest neighbor classification with L2 distanceIn this exercise we will build a nearest neighbor classifier based on L2 (*Euclidean*) distance.
**For you to do:** Write a function, **NN_L2**, which takes as input the training data (`trainx` and `trainy`) and the test points (`testx`) and predicts labels for these test points using 1-NN classification. These labels should be returned in a `numpy` array with one entry per test point. For **NN_L2**, the L2 norm should be used as the distance metric.
**Code**
```python
# test function
testy_L2 = NN_L2(trainx, trainy, testx)
print( type( testy_L2) )
print( len(testy_L2) )
print( testy_L2[40:50] )
```
**Output**
```
<class 'numpy.ndarray'>
62
[ 2. 2. 1. 0. 0. 0. 0. 0. 0. 0.]
```
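For orientation only, the underlying idea can be written in a few lines of vectorized NumPy. This is a sketch with a deliberately different name, so it does not stand in for the `NN_L2` you are asked to write below:
```python
import numpy as np

# A renamed sketch -- not the graded NN_L2 -- assuming trainx/testx are 2-D arrays
# of shape (num_points, num_features) as defined above.
def nn_l2_sketch(trainx, trainy, testx):
    # (num_test, num_train) matrix of squared Euclidean distances via broadcasting
    dists = ((testx[:, None, :] - trainx[None, :, :]) ** 2).sum(axis=2)
    # index of the nearest training point for each test point -> its label
    return trainy[np.argmin(dists, axis=1)]
```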
<jupyter_code># Modify this Cell
def NN_L2(trainx, trainy, testx):
# inputs: trainx, trainy, testx <-- as defined above
# output: an np.array of the predicted values for testy
### BEGIN SOLUTION
### END SOLUTION<jupyter_output><empty_output><jupyter_text>After you are done, run the cell below to check your function. If an error is triggered, you should go back and revise your function.<jupyter_code>testy_L2 = NN_L2(trainx, trainy, testx)
assert( type( testy_L2).__name__ == 'ndarray' )
assert( len(testy_L2) == 62 )
assert( np.all( testy_L2[50:60] == [ 0., 0., 0., 0., 2., 0., 2., 0., 0., 0.] ) )
assert( np.all( testy_L2[0:10] == [ 0., 0., 0., 1., 1., 0., 1., 0., 0., 1.] ) )<jupyter_output><empty_output><jupyter_text># 3. Nearest neighbor classification with L1 distanceWe now compute nearest neighbors using the L1 distance (sometimes called *Manhattan Distance*).
**For you to do:** Write a function, **NN_L1**, which again takes as input the arrays `trainx`, `trainy`, and `testx`, and predicts labels for the test points using 1-nearest neighbor classification. For **NN_L1**, the L1 distance metric should be used. As before, the predicted labels should be returned in a `numpy` array with one entry per test point.
Notice that **NN_L1** and **NN_L2** may well produce different predictions on the test set.
**Code**
```python
# test function
testy_L2 = NN_L2(trainx, trainy, testx)
testy_L1 = NN_L1(trainx, trainy, testx)
print( type( testy_L1) )
print( len(testy_L1) )
print( testy_L1[40:50] )
print( all(testy_L1 == testy_L2) )
```
**Output**
```
<class 'numpy.ndarray'>
62
[ 2. 2. 0. 0. 0. 0. 0. 0. 0. 0.]
False
```
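The only change relative to the L2 case is how the pairwise distances are computed; here is a short sketch of the L1 (Manhattan) distance matrix, again using illustrative names only:
```python
import numpy as np

# Same shape conventions as the L2 sketch; only the distance changes
# (sum of absolute coordinate differences instead of squared differences).
l1_dists = np.abs(testx[:, None, :] - trainx[None, :, :]).sum(axis=2)
nearest_labels = trainy[np.argmin(l1_dists, axis=1)]
```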
<jupyter_code># Modify this Cell
def NN_L1(trainx, trainy, testx):
# inputs: trainx, trainy, testx <-- as defined above
# output: an np.array of the predicted values for testy
### BEGIN SOLUTION
### END SOLUTION<jupyter_output><empty_output><jupyter_text>Again, use the following cell to check your code.<jupyter_code>testy_L1 = NN_L1(trainx, trainy, testx)
testy_L2 = NN_L2(trainx, trainy, testx)
assert( type( testy_L1).__name__ == 'ndarray' )
assert( len(testy_L1) == 62 )
assert( not all(testy_L1 == testy_L2) )
assert( all(testy_L1[50:60]== [ 0., 2., 1., 0., 2., 0., 0., 0., 0., 0.]) )
assert( all( testy_L1[0:10] == [ 0., 0., 0., 0., 1., 0., 1., 0., 0., 1.]) )<jupyter_output><empty_output><jupyter_text># 4. Test errors and the confusion matrixLet's see if the L1 and L2 distance functions yield different error rates for nearest neighbor classification of the test data.<jupyter_code>def error_rate(testy, testy_fit):
return float(sum(testy!=testy_fit))/len(testy)
print("Error rate of NN_L1: ", error_rate(testy,testy_L1) )
print("Error rate of NN_L2: ", error_rate(testy,testy_L2) )<jupyter_output><empty_output><jupyter_text>We will now look a bit more deeply into the specific types of errors made by nearest neighbor classification, by constructing the *confusion matrix*.
Since there are three labels, the confusion matrix is a 3x3 matrix whose rows correspond to the true label and whose columns correspond to the predicted label. For example, the entry at row DH, column SL, contains the number of test points whose correct label was DH but which were classified as SL.
Write a function, **confusion**, which takes as input the true labels for the test set (that is, `testy`) as well as the predicted labels and returns the confusion matrix. The confusion matrix should be an `np.array` of shape `(3,3)`. **Code**
```python
L2_neo = confusion(testy, testy_L2)
print( type(L2_neo) )
print( L2_neo.shape )
print( L2_neo )
```
**Output**
```
<class 'numpy.ndarray'>
(3, 3)
[[ 17. 1. 2.]
[ 10. 10. 0.]
[ 0. 0. 22.]]
```
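One way to think about it: each (true label, predicted label) pair indexes a cell of the matrix to increment. Below is a sketch under the 0/1/2 label coding used here, with a name distinct from the graded `confusion` function:
```python
import numpy as np

def confusion_sketch(testy, testy_fit):
    M = np.zeros((3, 3))
    for true_label, pred_label in zip(testy, testy_fit):
        # row = true label, column = predicted label (labels are coded 0., 1., 2.)
        M[int(true_label), int(pred_label)] += 1
    return M
```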
<jupyter_code># Modify this cell
def confusion(testy,testy_fit):
# inputs: the correct labels, the fitted NN labels
# output: a 3x3 np.array representing the confusion matrix as above
### BEGIN SOLUTION
### END SOLUTION<jupyter_output><empty_output><jupyter_text>Now check your code by running the following cell.<jupyter_code># Test Function
L1_neo = confusion(testy, testy_L1)
assert( type(L1_neo).__name__ == 'ndarray' )
assert( L1_neo.shape == (3,3) )
assert( np.all(L1_neo == [[ 16., 2., 2.],[ 10., 10., 0.],[ 0., 0., 22.]]) )
L2_neo = confusion(testy, testy_L2)
assert( np.all(L2_neo == [[ 17., 1., 2.],[ 10., 10., 0.],[ 0., 0., 22.]]) )<jupyter_output><empty_output>
|
no_license
|
/python-module/fundamentals_of_machine_learning_DSE220x/NN_spine/Nearest_neighbor_spine.ipynb
|
josecponce/spark-stream-kafka-spring-example
| 9 |
<jupyter_start><jupyter_text># Exploratory Data Analysis<jupyter_code>for i in df.columns:
if df[i].dtype == 'object':
df[i] = pd.factorize(df[i])[0]
df.groupby(['cap-shape'])['target'].value_counts()
pd.crosstab(df['cap-shape'],df['target'])
fig = px.violin(df,
x = df['cap-shape'],
y=df['target'])
fig.show()
fig = px.violin(df,
x = df['cap-surface'],
y=df['target'])
fig.show()<jupyter_output><empty_output><jupyter_text>#### Feature selection<jupyter_code>from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest,chi2
from sklearn.feature_selection import mutual_info_classif
y = df.target
df.drop('target',axis =1,inplace=True)
x = df
vrt = VarianceThreshold(threshold=0.01)
vrt.fit(x,y)
sum(vrt.get_support())
X = vrt.transform(df)
chi2_selector = SelectKBest(chi2, k=11)
X_kbest = chi2_selector.fit_transform(X, y)
X_kbest.shape
mut_feat = mutual_info_classif(X_kbest,y)
mut_feat
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
X_train,X_test,y_train,y_test = train_test_split(X_kbest,y,test_size=0.15,random_state=1)
lr = LogisticRegression(max_iter=200)
lr.fit(X_train,y_train)
lr.score(X_train,y_train)
cross_val_score(lr,X_train,y_train,cv=5)
lr.score(X_test,y_test)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_features=9,max_depth=5,n_estimators=10)
rf.fit(X_train,y_train)
rf.score(X_train,y_train)
cross_val_score(rf,X_train,y_train,cv=5)
rf.feature_importances_
rf.score(X_test,y_test)
from sklearn.metrics import classification_report,roc_auc_score,roc_curve,auc
y_pred = rf.predict(X_test)
print(classification_report(y_test,y_pred))
roc_auc_score(y_test,y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
plt.plot(fpr,tpr)
plt.xlabel('fpr')
plt.ylabel('tpr')
plt.title(f'tpr vs fpr plot with auc: {roc_auc_score(y_test,y_pred)}')
plt.show()<jupyter_output><empty_output>
|
no_license
|
/notebooks/harsh368/classification-with-random-forest.ipynb
|
Sayem-Mohammad-Imtiaz/kaggle-notebooks
| 2 |
<jupyter_start><jupyter_text># Statistical tests<jupyter_code>res_ml <- read.table("./smo_disc_pfi_lung_ml_100_iter_rep_kfold.csv", header = TRUE, sep = ",")
res_cnn <- read.table("./ros_pt_ft_disc_pfi_lung_ft_100_iter_rep_kfold.csv", header = TRUE, sep = ",")
res_cnn_mean <- read.table("./ros_mean_disc_pfi_lung_ft_100_iter_rep_kfold.csv", header = TRUE, sep = ",")
res_cnn_random <- read.table("./ros_random_disc_pfi_lung_ft_100_iter_rep_kfold.csv", header = TRUE, sep = ",")
res_mlnn <- read.table("./ros_mlnn_disc_pfi_lung_ft_100_iter_rep_kfold.csv", header = TRUE, sep = ",")
res_all <- cbind(res_ml, res_cnn, res_cnn_mean, res_cnn_random, res_mlnn)
summary(res_all)
res_all_mean <- apply(res_all, 2, mean)
(res_all_mean <- res_all_mean[order(res_all_mean, decreasing = FALSE)])
fact <- c()
perf <- c()
for (n in names(res_all_mean)) {
fact <- c(fact, rep(n, times = nrow(res_all)))
perf <- c(perf, res_all[, n])
}
df_res_all <- data.frame(model = factor(fact, levels = names(res_all_mean)), result = perf)
head(df_res_all)<jupyter_output><empty_output><jupyter_text>## Adjust p-value
We adjust all computed p-values together.<jupyter_code>compute_p_value <- function(main_model, sub_model, test_function = wilcox.test) {
p_value <- c()
for (s in sub_model) {
p_value <- c(p_value, test_function(x = df_res_all[df_res_all[, "model"]==main_model, "result"],
y = df_res_all[df_res_all[, "model"]==s, "result"],
alternative = "greater", paired = TRUE)$p.value)
}
return(p_value)
}
colnames(res_all)
length(colnames(res_all))
# Compute CNN p-values
cnn_main_mod <- "Fine.all"
cnn_sub_mod <- colnames(res_all)[-13]
cnn_p_value <- compute_p_value(main_model = cnn_main_mod, sub_model = cnn_sub_mod,
test_function = wilcox.test)
names(cnn_p_value) <- cnn_sub_mod
# Compute Deep-MLNN p-values
mlnn_main_mod <- "MLNN"
mlnn_sub_mod <- colnames(res_all)[c(1, 5, 9)]
mlnn_p_value <- compute_p_value(main_model = mlnn_main_mod, sub_model = mlnn_sub_mod,
test_function = wilcox.test)
names(mlnn_p_value) <- mlnn_sub_mod
all_p_value <- c(cnn_p_value, mlnn_p_value)
# Adjust all computed p-values
bonf_adj_p_value <- p.adjust(p = all_p_value, method = "bonferroni")
holm_adj_p_value <- p.adjust(p = all_p_value, method = "holm")
hoch_adj_p_value <- p.adjust(p = all_p_value, method = "hochberg")
all_adj_p_value <- data.frame(Bonferroni = bonf_adj_p_value, Holm = holm_adj_p_value, Hochberg = hoch_adj_p_value)
# CNN vs all
cnn_vs_all <- all_adj_p_value[c(1:15), ]
rownames(cnn_vs_all) <- names(cnn_p_value)
cnn_vs_all
# MLNN vs NN
deep_vs_ann <- all_adj_p_value[c(16:nrow(all_adj_p_value)), ]
rownames(deep_vs_ann) <- names(mlnn_p_value)
deep_vs_ann<jupyter_output><empty_output>
|
non_permissive
|
/results/Lung_PFI_Statistics.ipynb
|
matsutakk/GeneExpImgTL
| 2 |
<jupyter_start><jupyter_text>
# Compute LCMV beamformer on evoked data
Compute LCMV beamformer on an evoked dataset for three different choices of
source orientation and store the solutions in stc files for visualization.
<jupyter_code># Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'<jupyter_output><empty_output><jupyter_text>Get epochs
<jupyter_code>event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd)
forward = mne.convert_forward_solution(forward, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk',
rank=None)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk', rank=None)
evoked.plot(time_unit='s')<jupyter_output><empty_output><jupyter_text>Run beamformers and look at maximum outputs
<jupyter_code>pick_oris = [None, 'normal', 'max-power', None]
descriptions = ['Free', 'Normal', 'Max-power', 'Fixed']
fig, ax = plt.subplots(1)
max_voxs = list()
colors = list()
for pick_ori, desc in zip(pick_oris, descriptions):
# compute unit-noise-gain beamformer with whitening of the leadfield and
# data (enabled by passing a noise covariance matrix)
if desc == 'Fixed':
use_forward = mne.convert_forward_solution(forward, force_fixed=True)
else:
use_forward = forward
filters = make_lcmv(evoked.info, use_forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori=pick_ori,
weight_norm='unit-noise-gain', rank=None)
print(filters)
# apply this spatial filter to source-reconstruct the evoked data
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
# View activation time-series in maximum voxel at 100 ms:
time_idx = stc.time_as_index(0.1)
max_idx = np.argmax(np.abs(stc.data[:, time_idx]))
# we know these are all left hemi, so we can just use vertices[0]
max_voxs.append(stc.vertices[0][max_idx])
h = ax.plot(stc.times, stc.data[max_idx, :],
label='%s, voxel: %i' % (desc, max_idx))[0]
colors.append(h.get_color())
if pick_ori == 'max-power':
max_stc = stc
ax.axhline(0, color='k')
ax.set(xlabel='Time (ms)', ylabel='LCMV value',
title='LCMV in maximum voxel')
ax.legend(loc='lower right')
mne.viz.utils.plt_show()<jupyter_output><empty_output><jupyter_text>We can also look at the spatial distribution
<jupyter_code># Plot last stc in the brain in 3D with PySurfer if available
brain = max_stc.plot(hemi='lh', views='lat', subjects_dir=subjects_dir,
initial_time=0.1, time_unit='s', smoothing_steps=5)
for color, vertex in zip(colors, max_voxs):
brain.add_foci([vertex], coords_as_verts=True, scale_factor=0.5,
hemi='lh', color=color)<jupyter_output><empty_output>
|
permissive
|
/dev/_downloads/99d2306f38e4ae32ce5d28d8fa3bcba5/plot_lcmv_beamformer.ipynb
|
massich/mne-tools.github.io
| 4 |
<jupyter_start><jupyter_text>## Linear Regression
> Authored by Karan R Nadagoudar
**Linear Regression** is a statistical method for finding the relationship between a dependent and an independent variable.
It is used to predict an outcome (the dependent variable) from an independent/predictor variable. It is called Linear Regression because it assumes a linear relationship, meaning the dependent variable varies linearly with the independent variable.
> _For example_ : Salary(lakhs/annum) = 3 + Experience(years) * 1.5
The above example depicts the relationship between Salary (the dependent variable) and Experience (the independent variable).
Salary changes linearly with Experience, which shows the correlation between the dependent and independent variable.
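Plugging a value into the example relationship makes the linearity concrete (a tiny illustration only; the numbers come from the example above, not from the dataset used later):
```python
experience = 4                     # years of experience
salary = 3 + experience * 1.5      # lakhs per annum, using the example relationship
print(salary)                      # prints 9.0
```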
There are a few assumptions that data for a linear regression should satisfy (a quick visual check is sketched after this list):
- Linearity
    - The relationship between the dependent and independent variable should be linear
- [Homoscedasticity](http://www.statsmakemecry.com/smmctheblog/confusing-stats-terms-explained-heteroscedasticity-heteroske.html)
    - The variability of the residuals should be roughly equal across the range of values of the predictor variable; unequal variability is called heteroscedasticity (explained in the linked article)
- Normality of error distribution
    - Distribution of differences between Actual & Predicted values (Residuals) should be [normally distributed](https://www.coursera.org/learn/machine-learning/lecture/ZYAyC/gaussian-distribution)
- Statistical independence of errors
    - The error terms (residuals) should not have any correlation among themselves; consecutive error terms should be independent of each other
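A quick way to eyeball the last three assumptions is to plot the residuals of a fitted line. Below is a minimal, self-contained sketch on synthetic data (not the salary dataset used later in this notebook):
```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
x = np.linspace(0, 10, 50)
y = 3 + 1.5 * x + rng.normal(scale=1.0, size=x.size)   # linear data with Gaussian noise

slope, intercept = np.polyfit(x, y, 1)                 # fit a straight line
residuals = y - (intercept + slope * x)

plt.scatter(x, residuals)        # residuals vs predictor: look for a constant spread around 0
plt.axhline(0, color='k')
plt.xlabel('x')
plt.ylabel('residual')
plt.show()
```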
***
Below is a simple linear regression using the scikit-learn library and Python, built on a sample dataset of experience and salary records (randomly generated).<jupyter_code>import pandas as pd
from sklearn.linear_model import LinearRegression #Linear models ( Regression )
from sklearn.model_selection import train_test_split  # module to split the dataset (sklearn.cross_validation was removed in newer scikit-learn releases)
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Importing a sample dataset using pandas.<jupyter_code>dataset = pd.read_csv('salary.csv')
print("Rows:",dataset.shape[0],"Columns:",dataset.shape[0])
#separating out independent variable
X=dataset.iloc[:,:-1].values
Y=dataset.iloc[:,1].values<jupyter_output><empty_output><jupyter_text>Visualising dataset using matplotlib<jupyter_code>plt.plot(X,Y,'ro')
plt.title("Experience(No.of years) vs Salary(lakhs/annum)")
plt.xlabel("Salary (lakhs per annum)")
plt.ylabel("Experience (No. of Years)")
plt.show()<jupyter_output><empty_output><jupyter_text>Every dataset should be divided into training and testing sets. The training set is used to train the model, whereas the testing set is used to test and confirm the performance of the model.
A commonly used split ratio between training and testing data is **80:20** or **70:30**.
We will use scikit-learn's `train_test_split` to split our dataset accordingly.
We set a random state so that the random sampling is reproducible.<jupyter_code>X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 39)
print("test X:\n", X_test, "\ntest Y:\n", Y_test)
print("\ntrain X:\n", X_test, "\ntrain Y:", Y_train)<jupyter_output>test X:
[[ 1.]
[ 2.]
[ 0.]]
test Y:
[ 4. 5.5 4. ]
train X:
[[ 1.]
[ 2.]
[ 0.]]
train Y: [ 7.5 8.5 8. 7. 5. 6. 5. 3. 8. 10. 7.5]
<jupyter_text>Define a linear regressor from sklearn's linear_model and train it by fitting the training dataset to it<jupyter_code>regressor = LinearRegression()
regressor.fit(X_train,Y_train)<jupyter_output><empty_output><jupyter_text>Our linear regression model, named `regressor`, is now trained on the training dataset.
Let's visualize the regression line against the training dataset<jupyter_code># Visualising data for the training set
plt.plot(X_train,Y_train,'ro')
plt.plot(X_train,regressor.predict(X_train),'b')
plt.title("Experience vs Salary (Training set)")
plt.xlabel("Experience (No. of Years)")
plt.ylabel("Salary (lakhs per annum)")
plt.show()<jupyter_output><empty_output><jupyter_text>Now let's visualize the regression line against the test dataset<jupyter_code># Visualising data for the test set
plt.plot(X_test,Y_test,'ro')
plt.plot(X_train,regressor.predict(X_train),'b')
plt.title("Experience vs Salary (Test set)")
plt.xlabel("Experience (No. of Years)")
plt.ylabel("Salary (lakhs per annum)")
plt.show()<jupyter_output><empty_output><jupyter_text>Since the model is trained, we can use it to predict results for us<jupyter_code>predSalary = regressor.predict([[8]])  # predict salary for 8 years of experience (the input must be 2D)
print("For experience of 8 years: ""%.2f" %predSalary,"lakhs per annum")<jupyter_output>For experience of 8 years: 11.94 lakhs per annum
<jupyter_text>We have metrics to check the model performance on how well it can predict values:
- R – Square ($R^2$)
$R^2$ = $(TSS - RSS)/TSS$
where TSS is the total sum of squares and RSS is the residual sum of squares; this translates to
$R^2 = 1 - (\sum_{i}{(Yactual_i - Ypred_i)}^2/\sum_{i}{(Yactual_i - Ymean)}^2)$
- Root Mean Squared Error (RMSE)
$RMSE = \sqrt{\sum_{i}{(Yactual_i - Ypred_i)}^2/N}$
where N is the total number of records.Let's check the model performance using the RMSE (the code below scales it by 100 before printing).<jupyter_code>from math import sqrt
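# (Added illustration) The R-squared metric described above can be obtained directly:
# LinearRegression.score(X, y) returns the coefficient of determination R^2.
print("R-squared on the test data:", regressor.score(X_test, Y_test))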
predY = regressor.predict(X_test)
diff = []
for i in range(len(X_test)):
diff.append((Y_test[i] - predY[i])**2)
print(sqrt(sum(diff)/len(X_test))*100,"%")<jupyter_output>44.778690479553305 %
|
no_license
|
/Linear Regression/Linear_Regression.ipynb
|
radulescupetru/Machine_Learning
| 9 |
<jupyter_start><jupyter_text>### Loading datasets<jupyter_code>breast = datasets.load_breast_cancer()
iris = datasets.load_iris()
wine = datasets.load_wine()
yeast = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data', header=None, delim_whitespace=True)
le = LabelEncoder()
yeast.iloc[:, -1] = le.fit_transform(yeast.iloc[:, -1])
yeast.iloc[:, 0] = le.fit_transform(yeast.iloc[:, 0])
spam = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data', header=None)
iono = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/ionosphere/ionosphere.data', header=None)
iono.iloc[:, -1] = le.fit_transform(iono.iloc[:, -1])
captcha = pd.read_csv('dados_captcha.csv')
captcha.iloc[:, -1] = le.fit_transform(captcha.iloc[:, -1])
letters = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/letter-recognition/letter-recognition.data', header=None)
letters = letters[(letters.iloc[:, 0] == 'J')|(letters.iloc[:, 0] == 'L')|(letters.iloc[:, 0] == 'K')]
letters.iloc[:, 0] = le.fit_transform(letters.iloc[:, 0])
image = pd.read_csv('image_segmentation.csv', header=None)
image.iloc[:, 0] = le.fit_transform(image.iloc[:, 0])
pendigits = pd.read_csv('pendigits.csv', header=None)
pendigits = pendigits[(pendigits.iloc[:, -1] == 9)|(pendigits.iloc[:, -1] == 3)|(pendigits.iloc[:, -1] == 8)]
waveform = pd.read_csv('waveform-+noise.data', header=None)
vehicles = pd.read_csv('vehicles.data',sep=' ', header=None)
vehicles = vehicles.iloc[:, :-1]
vehicles.iloc[:, -1] = le.fit_transform(vehicles.iloc[:, -1])
captcha.shape<jupyter_output><empty_output><jupyter_text>Testing method<jupyter_code>def test_algorithm(algorithm, hiperparam, data, target):
accuracy, f1, precision, recall, auc = 0, 0, 0, 0, 0
div = np.zeros(25)
fit = np.zeros(25)
for i in range(0, 10):
print(i, end='')
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=i*10)
try:classifier = algorithm(**hiperparam, random_state=i*10)#classifier.set_param(random_state=i*10)
except: classifier = algorithm(**hiperparam)#classifier.random_state=i*10
aux = classifier.fit(X_train, y_train)
try:
div += aux[0]
fit += aux[1]
except: pass
y_pred = classifier.predict(X_test)
accuracy += accuracy_score(y_test, y_pred)
try: f1 += f1_score(y_test, y_pred)
except: pass
try: precision += precision_score(y_test, y_pred)
except: pass
try: recall += recall_score(y_test, y_pred)
except: pass
try: auc += roc_auc_score(y_test, y_pred)
except: pass
print('Accuracy :', accuracy/10)
if f1>0: print('F1-score :', f1/10)
if precision>0: print('Precision:', precision/10)
if recall>0: print('Recall :', recall/10)
if auc>0: print('ROC AUC :', auc/10)
return div/10, fit/10
def compare_results(data, target, size):
n_samples = ((data.shape[0]*0.8 * 4) // 5)-4
alg = {
KNeighborsClassifier: {'n_neighbors':[1, n_samples]},
RidgeClassifier: {'alpha':[1.0, 10.0],'max_iter':[10, 100]},
SVC: {'C':[1, 1000],
'gamma':[0.0001, 0.001]
},
DecisionTreeClassifier: {'min_samples_leaf':[1, n_samples], 'max_depth':[1, n_samples]},
ExtraTreeClassifier: {'min_samples_leaf':[1, n_samples], 'max_depth':[1, n_samples]},
GaussianNB: {},
LinearDiscriminantAnalysis: {},
QuadraticDiscriminantAnalysis: {},
BernoulliNB: {},
#GaussianProcessClassifier: {'max_iter_predict':[1, 50]},
LogisticRegression:{'C':[1, 1000], 'max_iter':[100, 1000]},
NearestCentroid:{},
PassiveAggressiveClassifier:{'C':[1, 1000], 'max_iter':[100, 1000]},
SGDClassifier: {'alpha':[1e-5, 1e-2], 'max_iter':[100, 1000]},
RandomForestClassifier:{'n_estimators':[2, size], 'min_samples_leaf':[1, 20]}
}
print('-'*60)
print('Diversity-based Ensemble Classifier')
print('-'*60)
aux = int(round(time.time() * 1000))
dec = {'algorithms':alg, 'population_size':size, 'max_epochs':25}
div, fit = test_algorithm(DEC, dec, data=data, target=target)
print('DEC done in: ',(int(round(time.time() * 1000)) - aux)/10, 'ms')
print('-'*60)
print('Genetic-based Ensemble Classifier')
print('-'*60)
aux = int(round(time.time() * 1000))
gec = {'algorithms':alg, 'population_size':size, 'max_epochs':25}
test_algorithm(GEC, gec, data=data, target=target)
print('GEC done in: ',(int(round(time.time() * 1000)) - aux)/10, 'ms')
print('-'*60)
print('Random Classifier')
print('-'*60)
aux = int(round(time.time() * 1000))
rc = {'algorithms':alg}
test_algorithm(RC, rc, data=data, target=target)
print('RC done in: ',(int(round(time.time() * 1000)) - aux)/10, 'ms')
print('-'*60)
print('Majority Classifier')
print('-'*60)
aux = int(round(time.time() * 1000))
#mc = MC()
test_algorithm(MC, {}, data=data, target=target)
print('MC done in: ',(int(round(time.time() * 1000)) - aux)/10, 'ms')
print('-'*60)
print('Random Forest')
print('-'*60)
aux = int(round(time.time() * 1000))
rf = {'n_estimators':size}
test_algorithm(RandomForestClassifier, rf, data=data, target=target)
print('RF done in: ',(int(round(time.time() * 1000)) - aux)/10, 'ms')
print('-'*60)
print('Ada Boost')
print('-'*60)
aux = int(round(time.time() * 1000))
ada = {'n_estimators':size}
test_algorithm(AdaBoostClassifier, ada, data=data, target=target)
print('Ada done in: ',(int(round(time.time() * 1000)) - aux)/10, 'ms')
return div, fit
d10, f10 =compare_results(data=captcha.iloc[:, 0:-1].values, target=captcha.iloc[:, -1].values, size=10)
d25, f25 =compare_results(data=captcha.iloc[:, 0:-1].values, target=captcha.iloc[:, -1].values, size=25)
d50, f50 =compare_results(data=captcha.iloc[:, 0:-1].values, target=captcha.iloc[:, -1].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('captcha.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('captchaFitness.png')
plt.clf()
plt.close()
d25, f25 =compare_results(data=breast.data, target=breast.target, size=10)
d50, f50 =compare_results(data=breast.data, target=breast.target, size=25)
d100, f100 =compare_results(data=breast.data, target=breast.target, size =50)
plt.plot(range(1, 26), d25, label='10 cromossomos')
plt.plot(range(1, 26), d50, label='25 cromossomos')
plt.plot(range(1, 26), d100, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('breast2.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f25, label='10 cromossomos')
plt.plot(range(1, 26), f50, label='25 cromossomos')
plt.plot(range(1, 26), f100, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('breastFitness.png')
plt.clf()
plt.close()
d25, f25 = compare_results(data=iris.data, target=iris.target, size=10)
d50, f50 = compare_results(data=iris.data, target=iris.target, size=25)
d100, f100 = compare_results(data=iris.data, target=iris.target, size=50)
plt.plot(range(1, 26), d25, label='10 cromossomos')
plt.plot(range(1, 26), d50, label='25 cromossomos')
plt.plot(range(1, 26), d100, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('iris.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f25, label='10 cromossomos')
plt.plot(range(1, 26), f50, label='25 cromossomos')
plt.plot(range(1, 26), f100, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('irisFitness.png')
plt.clf()
plt.close()
d25, f25 = compare_results(data=wine.data, target=wine.target, size=10)
d50, f50 = compare_results(data=wine.data, target=wine.target, size=25)
d100, f100 = compare_results(data=wine.data, target=wine.target, size=50)
plt.plot(range(1, 26), d25, label='10 cromossomos')
plt.plot(range(1, 26), d50, label='25 cromossomos')
plt.plot(range(1, 26), d100, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('wine.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f25, label='10 cromossomos')
plt.plot(range(1, 26), f50, label='25 cromossomos')
plt.plot(range(1, 26), f100, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('wineFitness.png')
plt.clf()
plt.close()
d25, f25 = compare_results(data=yeast.iloc[:, 0:-1].values, target=yeast.iloc[:, -1].values, size = 10)
d50, f50 = compare_results(data=yeast.iloc[:, 0:-1].values, target=yeast.iloc[:, -1].values, size = 25)
d100, f100 = compare_results(data=yeast.iloc[:, 0:-1].values, target=yeast.iloc[:, -1].values, size = 50)
plt.plot(range(1, 26), d25, label='10 cromossomos')
plt.plot(range(1, 26), d50, label='25 cromossomos')
plt.plot(range(1, 26), d100, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('yeast.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f25, label='10 cromossomos')
plt.plot(range(1, 26), f50, label='25 cromossomos')
plt.plot(range(1, 26), f100, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('yeastFitness.png')
plt.clf()
plt.close()
d25, f25 = compare_results(data=spam.iloc[:, :-1].values, target=spam.iloc[:, -1].values, size=10)
d50, f50 = compare_results(data=spam.iloc[:, :-1].values, target=spam.iloc[:, -1].values, size=25)
d100, f100 = compare_results(data=spam.iloc[:, :-1].values, target=spam.iloc[:, -1].values, size=50)
plt.plot(range(1, 26), d25, label='10 cromossomos')
plt.plot(range(1, 26), d50, label='25 cromossomos')
plt.plot(range(1, 26), d100, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('spam.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f25, label='10 cromossomos')
plt.plot(range(1, 26), f50, label='25 cromossomos')
plt.plot(range(1, 26), f100, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('spamFitness.png')
plt.clf()
plt.close()
d10, f10 =compare_results(data=iono.iloc[:, 0:-1].values, target=iono.iloc[:, -1].values, size=10)
d25, f25 =compare_results(data=iono.iloc[:, 0:-1].values, target=iono.iloc[:, -1].values, size=25)
d50, f50 =compare_results(data=iono.iloc[:, 0:-1].values, target=iono.iloc[:, -1].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('iono.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('ionoFitness.png')
plt.clf()
plt.close()
d10, f10 =compare_results(data=letters.iloc[:, 1:].values, target=letters.iloc[:, 0].values, size=10)
d25, f25 =compare_results(data=letters.iloc[:, 1:].values, target=letters.iloc[:, 0].values, size=25)
d50, f50 =compare_results(data=letters.iloc[:, 1:].values, target=letters.iloc[:, 0].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('letters.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('lettersFitness.png')
plt.clf()
plt.close()
d10, f10 =compare_results(data=image.iloc[:, 1:].values, target=image.iloc[:, 0].values, size=10)
d25, f25 =compare_results(data=image.iloc[:, 1:].values, target=image.iloc[:, 0].values, size=25)
d50, f50 =compare_results(data=image.iloc[:, 1:].values, target=image.iloc[:, 0].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('image.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('imageFitness.png')
plt.clf()
plt.close()
d10, f10 =compare_results(data=pendigits.iloc[:, 0:-1].values, target=pendigits.iloc[:, -1].values, size=10)
d25, f25 =compare_results(data=pendigits.iloc[:, 0:-1].values, target=pendigits.iloc[:, -1].values, size=25)
d50, f50 =compare_results(data=pendigits.iloc[:, 0:-1].values, target=pendigits.iloc[:, -1].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('pendigits.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('pendigitsFitness.png')
plt.clf()
plt.close()
d10, f10 =compare_results(data=waveform.iloc[:, 0:-1].values, target=waveform.iloc[:, -1].values, size=10)
d25, f25 =compare_results(data=waveform.iloc[:, 0:-1].values, target=waveform.iloc[:, -1].values, size=25)
d50, f50 =compare_results(data=waveform.iloc[:, 0:-1].values, target=waveform.iloc[:, -1].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('waveform.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('waveformFitness.png')
plt.clf()
plt.close()
d10, f10 =compare_results(data=vehicles.iloc[:, 0:-1].values, target=vehicles.iloc[:, -1].values, size=10)
d25, f25 =compare_results(data=vehicles.iloc[:, 0:-1].values, target=vehicles.iloc[:, -1].values, size=25)
d50, f50 =compare_results(data=vehicles.iloc[:, 0:-1].values, target=vehicles.iloc[:, -1].values, size =50)
plt.plot(range(1, 26), d10, label='10 cromossomos')
plt.plot(range(1, 26), d25, label='25 cromossomos')
plt.plot(range(1, 26), d50, label='50 cromossomos')
plt.legend()
plt.ylabel('Diversidade')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('vehicles.png')
plt.clf()
plt.close()
plt.plot(range(1, 26), f10, label='10 cromossomos')
plt.plot(range(1, 26), f25, label='25 cromossomos')
plt.plot(range(1, 26), f50, label='50 cromossomos')
plt.legend()
plt.ylabel('Fitness')
plt.xlabel('Nº de épocas')
plt.grid(ls='dashed')
plt.savefig('vehiclesFitness.png')
plt.clf()
plt.close()<jupyter_output>------------------------------------------------------------
Diversity-based Ensemble Classifier
------------------------------------------------------------
0123456789Accuracy : 0.7723529411764706
DEC done in: 27992.9 ms
------------------------------------------------------------
Genetic-based Ensemble Classifier
------------------------------------------------------------
0123456789Accuracy : 0.841764705882353
GEC done in: 23829.1 ms
------------------------------------------------------------
Random Classifier
------------------------------------------------------------
0123456789Accuracy : 0.5064705882352941
RC done in: 24.5 ms
------------------------------------------------------------
Majority Classifier
------------------------------------------------------------
0123456789Accuracy : 0.22941176470588234
MC done in: 1.4 ms
------------------------------------------------------------
Random Forest
------------------------------------------------------------
0123456789Accurac[...]
|
no_license
|
/.ipynb_checkpoints/PGC-checkpoint.ipynb
|
alanAguiar/PGC
| 2 |
<jupyter_start><jupyter_text># K-nearest neighbors: k-NN<jupyter_code>from numpy import *
import operator
from os import listdir
def createDataSet():
group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels = ['A','A','B','B']
return group, labels
group, labels = createDataSet()
print group
print labels<jupyter_output>[[ 1. 1.1]
[ 1. 1. ]
[ 0. 0. ]
[ 0. 0.1]]
['A', 'A', 'B', 'B']
<jupyter_text>### *The Euclidean distance between xA and xB is*
#### $\sqrt{{(xA_0 - xB_0)}^2 + {(xA_1 -xB_1)}^2}$
<jupyter_code>def classify0(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = tile(inX, (dataSetSize,1)) - dataSet
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances**0.5
sortedDistIndicies = distances.argsort()
classCount={}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
classify0([0,0], group, labels, 3)<jupyter_output><empty_output><jupyter_text>## Application: matchmaking on a dating website<jupyter_code>def file2matrix(filename):
love_dictionary={'largeDoses':3, 'smallDoses':2, 'didntLike':1}
fr = open(filename)
arrayOLines = fr.readlines()
numberOfLines = len(arrayOLines) #get the number of lines in the file
returnMat = zeros((numberOfLines,3)) #prepare matrix to return
classLabelVector = [] #prepare labels return
index = 0
for line in arrayOLines:
line = line.strip()
listFromLine = line.split('\t')
returnMat[index,:] = listFromLine[0:3]
if(listFromLine[-1].isdigit()):
classLabelVector.append(int(listFromLine[-1]))
else:
classLabelVector.append(love_dictionary.get(listFromLine[-1]))
index += 1
return returnMat,classLabelVector
datingDataMat, datingLabels = file2matrix('datingTestSet.txt')
print datingDataMat
print datingLabels[0:20]
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:,1], datingDataMat[:,2],15.0*array(datingLabels), 15.0*array(datingLabels))
plt.show()
def autoNorm(dataSet):
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
ranges = maxVals - minVals
normDataSet = zeros(shape(dataSet))
m = dataSet.shape[0]
normDataSet = dataSet - tile(minVals, (m,1))
normDataSet = normDataSet/tile(ranges, (m,1)) #element wise divide
return normDataSet, ranges, minVals
normMat, ranges, minVals = autoNorm(datingDataMat)
normMat
ranges
minVals
def datingClassTest():
hoRatio = 0.50 #hold out 10%
datingDataMat,datingLabels = file2matrix('datingTestSet2.txt') #load data setfrom file
normMat, ranges, minVals = autoNorm(datingDataMat)
m = normMat.shape[0]
numTestVecs = int(m*hoRatio)
errorCount = 0.0
for i in range(numTestVecs):
classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i])
if (classifierResult != datingLabels[i]): errorCount += 1.0
print "the total error rate is: %f" % (errorCount/float(numTestVecs))
datingClassTest()
def classifyPerson():
resultList = ['not at all', 'in small doses', 'in large doses']
percentTats = float(raw_input(\
"percentage of time spent playing video games?"))
ffMiles = float(raw_input("frequent flier miles earned per year?"))
iceCream = float(raw_input("liters of ice cream consumed per year?"))
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
inArr = array([ffMiles, percentTats, iceCream, ])
classifierResult = classify0((inArr - \
minVals)/ranges, normMat, datingLabels, 3)
print "You will probably like this person: %s" % resultList[classifierResult - 1]
classifyPerson()<jupyter_output>percentage of time spent playing video games?1
frequent flier miles earned per year?1
liters of ice cream consumed per year?1
You will probably like this person: in small doses
<jupyter_text>## Example: a handwriting recognition system<jupyter_code>def img2vector(filename):
returnVect = zeros((1,1024))
fr = open(filename)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0,32*i+j] = int(lineStr[j])
return returnVect
testVector = img2vector('digits/testDigits/0_13.txt')
testVector[0,0:31]
def handwritingClassTest():
hwLabels = []
trainingFileList = listdir('digits/trainingDigits') #load the training set
m = len(trainingFileList)
trainingMat = zeros((m,1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0] #take off .txt
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i,:] = img2vector('digits/trainingDigits/%s' % fileNameStr)
testFileList = listdir('digits/testDigits') #iterate through the test set
errorCount = 0.0
mTest = len(testFileList)
for i in range(mTest):
fileNameStr = testFileList[i]
fileStr = fileNameStr.split('.')[0] #take off .txt
classNumStr = int(fileStr.split('_')[0])
vectorUnderTest = img2vector('digits/testDigits/%s' % fileNameStr)
classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr)
if (classifierResult != classNumStr): errorCount += 1.0
print "\nthe total number of errors is: %d" % errorCount
print "\nthe total error rate is: %f" % (errorCount/float(mTest))
handwritingClassTest()<jupyter_output>the classifier came back with: 0, the real answer is: 0
the classifier came back with: 4, the real answer is: 4
the classifier came back with: 4, the real answer is: 4
the classifier came back with: 8, the real answer is: 8
the classifier came back with: 9, the real answer is: 9
the classifier came back with: 6, the real answer is: 6
the classifier came back with: 1, the real answer is: 1
the classifier came back with: 9, the real answer is: 9
the classifier came back with: 9, the real answer is: 9
the classifier came back with: 8, the real answer is: 8
the classifier came back with: 5, the real answer is: 5
the classifier came back with: 6, the real answer is: 6
the classifier came back with: 7, the real answer is: 7
the classifier came back with: 4, the real answer is: 4
the classifier came back with: 1, the real answer is: 1
the classifier came back with: 1, the real answer is: 1
the classifier came back with: 9, the real answer is: 9
the classifier came back with: 1, the real answe[...]
|
permissive
|
/Ch02/Ch02-kNN.ipynb
|
5l1v3r1/MachineLearningInAction
| 4 |
<jupyter_start><jupyter_text>
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
# Customizing TicksMatplotlib's default tick locators and formatters are designed to be generally sufficient in many common situations, but are in no way optimal for every plot. This section will give several examples of adjusting the tick locations and formatting for the particular plot type you're interested in.
Before we go into examples, it will be best for us to understand further the object hierarchy of Matplotlib plots.
Matplotlib aims to have a Python object representing everything that appears on the plot: for example, recall that the ``figure`` is the bounding box within which plot elements appear.
Each Matplotlib object can also act as a container of sub-objects: for example, each ``figure`` can contain one or more ``axes`` objects, each of which in turn contain other objects representing plot contents.
The tick marks are no exception. Each ``axes`` has attributes ``xaxis`` and ``yaxis``, which in turn have attributes that contain all the properties of the lines, ticks, and labels that make up the axes.## Major and Minor Ticks
Within each axis, there is the concept of a *major* tick mark, and a *minor* tick mark. As the names would imply, major ticks are usually bigger or more pronounced, while minor ticks are usually smaller. By default, Matplotlib rarely makes use of minor ticks, but one place you can see them is within logarithmic plots:<jupyter_code>import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
import numpy as np
ax = plt.axes(xscale='log', yscale='log')
ax.grid();<jupyter_output><empty_output><jupyter_text>We see here that each major tick shows a large tickmark and a label, while each minor tick shows a smaller tickmark with no label.
These tick properties—that is, locations and labels—can be customized by setting the ``formatter`` and ``locator`` objects of each axis. Let's examine these for the x axis of the plot just shown:<jupyter_code>print(ax.xaxis.get_major_locator())
print(ax.xaxis.get_minor_locator())
print(ax.xaxis.get_major_formatter())
print(ax.xaxis.get_minor_formatter())<jupyter_output><matplotlib.ticker.LogFormatterMathtext object at 0x10db8dbe0>
<matplotlib.ticker.NullFormatter object at 0x10db9af60>
<jupyter_text>We see that both major and minor tick labels have their locations specified by a ``LogLocator`` (which makes sense for a logarithmic plot). Minor ticks, though, have their labels formatted by a ``NullFormatter``: this says that no labels will be shown.
We'll now show a few examples of setting these locators and formatters for various plots.## Hiding Ticks or Labels
Perhaps the most common tick/label formatting operation is the act of hiding ticks or labels.
This can be done using ``plt.NullLocator()`` and ``plt.NullFormatter()``, as shown here:<jupyter_code>ax = plt.axes()
ax.plot(np.random.rand(50))
ax.yaxis.set_major_locator(plt.NullLocator())
ax.xaxis.set_major_formatter(plt.NullFormatter())<jupyter_output><empty_output><jupyter_text>Notice that we've removed the labels (but kept the ticks/gridlines) from the x axis, and removed the ticks (and thus the labels as well) from the y axis.
Having no ticks at all can be useful in many situations—for example, when you want to show a grid of images.
For instance, consider the following figure, which includes images of different faces, an example often used in supervised machine learning problems (see, for example, [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)):<jupyter_code>fig, ax = plt.subplots(5, 5, figsize=(5, 5))
fig.subplots_adjust(hspace=0, wspace=0)
# Get some face data from scikit-learn
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces().images
for i in range(5):
for j in range(5):
ax[i, j].xaxis.set_major_locator(plt.NullLocator())
ax[i, j].yaxis.set_major_locator(plt.NullLocator())
        ax[i, j].imshow(faces[10 * i + j], cmap="bone")<jupyter_output><empty_output><jupyter_text>Notice that each image has its own axes, and we've set the locators to null because the tick values (pixel number in this case) do not convey relevant information for this particular visualization.

## Reducing or Increasing the Number of Ticks
One common problem with the default settings is that smaller subplots can end up with crowded labels.
We can see this in the plot grid shown here:<jupyter_code>fig, ax = plt.subplots(4, 4, sharex=True, sharey=True)<jupyter_output><empty_output><jupyter_text>Particularly for the x ticks, the numbers nearly overlap, making them quite difficult to decipher.
We can fix this with the ``plt.MaxNLocator()``, which allows us to specify the maximum number of ticks that will be displayed.
Given this maximum number, Matplotlib will use internal logic to choose the particular tick locations:<jupyter_code># For every axis, set the x and y major locator
for axi in ax.flat:
axi.xaxis.set_major_locator(plt.MaxNLocator(3))
axi.yaxis.set_major_locator(plt.MaxNLocator(3))
fig<jupyter_output><empty_output><jupyter_text>This makes things much cleaner. If you want even more control over the locations of regularly-spaced ticks, you might also use ``plt.MultipleLocator``, which we'll discuss in the following section.

## Fancy Tick Formats
Matplotlib's default tick formatting can leave a lot to be desired: it works well as a broad default, but sometimes you'd like to do something more.
Consider this plot of a sine and a cosine:<jupyter_code># Plot a sine and cosine curve
fig, ax = plt.subplots()
x = np.linspace(0, 3 * np.pi, 1000)
ax.plot(x, np.sin(x), lw=3, label='Sine')
ax.plot(x, np.cos(x), lw=3, label='Cosine')
# Set up grid, legend, and limits
ax.grid(True)
ax.legend(frameon=False)
ax.axis('equal')
ax.set_xlim(0, 3 * np.pi);<jupyter_output><empty_output><jupyter_text>There are a couple changes we might like to make. First, it's more natural for this data to space the ticks and grid lines in multiples of $\pi$. We can do this by setting a ``MultipleLocator``, which locates ticks at a multiple of the number you provide. For good measure, we'll add both major and minor ticks in multiples of $\pi/4$:<jupyter_code>ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi / 2))
ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 4))
fig<jupyter_output><empty_output><jupyter_text>But now these tick labels look a little bit silly: we can see that they are multiples of $\pi$, but the decimal representation does not immediately convey this.
To fix this, we can change the tick formatter. There's no built-in formatter for what we want to do, so we'll instead use ``plt.FuncFormatter``, which accepts a user-defined function giving fine-grained control over the tick outputs:<jupyter_code>def format_func(value, tick_number):
# find number of multiples of pi/2
N = int(np.round(2 * value / np.pi))
if N == 0:
return "0"
elif N == 1:
return r"$\pi/2$"
elif N == 2:
return r"$\pi$"
elif N % 2 > 0:
return r"${0}\pi/2$".format(N)
else:
return r"${0}\pi$".format(N // 2)
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
fig<jupyter_output><empty_output>
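For simpler cases, a printf-style format string can stand in for the custom function; the following is a minimal sketch (reusing the ``ax`` and ``fig`` objects from above) that formats each major tick as a plain two-decimal number with a ``FormatStrFormatter``:

```python
from matplotlib import ticker

# Replace the pi-based labels with plain decimals; handy for quick prototyping
# before writing a full FuncFormatter.
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.2f'))
fig
```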
| no_license | /.ipynb_checkpoints/04_10_Customizing_Ticks-checkpoint.ipynb | rishabh25126/DataScienceHandbook | 9 |
<jupyter_start><jupyter_text># Credit Score Machine Learning Model

We needed to build a model to predict credit score in order to use the Fannie Mae LLPA matrix. Before this notebook, I uploaded the data to SQLite, where I selected the appropriate columns that overlapped the HMDA and Fannie Mae data, and did some basic filtering. <jupyter_code>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as geo
from mortgage import Loan
import requests
import calendar
import math
import random
import sqlite3
from sklearn.preprocessing import MinMaxScaler
import statsmodels.api as sm
%matplotlib inline
con = sqlite3.connect(r"C:\Users\Pavan\OneDrive\Desktop\HMDAData.db")
cur = con.cursor()
query='''SELECT *
FROM credit'''
credit=pd.read_sql_query(query, con)
credit.head()
credit.dtypes
credit=credit[credit["credit_score"]!='']
credit=credit[credit["debt_to_income"]!='']
credit=credit[credit["MSA"]!='00000']
len(credit["MSA"].unique())
len(credit["credit_score"].unique())<jupyter_output><empty_output><jupyter_text>First, I preprocess the data, I converted to proper datatypes, and binned the data. <jupyter_code>credit["loan_to_value"]=credit["loan_to_value"].astype(float)
credit["credit_score"]=credit["credit_score"].astype(float)
credit["total_units"]=credit["total_units"].astype(str)
credit["debt_to_income"]=credit["debt_to_income"].astype(float)
credit.columns= ["loan_to_value","debt_to_income","credit_score","total_units","MSA"]
l=[]
for i in credit["debt_to_income"]:
if i <20:
l.append("<20%")
elif i>=20 and i<30:
l.append("20%-<30%")
elif i>=30 and i<36:
l.append("30%-<36%")
elif i>=36 and i<41:
l.append("36%-<41%")
elif i>=41 and i<46:
l.append("41%-<46%")
elif i>=46 and i<50:
l.append("46%-<50%")
elif i>=50 and i<=60:
l.append("50%-60%")
elif i>60:
l.append(">60%")
credit["debt_to_income_ratio"]=l
credit["debt_to_income_ratio"].value_counts()
credit["debt_to_income"]
av=credit.groupby("MSA").mean()["credit_score"]
av.head()
credit["av_credit"]=[av[i] for i in credit["MSA"]]
sns.distplot(credit["credit_score"])
sns.boxplot(y="credit_score", x="total_units",data=credit);
sns.boxplot(y="credit_score", x="debt_to_income_ratio",data=credit);
plt.xticks(rotation=90);
credit["bin_lv"]=pd.qcut(credit['loan_to_value'], 4,labels=["below 25th percentile","between 25th percentile and 50th percentile","between 50th percentile and 75th percentile","greater than 75th percentile"])
sns.boxplot(y="credit_score", x="bin_lv",data=credit);
plt.xticks(rotation=90);<jupyter_output><empty_output><jupyter_text>Now, I train and test the model<jupyter_code>df=credit[["bin_lv","total_units","debt_to_income_ratio","av_credit","credit_score"]]
X=pd.get_dummies(df[["bin_lv","debt_to_income_ratio","total_units"]])
X["av_credit"]= df["av_credit"]
from sklearn import model_selection
X_train, X_test, y_train, y_test=model_selection.train_test_split(X,df["credit_score"],test_size=0.20)
from sklearn.linear_model import LinearRegression
model= LinearRegression().fit(X_train,y_train)
model.coef_
predict= model.predict(X_test)
residuals= predict-y_test
sns.distplot(residuals);
from sklearn.metrics import r2_score
print(r2_score(y_test,predict))
from sklearn.metrics import mean_squared_error as mse
print(np.sqrt(mse(y_test,predict)))
plt.hist(predict, alpha=0.5, label="linear");
plt.hist(y_test,alpha=0.5, label="actual");
from sklearn.ensemble import RandomForestRegressor
model1= RandomForestRegressor(n_estimators=300).fit(X_train,y_train)
predict1= model1.predict(X_test)
residuals1= predict1-y_test
sns.distplot(residuals1);
np.sqrt(mse(y_test,predict1))
print(r2_score(y_test,predict1))
np.std(residuals1)
plt.figure(figsize=(10,5));
plt.hist(predict1, alpha=0.5, label="tree");
plt.hist(y_test,alpha=0.5, label="actual");<jupyter_output><empty_output><jupyter_text>Save the model in a pickle file to be used in another notebook. <jupyter_code>import pickle
model.coef_
pkl_filename = "credit_model.pkl"
with open(pkl_filename, 'wb') as file:
pickle.dump(model1, file)
with open(pkl_filename, 'rb') as file:
pickle_model = pickle.load(file)
pickle_model.predict(X_test)
predict1
len(pickle_model.coef_)
len(X_test.columns)
r=["debt_to_income_ratio_30%-<36%",'debt_to_income_ratio_36%-<41%','debt_to_income_ratio_41%-<46%','debt_to_income_ratio_46%-<50%','debt_to_income_ratio_50%-60%','debt_to_income_ratio_<20%','total_units_1','total_units_2','total_units_3','total_units_4']
for i in range(16):
print(X_test.columns[i])
len(X_test)
features = X_test.columns
importances = model1.feature_importances_
indices = np.argsort(importances)
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
pd.DataFrame(av).to_csv("avg_credit.csv")
credit.shape<jupyter_output><empty_output>
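As a possible robustness check (a sketch, not part of the original analysis), k-fold cross-validation gives a less split-dependent estimate of the random forest's error than the single train/test split used above:

```python
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

# 5-fold cross-validated RMSE on the same features and target used above;
# a smaller n_estimators keeps the run time modest.
cv_model = RandomForestRegressor(n_estimators=100)
neg_mse = cross_val_score(cv_model, X, df["credit_score"], cv=5, scoring="neg_mean_squared_error")
print(np.sqrt(-neg_mse).mean())
```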
| no_license | /CreditScoreML.ipynb | PavanRajGowda/MortgageDiscriminationandSolar | 4 |
<jupyter_start><jupyter_text>## Introduction to Data Science
### Introduction to Pandas<jupyter_code>import os
import pathlib
import time
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#pd.set_option('display.max_rows', 500)
#pd.set_option('display.max_columns', 500)
#pd.set_option('display.width', 1000)
#import pylab
%matplotlib inline
#%matplotlib notebook<jupyter_output><empty_output><jupyter_text>### Pandas Data Structures: Series<jupyter_code>obj = pd.Series([4, 7, -5, 3, 5])
obj
obj.values
obj.index
obj.index = ['Bob', 'Steve', 'Jeff', 'Ryan', 'Fernie']
obj
obj['Bob']
obj2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
obj2
obj2['c']
obj2[['c', 'a', 'd']]
obj2[obj2 < 0]
obj2 * 2
np.exp(obj2)
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj3 = pd.Series(sdata)
obj3
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj4 = pd.Series(sdata, index=states)
obj4
pd.isnull(obj4)
pd.notnull(obj4)
obj3 + obj4
obj4.name = 'population'
obj4.index.name = 'state'
obj4<jupyter_output><empty_output><jupyter_text>### Pandas Data Structures: Dataframe<jupyter_code>data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],'year': [2000, 2001, 2002, 2001, 2002],'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
frame = pd.DataFrame(data)
frame
frame['pop']
pd.DataFrame(data, columns=['year', 'state', 'pop'])
frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],index=['one', 'two', 'three', 'four', 'five'])
frame2
frame2['nova'] = 13
frame2
frame2.nova = 23
frame2
frame2.columns
frame2['state']
frame2.state
#frame2.loc['three']
frame2.loc['three','state']
frame2['debt'] = 16.5
frame2
frame2['debt'] = np.arange(5.)
frame2
val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
frame2
frame2['eastern'] = frame2.state == 'Ohio'
frame2
del frame2['eastern']
frame2.columns
transpose = frame2.pivot(index= 'year', columns='state', values='pop')
transpose
pop = {'Nevada': {2001: 2.4, 2002: 2.9},'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
frame3 = pd.DataFrame(pop)
frame3
frame3.T
pd.DataFrame(pop, index=[2001, 2002, 2003])
pdata = {'Ohio': frame3['Ohio'][:-1],'Nevada': frame3['Nevada'][:2]}
pd.DataFrame(pdata)
frame3.index.name = 'year'; frame3.columns.name = 'state'
frame3
pop = {'Nevada': {2001: 2.4, 2002: 2.9},'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
frame4 = pd.DataFrame(pop)
frame4
frame4.loc[2000,'Nevada'] = 2
frame4
frame5 = pd.concat([frame4, frame4])
frame5
frame5.drop_duplicates(['Nevada'])
dates = pd.date_range("20160101", periods=10)
data = np.random.random((10,3))
column_names = ['Column1', 'Column2', 'Column3']
df = pd.DataFrame(data, index=dates, columns=column_names)
df.head(10)
df[1:3]
df['20160104':'20160107']
df.loc['20160101':'20160102',['Column1','Column3']]
df.iloc[3:5, 0:2]
df.describe()
df.sort_index(axis=0, ascending=False)  # pass inplace=True to sort in place
df.sort_values(by='Column2')
dates1 = pd.date_range("20160101", periods=6)
data1 = np.random.random((6,2))
column_names1 = ['ColumnA', 'ColumnB']
dates2 = pd.date_range("20160101", periods=7)
data2 = np.random.random((7,2))
column_names2 = ['ColumnC', 'ColumnD']
df1 = pd.DataFrame(data1, index=dates1, columns=column_names1)
df2 = pd.DataFrame(data2, index=dates2, columns=column_names2)
df1.head()
df2.head()
df1.join(df2)
df3 = df1.join(df2)
# add a column to df to group on
df3['ProfitLoss'] = pd.Series(['Profit',
'Loss',
'Profit',
'Profit',
'Profit',
'Loss',
'Profit',
'Profit',
'Profit',
'Loss'], index=dates)
df3.head()
df3.groupby('ProfitLoss').mean()<jupyter_output><empty_output><jupyter_text>### Pandas Functions
#### idxmin & idxmax<jupyter_code>df3['ColumnA'].idxmax()
df3['ColumnA'].idxmin()<jupyter_output><empty_output><jupyter_text>#### ne()<jupyter_code>df = pd.DataFrame()
df['x'] = [0,0,0,0,0,0,1,2,3,4,5,6,7]
df['x'].ne(0)
df['x'].ne(0).idxmax()
df['x'].nsmallest(3)
df.nlargest(3, 'x')<jupyter_output><empty_output><jupyter_text>#### Pandas.melt()<jupyter_code># Create a test dataframe
# Untidy dataframe
# x : Subjects
# y : Student names
marks = pd.DataFrame(np.random.randint(0, 100, size = (30,5)),
columns = ['Maths', 'Physics','Chemistry', 'Biology', 'Computer_Science'])
# Add student column to dataset
marks['Student'] = ['Student ' + str(i) for i in range(1,31)]
display(marks.head())
# Bring last column to first position
cols = marks.columns.tolist()
marks = marks[['Student','Chemistry', 'Biology', 'Computer_Science', 'Maths', 'Physics']]
marks.head(10)
tidy = pd.melt(marks,
id_vars = 'Student',
value_name = 'Frequency')
tidy.head(10)<jupyter_output><empty_output><jupyter_text>#### Applying Pandas Operations in Parallel<jupyter_code>from pandarallel import pandarallel
from tqdm._tqdm_notebook import tqdm_notebook
tqdm_notebook.pandas()
pandarallel.initialize(progress_bar=True)
df = pd.DataFrame({
'A' : [random.randint(15,20) for i in range(1,1000000) ],
'B' : [random.randint(10,30) for i in range(1,1000000) ]
})
def func(x):
return math.sin(x.A**2) + math.sin(x.B**2) + math.tan(x.A**2)<jupyter_output><empty_output><jupyter_text>WITHOUT PARALLELIZATION<jupyter_code>%%time
res = df.progress_apply(func, axis=1)<jupyter_output><empty_output><jupyter_text>WITH PARALLELIZATION<jupyter_code>%%time
res_parallel = df.parallel_apply(func, axis=1)<jupyter_output><empty_output>
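Worth noting (not in the original notebook): for purely element-wise arithmetic like ``func`` above, a vectorized NumPy expression avoids the per-row Python overhead entirely and is usually faster than either ``apply`` variant; a minimal sketch using the same ``df``:

```python
# Vectorized equivalent of func(): the trigonometric functions operate on the
# whole columns at once instead of being called once per row.
res_vectorized = np.sin(df['A'] ** 2) + np.sin(df['B'] ** 2) + np.tan(df['A'] ** 2)
```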
| permissive | /notebooks/Intro_Pandas.ipynb | pedrohesch/FGV_Intro_DS | 9 |
<jupyter_start><jupyter_text># Recommendations with IBM
In this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform.
You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way, ensure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.**
By following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations.
## Table of Contents
I. [Exploratory Data Analysis](#Exploratory-Data-Analysis)
II. [Rank Based Recommendations](#Rank)
III. [User-User Based Collaborative Filtering](#User-User)
IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)
V. [Matrix Factorization](#Matrix-Fact)
VI. [Extras & Concluding](#conclusions)
At the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.<jupyter_code>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import project_tests as t
import pickle
%matplotlib inline
df = pd.read_csv('data/user-item-interactions.csv')
df_content = pd.read_csv('data/articles_community.csv')
del df['Unnamed: 0']
del df_content['Unnamed: 0']
# Show df to get an idea of the data
df.head()
# Show df_content to get an idea of the data
df_content.head()<jupyter_output><empty_output><jupyter_text>### Part I : Exploratory Data Analysis
Use the dictionary and cells below to provide some insight into the descriptive statistics of the data.
`1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article. <jupyter_code>interaction_distribution = df.email.value_counts()
article_distribution = df.title.value_counts()
sns.set(color_codes=True)
sns.distplot(interaction_distribution)<jupyter_output><empty_output><jupyter_text>Here we can see a highly skewed histogram. Most of the users had only few article interactions.<jupyter_code># median of interactions
print("The median of interactions is {}.".format(np.median(interaction_distribution)))
# max views by user
print("The maximum number of user-article interactions by any 1 user is {}.".format(interaction_distribution[0]))
# Fill in the median and maximum number of user_article interactios below
median_val = 3
max_views_by_user = 364<jupyter_output><empty_output><jupyter_text>`2.` Explore and remove duplicate articles from the **df_content** dataframe. <jupyter_code># Find and explore duplicate articles
df_content.article_id.value_counts().head(10)
# Remove any rows that have the same article_id - only keep the first
df_content = df_content.drop_duplicates(subset=["article_id"], keep="first")<jupyter_output><empty_output><jupyter_text>`3.` Use the cells below to find:
**a.** The number of unique articles that have an interaction with a user.
**b.** The number of unique articles in the dataset (whether they have any interactions or not).
**c.** The number of unique users in the dataset. (excluding null values)
**d.** The number of user-article interactions in the dataset.<jupyter_code>df.head()
# number of unique articles with interactions
print("The number of unique articles with interactions is {}.".format(len(df.article_id.unique())))
# total number of unique articles
print("The number of articles is {}.".format(len(df_content.article_id.unique())))
# number of unique users
print("The number of unique users is {}.".format(len(df[df["email"].isnull() == False].email.unique())))
# number of user-article interactions
print("The number of user-article interactions is {}.".format(df.shape[0]))
unique_articles = 714
total_articles = 1051
unique_users = 5148
user_article_interactions = 45993<jupyter_output><empty_output><jupyter_text>`4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).<jupyter_code>df.article_id.value_counts().head(1)
most_viewed_article_id = "1429.0"
max_views = 937
## No need to change the code here - this will be helpful for later parts of the notebook
# Run this cell to map the user email to a user_id column and remove the email column
def email_mapper():
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded
# show header
df.head()
## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell
sol_1_dict = {
'`50% of individuals have _____ or fewer interactions.`': median_val,
'`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
'`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
'`The most viewed article in the dataset was viewed _____ times.`': max_views,
'`The article_id of the most viewed article is ______.`': most_viewed_article_id,
'`The number of unique articles that have at least 1 rating ______.`': unique_articles,
'`The number of unique users in the dataset is ______`': unique_users,
'`The number of unique articles on the IBM platform`': total_articles
}
# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)<jupyter_output>It looks like you have everything right here! Nice job!
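As a small supplement (a sketch), ``Series.describe()`` summarizes the same per-user interaction distribution, including the quartiles, in one call:

```python
# Count, mean, quartiles, and max of the number of interactions per user,
# computed from the interaction_distribution series built above.
print(interaction_distribution.describe())
```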
<jupyter_text>### Part II: Rank-Based Recommendations
Unlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.
`1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.<jupyter_code>def get_top_articles(n, df=df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
# filter for top article titles
top_articles = list(df.title.value_counts().head(n).index)
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df=df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
# filter for top article ids
top_articles = list(df.article_id.value_counts().head(n).index)
return top_articles # Return the top article ids
print(get_top_articles(10))
print(get_top_article_ids(10))
# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)
# Test each of your three lists from above
t.sol_2_test(get_top_articles)<jupyter_output>Your top_5 looks like the solution list! Nice job.
Your top_10 looks like the solution list! Nice job.
Your top_20 looks like the solution list! Nice job.
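As an optional convenience (a sketch; ``get_top_articles_with_counts`` is a hypothetical helper, not part of the rubric), the ids, titles, and interaction counts can be returned together:

```python
def get_top_articles_with_counts(n, df=df):
    '''Return the n most-interacted-with articles with their ids, titles, and counts.'''
    counts = df.groupby(['article_id', 'title']).size().reset_index(name='interactions')
    return counts.sort_values('interactions', ascending=False).head(n)

get_top_articles_with_counts(10)
```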
<jupyter_text>### Part III: User-User Based Collaborative Filtering
`1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns.
* Each **user** should only appear in each **row** once.
* Each **article** should only show up in one **column**.
* **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1.
* **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**.
Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.<jupyter_code># create the user-article matrix with 1's and 0's
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item = pd.crosstab(df.user_id, df.article_id)
user_item = user_item.where(user_item == 0, 1)
return user_item # return the user_item matrix
user_item = create_user_item_matrix(df)
user_item.head()
## Tests: You should just need to run this cell. Don't change the code.
assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests! Please proceed!")<jupyter_output>You have passed our quick tests! Please proceed!
<jupyter_text>`2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users.
Use the tests to test your function.<jupyter_code>user_item.head()
def find_similar_users(user_id, user_item=user_item):
'''
INPUT:
user_id - (int) a user_id
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
Computes the similarity of every pair of users based on the dot product
    Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
similarity_list = []
index_list = []
for user_id_iterated, row in user_item.iterrows():
similarity_list.append(np.dot(user_item.loc[user_id,:],row))
index_list.append(user_id_iterated)
# sort by similarity
similarity_df = pd.DataFrame(similarity_list, columns=["dot_product"], index=index_list)
similarity_df = similarity_df.sort_values("dot_product", ascending=False)
# remove the own user's id
similarity_df = similarity_df.drop(axis="index", index=user_id)
# create list of just the ids
most_similar_users = list(similarity_df.index)
#most_similar_users = [float(x) for x in most_similar_users]
return most_similar_users # return a list of the users in order from most to least similar
# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))<jupyter_output>The 10 most similar users to user 1 are: [3933, 23, 3782, 203, 4459, 3870, 131, 4201, 46, 5041]
The 5 most similar users to user 3933 are: [1, 23, 3782, 203, 4459]
The 3 most similar users to user 46 are: [4201, 3782, 23]
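A vectorized variant (a sketch) computes all of the dot products at once with a single matrix product, which is considerably faster than the row-by-row loop in ``find_similar_users``:

```python
def find_similar_users_vectorized(user_id, user_item=user_item):
    '''Same ordering as find_similar_users, computed with one matrix product.'''
    # Dot product of the given user's row against every row of the matrix.
    sims = user_item.dot(user_item.loc[user_id])
    sims = sims.drop(user_id).sort_values(ascending=False)
    return list(sims.index)
```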
<jupyter_text>`3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user. <jupyter_code>selected_articles = user_item[user_item.index == 1]
selected_articles = selected_articles[selected_articles == 1]
selected_articles = selected_articles.dropna(axis="columns")
selected_articles.columns
def get_article_names(article_ids, df=df):
'''
INPUT:
article_ids - (list) a list of article ids
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the title column)
'''
# Your code here
article_names = []
df_getnames = df.drop_duplicates(subset="article_id").drop(axis="columns", columns=["user_id"])
getnames_dict = dict(zip(df_getnames.article_id.astype(str), df_getnames.title))
for i in article_ids:
article_names.append(getnames_dict.get(str(i)))
return article_names # Return the article names associated with list of article ids
def get_user_articles(user_id, user_item=user_item):
'''
INPUT:
user_id - (int) a user id
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
selected_articles = user_item[user_item.index == user_id]
selected_articles = selected_articles[selected_articles == 1]
selected_articles = selected_articles.dropna(axis="columns")
article_ids = selected_articles.columns
article_names = get_article_names(article_ids)
return article_ids, article_names # return the ids and names
def user_user_recs(user_id, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
Users who are the same closeness are chosen arbitrarily as the 'next' user
For the user where the number of recommended articles starts below m
and ends exceeding m, the last items are chosen arbitrarily
'''
# Find similar users
similar_users = find_similar_users(user_id)
# Prepare lists to operate with
seen_article_ids, seen_article_names = get_user_articles(user_id)
recs = []
# Iterate through similar users
for user in similar_users:
if len(recs) < m:
article_ids, article_names = get_user_articles(user)
for single_id in article_ids:
if single_id not in seen_article_ids and single_id not in recs:
if len(recs) < m:
recs.append(single_id)
else:
break
else:
break
return recs # return your recommendations for this user_id
# Check Results
get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1
# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names([1024.0, 1176.0, 1305.0, 1314.0, 1422.0, 1427.0])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names([1320.0, 232.0, 844.0])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set([1320.0, 232.0, 844.0])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set([1024.0, 1176.0, 1305.0, 1314.0, 1422.0, 1427.0])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests! Nice job!")<jupyter_output>If this is all you see, you passed all of our tests! Nice job!
<jupyter_text>`4.` Now we are going to improve the consistency of the **user_user_recs** function from above.
* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.<jupyter_code>def get_top_sorted_users(user_id, df=df, user_item=user_item):
'''
INPUT:
user_id - (int)
df - (pandas dataframe) df as defined at the top of the notebook
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
                    num_interactions - the number of articles viewed by the user
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# compute similarity of each user to the provided user
similarity_list = []
index_list = []
for user_id_iterated, row in user_item.iterrows():
similarity_list.append(np.dot(user_item.loc[user_id,:],row))
index_list.append(user_id_iterated)
# sort by similarity
neighbors_df = pd.DataFrame(similarity_list, columns=["similarity"], index=index_list).rename_axis("neighbor_id")
neighbors_df = neighbors_df.sort_values("similarity", ascending=False)
# remove the own user's id
neighbors_df = neighbors_df.drop(axis="index", index=user_id)
# add number of interactions
neighbors_df["num_interactions"] = [user_item.loc[i,:].sum() for i in neighbors_df.index]
neighbors_df = neighbors_df.sort_values(["similarity","num_interactions"], ascending=False)#.reset_index()
# reset user id to float
# neighbors_df["neighbor_id"] = neighbors_df["neighbor_id"].astype(float)
return neighbors_df # Return the dataframe specified in the doc_string
def sort_articles_by_interactions(article_list):
'''
INPUT:
article_list - List of article ids to sort (int)
OUTPUT:
sorted_list - (list) Sorted list of article ids, starting with articles that had most interactions to least
'''
article_count = [df.article_id.value_counts().loc[i] for i in article_list]
df_article_count = list(zip(article_list,article_count))
df_article_count = pd.DataFrame(df_article_count, columns=['article_id','interactions']).sort_values(["interactions"], ascending=False)
sorted_list = list(df_article_count.article_id)
return sorted_list
def user_user_recs_part2(user_id, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
    * Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
# Get top sorted users
similar_users = get_top_sorted_users(user_id).index
# Prepare lists to operate with
seen_article_ids, seen_article_names = get_user_articles(user_id)
recs = []
# Iterate through similar users
for user in similar_users:
if len(recs) < m:
article_ids, article_names = get_user_articles(user)
article_ids = sort_articles_by_interactions(article_ids)
for single_id in article_ids:
if single_id not in seen_article_ids and single_id not in recs:
if len(recs) < m:
recs.append(single_id)
else:
break
else:
break
rec_names = get_article_names(recs)
return recs, rec_names
# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)<jupyter_output>The top 10 recommendations for user 20 are the following article ids:
[1429.0, 1330.0, 1314.0, 1293.0, 1162.0, 1271.0, 43.0, 1351.0, 1368.0, 1305.0]
The top 10 recommendations for user 20 are the following article names:
['use deep learning for image classification', 'insights from new york car accident reports', 'healthcare python streaming application demo', 'finding optimal locations of new store using decision optimization', 'analyze energy consumption in buildings', 'customer demographics and sales', 'deep learning with tensorflow course by big data university', 'model bike sharing data with spss', 'putting a human face on machine learning', 'gosales transactions for naive bayes model']
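A shorter variant of ``sort_articles_by_interactions`` (a sketch) ranks the candidate ids directly against the global interaction counts:

```python
def sort_articles_by_interactions_vectorized(article_ids, df=df):
    '''Return the given article ids ordered from most to fewest total interactions.'''
    counts = df.article_id.value_counts()
    return list(counts.reindex(article_ids).sort_values(ascending=False).index)
```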
<jupyter_text>`5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.<jupyter_code>get_top_sorted_users(131).head(20)
### Tests with a dictionary of results
user1_most_sim = get_top_sorted_users(1).index[0] # Find the user that is most similar to user 1
user131_10th_sim = get_top_sorted_users(131).index[10] # Find the 10th most similar user to user 131
print(user1_most_sim)
print(user131_10th_sim)
## Dictionary Test Here
sol_5_dict = {
'The user that is most similar to user 1.': user1_most_sim,
'The user that is the 10th most similar to user 131': user131_10th_sim,
}
t.sol_5_test(sol_5_dict)<jupyter_output>This all looks good! Nice job!
<jupyter_text>`6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.

**Since a new user hasn't had any interactions with articles yet, we are not able to calculate a useful similarity to other users (other users that haven't seen any articles would be similar, but would have no articles to recommend from). That's why I would suggest recommending the most-viewed articles on the platform to new users. The most viewed articles seem to be interesting to a large share of people on the platform, so the chances are higher that those articles will be interesting than articles with only a few views.**

`7.` Using your existing functions, provide the top 10 recommended articles you would provide for a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.<jupyter_code>new_user = '0.0'
# What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles.
# Provide a list of the top 10 article ids you would give to
new_user_recs = list(df.article_id.value_counts().index[0:10].astype(str)) # Your recommendations here
assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users."
print("That's right! Nice job!")<jupyter_output>That's right! Nice job!
<jupyter_text>### Part V: Matrix Factorization
In this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.
`1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook. <jupyter_code># Load the matrix here
user_item_matrix = pd.read_pickle('user_item_matrix.p')
# quick look at the matrix
user_item_matrix.head()<jupyter_output><empty_output><jupyter_text>`2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.<jupyter_code># Perform SVD on the User-Item Matrix
u, s, vt = np.linalg.svd(user_item_matrix) # use the built in to get the three matrices
s.shape, u.shape, vt.shape<jupyter_output><empty_output><jupyter_text>**Unlike in the lesson, our user-item matrix only contains 1s and 0s indicating whether or not an interaction happened, so we don't have any kind of rating by users and don't know how much a user actually liked an article they interacted with in the IBM Watson library.**

`3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.<jupyter_code>num_latent_feats = np.arange(10,700+10,20)
sum_errs = []
for k in num_latent_feats:
# restructure with k latent features
s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]
# take dot product
user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))
# compute error for each prediction to actual value
diffs = np.subtract(user_item_matrix, user_item_est)
# total errors and keep track of them
err = np.sum(np.sum(np.abs(diffs)))
sum_errs.append(err)
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');<jupyter_output><empty_output><jupyter_text>`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below.
Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below:
* How many users can we make predictions for in the test set?
* How many users are we not able to make predictions for because of the cold start problem?
* How many articles can we make predictions for in the test set?
* How many articles are we not able to make predictions for because of the cold start problem?<jupyter_code>df_train = df.head(40000)
df_test = df.tail(5993)
def create_test_and_train_user_item(df_train, df_test):
'''
INPUT:
df_train - training dataframe
df_test - test dataframe
OUTPUT:
user_item_train - a user-item matrix of the training dataframe
(unique users for each row and unique articles for each column)
user_item_test - a user-item matrix of the testing dataframe
(unique users for each row and unique articles for each column)
test_idx - all of the test user ids
test_arts - all of the test article ids
'''
# create user-item matrix for set and training set
user_item_train = create_user_item_matrix(df_train)
user_item_test = create_user_item_matrix(df_test)
test_idx = user_item_test.index
test_arts = user_item_test.columns
return user_item_train, user_item_test, test_idx, test_arts
user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)
# users we can make predictions for in the test set
user_predictions = len(set(user_item_train.index).intersection(set(test_idx)))
# movies we can make predictions for in the test set
movie_predictions = len(set(user_item_train.columns).intersection(set(test_arts)))
# users we can not make predictions for in the test set
no_user_predictions = user_item_test.shape[0]-user_predictions
# movies we can not make predictions for in the test set
no_movie_predictions = user_item_test.shape[1]-movie_predictions
user_predictions, movie_predictions, no_user_predictions, no_movie_predictions
# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0
sol_4_dict = {
'How many users can we make predictions for in the test set?':c,
'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,
'How many movies can we make predictions for in the test set?': b,
'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d
}
t.sol_4_test(sol_4_dict)<jupyter_output>Awesome job! That's right! All of the test movies are in the training data, but there are only 20 test users that were also in the training set. All of the other users that are in the test set we have no data on. Therefore, we cannot make predictions for these users using SVD.
<jupyter_text>`5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.
Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data. <jupyter_code># fit SVD on the user_item_train matrix
u_train, s_train, vt_train = np.linalg.svd(user_item_train) # fit svd similar to above then use the cells below
# These cells are to see how well you can use the training decomposition to predict on test data
# subset of rows in the user_item_test dataset that we can predict
common_idx, common_arts = user_item_train.index.isin(test_idx), user_item_train.columns.isin(test_arts)
u_test, vt_test = u_train[common_idx, :], vt_train[:, common_arts]
train_idx, train_arts = set(user_item_train.index.values), set(user_item_train.columns.values)
intersecting_idx = train_idx.intersection(set(test_idx))
user_item_test2 = user_item_test.loc[intersecting_idx]
# instantiate error lists
sum_errors_train, sum_errors_test, total_errors = [], [], []
# set up list of different features
num_latent_feats=np.arange(10,710,20)
for k in num_latent_feats:
# restructure with k latent features
u_new_train, s_new_train, vt_new_train = u_train[:, :k], np.diag(s_train[:k]), vt_train[:k, :]
u_new_test, vt_new_test = u_test[:, :k], vt_test[:k, :]
# take the dot product
user_item_train_est = np.around(np.dot(np.dot(u_new_train, s_new_train), vt_new_train))
user_item_test_est = np.around(np.dot(np.dot(u_new_test, s_new_train), vt_new_test))
# compute error for each prediction to actual value
diffs_train = np.subtract(user_item_train, user_item_train_est)
diffs_test = np.subtract(user_item_test2.loc[intersecting_idx, :], user_item_test_est)
err_train = np.sum(np.sum(np.abs(diffs_train)))
err_test = np.sum(np.sum(np.abs(diffs_test)))
# append training and testing errors to error lists
sum_errors_train.append(err_train)
sum_errors_test.append(err_test)
# set up data to plot train and test accuracy
plt.plot(num_latent_feats, 1 - (np.array(sum_errors_train)/user_item_train.size), label='Train')
plt.plot(num_latent_feats, 1 - (np.array(sum_errors_test)/user_item_test2.size), label='Test')
# layout settings
plt.title('Accuracy vs. Number of Latent Features')
plt.xlabel('Number of Latent Features')
plt.ylabel('Accuracy')
plt.legend()
plt.show()<jupyter_output><empty_output><jupyter_text>`6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? **By comparing the training and test accuracy curves, we can observe that training accuracy increases while test accuracy decreases as the number of latent features grows. This means the model is overfitting, so in this case it might be a good idea to keep the number of latent features as low as possible.**
**One limitation that has to be mentioned as well is that we could only make predictions for 20 test users. For more extensive work it could be a good idea to implement and compare different recommendation engines, also to overcome the cold start problem.** <jupyter_code>from subprocess import call
call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])<jupyter_output><empty_output>
| no_license | /Recommendations_with_IBM_KF.ipynb | keanukf/udacity_datascientist_recommendationengines | 19 |
<jupyter_start><jupyter_text># Exploratory Data Analysis
## Data Cleaning: Deduplication<jupyter_code>#Sorting the data with ProductId
sorted_data = amazon_data.sort_values('ProductId',axis=0,ascending=True,inplace=False)
#Removing the data for reviews on similar products with same review
final_data = sorted_data.drop_duplicates(subset={'UserId','ProfileName','Time','Text'},keep='first',inplace=False)
final_data.shape
#Removing the data where HelpfulnessNumerator > HelpfulnessDenominator
cleaned_data = final_data[final_data.HelpfulnessNumerator<=final_data.HelpfulnessDenominator]
cleaned_data.shape
cleaned_data.head()
sort = cleaned_data.sort_values('Id',kind='quicksort',ascending = True)
sort[(sort.Id>=150450)&(sort.Id<=150550)]
#Removing the reviews for book products
cleaned_data = sort.drop(sort.index[(sort.Id>=150493)&(sort.Id<=150529)])
cleaned_data[(cleaned_data.Id>=150450)&(cleaned_data.Id<=150650)]
cleaned_data.shape<jupyter_output><empty_output><jupyter_text># Text Preprocessing: Stemming, Lemmatization, Stop Words<jupyter_code>#Importing the required modules for Text Preprocessing
import string
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
#Set of all Stop Words in English
stop = set(stopwords.words('english'))
print(stop)
#Building the SnowBallStemmer
sno_stem = SnowballStemmer('english')
sno_stem.stem('helpful')
import re
#Removing Html tags
def cleanHtml(sentence):
cleanText = re.sub('<.*?>','',sentence)
return cleanText
#Removing Punctuations
def cleanPunc(word):
cleaned = re.sub(r'[#|!|?|\'|@|"]','',word)
    cleaned = re.sub(r'[,|.|(|)|\|/]',' ',cleaned) #Apply the second substitution to the already cleaned string so the first one is not lost
return cleaned
#Actual Preprocessing
stemmed_word = ''
i = 0
str1 = ' '
final_string=[]
all_positive_words=[]
all_negative_words=[]
stemmed_word=''
for sentence in cleaned_data.Text.values:
filtered_sentence=[] #Values to be stored after filtering
sent = cleanHtml(sentence) #Cleaning the html tags from the reviews
    for word in sent.split(): #Looping on each word of the HTML-cleaned sentence
for cleaned_word in cleanPunc(word).split(): #Removing any punctuations
if((cleaned_word.isalpha()) & (len(cleaned_word)>2)):
if(cleaned_word.lower() not in stop):
stemmed_word = (sno_stem.stem(cleaned_word.lower())).encode('utf8') #Converting to lowercase
filtered_sentence.append(stemmed_word)
if (cleaned_data.Score.values)[i] == 'positive':
all_positive_words.append(stemmed_word) #Inserting all the positive reviews
if (cleaned_data.Score.values)[i] == 'negative':
all_negative_words.append(stemmed_word) #Inserting all the negative reviews
else:
continue
else:
continue
str1 = b" ".join(filtered_sentence) #Converting to binary string format
final_string.append(str1)
i=i+1
cleaned_data['CleanedText'] = final_string #Inserting the cleaned text to a new column
#Saving data to a new csv file
cleaned_data.to_csv('final_data.csv',index=False)
#Loading the data for all the future uses
cleaned_data = pd.read_csv('final_data.csv')
cleaned_data = cleaned_data.drop([31393,294985,341395])
pos_rev = cleaned_data[cleaned_data.Score=='positive'] #Filtering the positive reviews
neg_rev = cleaned_data[cleaned_data.Score=='negative'] #Filtering the negative reviews
print(pos_rev.shape)
neg_rev.shape
#Balancing the positive and negative reviews
pos_rev = pos_rev[:20000] #Selecting 20000 positive reviews
neg_rev = neg_rev[:20000] #Selecting 20000 negative reviews
balanced_data = pd.concat([pos_rev,neg_rev]) #Concating positive and negative reviews
balanced_data.shape<jupyter_output><empty_output><jupyter_text>#### Sorting data as per Time Stamps <jupyter_code>#Sorting the data as per TimeStamp
balanced_sorted_data = balanced_data.sort_values(by='Time',kind='quicksort',ascending=True,axis=0,inplace=False).reset_index(drop=True)
balanced_sorted_data.head()
balanced_data_score = balanced_sorted_data.Score
balanced_data_score.shape
balanced_data_score.value_counts()
#Dividing the data to train and test data
balanced_train_data = balanced_sorted_data.iloc[0:28000,:]
balanced_train_label = balanced_data_score[:28000]
balanced_test_data = balanced_sorted_data.iloc[28000:40000,:]
balanced_test_label = balanced_data_score[28000:40000]<jupyter_output><empty_output><jupyter_text># Bag Of Words(BOW)<jupyter_code>#Building the vectorizer on train data
from sklearn.feature_extraction.text import CountVectorizer
bow_model = CountVectorizer()
bow_train_data = bow_model.fit_transform(balanced_train_data.CleanedText.values)
bow_train_data.shape
#Transforming the test data
bow_test_data = bow_model.transform(balanced_test_data.CleanedText.values)
bow_test_data.shape
#Applying KNN
from sklearn.cross_validation import cross_val_score #Note: in scikit-learn >= 0.20 this lives in sklearn.model_selection
from sklearn.neighbors import KNeighborsClassifier
list_vals = list(range(0,50))
neighbors = list(filter(lambda x:x%2!=0,list_vals))
cv_scores=[]
for i in neighbors:
knn_classifier = KNeighborsClassifier(i)
scores = cross_val_score(knn_classifier,bow_train_data,balanced_train_label,cv=10,scoring='accuracy')
cv_scores.append(scores.mean())
MSE = [1-x for x in cv_scores]
optimal_k = neighbors[MSE.index(min(MSE))]
optimal_k
print("The optimal K found from 10 fold cross validation is --->"+str(optimal_k))
#Training the model with optimal K
from sklearn.metrics import accuracy_score
bow_knn = KNeighborsClassifier(optimal_k) #Using the optimal k found above (hard-coded as 23 in the original run)
bow_knn.fit(bow_train_data,balanced_train_label)
bow_knn_labels = bow_knn.predict(bow_test_data)
#Calculating the accuracy score for test data
score = accuracy_score(bow_knn_labels,balanced_test_label)*float(100)
print("The accuracy score for test data in Bag Of Words model is ---> "+str(score))
#Checking accuracy for 40% training data (As whole training set throws MemoryError)
bow_train_40 = bow_train_data[0:8000,]
bow_train_label_40 = balanced_train_label[0:8000]
#Accuracy for training data
bow_knn_trainl = bow_knn.predict(bow_train_40)
score_t = accuracy_score(bow_knn_trainl,bow_train_label_40)*float(100)
print("The accuracy score for train data in Bag Of Words model is ---> "+str(score_t))
#Error graph for each k
plt.scatter(neighbors,MSE)
plt.xlabel('Different K Values')
plt.ylabel('Mean Squared Error')
plt.title('Error Graph')
plt.show()
#Building the confusion matrix
import seaborn as sns
from sklearn.metrics import confusion_matrix
cnf = confusion_matrix(balanced_test_label,bow_knn_labels)
sns.heatmap(cnf,annot=True,cmap=plt.cm.gray)
plt.ylabel('Actual Values')
plt.xlabel('Predicted Values')
plt.title('Confusion Matrix')
plt.show()<jupyter_output><empty_output><jupyter_text>## Conclusion:
### 1.The model provides correct values in 7942 cases out of 12000 cases.
### 2. The accuracy of the model is 66.18333333333334% which is average.<jupyter_code>#Summary of BOW KNN
knn_summary = {'Type':['Model','Hyper Parameter','Train Error','Test Error'],'Value':['BOW KNN',str(optimal_k),str(100-score_t),str(100-score)]}
knn_summary_df = pd.DataFrame(knn_summary)
knn_summary_df<jupyter_output><empty_output><jupyter_text># TF-IDF Model<jupyter_code>#Vectorizing the train data
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_model = TfidfVectorizer()
tfidf_train_data = tfidf_model.fit_transform(balanced_train_data.CleanedText.values)
tfidf_train_data.shape
#Transforming the test data
tfidf_test_data = tfidf_model.transform(balanced_test_data.CleanedText.values)
tfidf_test_data.shape
from sklearn.cross_validation import cross_val_score
#Applying 10 fold cross validation for findinf optimal k
list_vals = list(range(0,50))
neighbors_tfidf = list(filter(lambda x:x%2!=0,list_vals))
cv_scores=[]
for i in neighbors_tfidf:
knn_classifier = KNeighborsClassifier(i)
scores = cross_val_score(knn_classifier,tfidf_train_data,balanced_train_label,cv=10,scoring='accuracy')
cv_scores.append(scores.mean())
MSE = [1-x for x in cv_scores]
optimal_k_tfidf = neighbors_tfidf[MSE.index(min(MSE))]
optimal_k_tfidf
print("The optimal k found for Tf-IDF after 10 cross validations is --->"+str(optimal_k_tfidf))
#Building the tfidf model with optimal k
tfidf_knn = KNeighborsClassifier(optimal_k_tfidf)
tfidf_knn.fit(tfidf_train_data,balanced_train_label)
tfidf_knn_data = tfidf_knn.predict(tfidf_test_data)
#Finding the accuracy score for test data in TFIDF model
from sklearn.metrics import accuracy_score
score = accuracy_score(tfidf_knn_data,balanced_test_label)*float(100)
print("The accuracy score for test data in TF-IDF model is --->"+str(score))
#Finding the train accuracy with 40% train data
tfidf_knn_40 = tfidf_train_data[0:8000,]
tfidf_knn_label_40 = balanced_train_label[0:8000]
#Getting the accuracy score for training data
tfidf_knn_trainl = tfidf_knn.predict(tfidf_knn_40)
score_t = accuracy_score(tfidf_knn_trainl,tfidf_knn_label_40)*float(100)
print("The accuracy score for train data in TF_IDF model is --->"+str(score_t))
#Error graph for each k
plt.scatter(neighbors_tfidf,MSE)
plt.xlabel('Different K Values')
plt.ylabel('Mean Squared Error')
plt.title('Error Graph')
plt.show()
#Building the Confusion Matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
cnf_tfidf = confusion_matrix(balanced_test_label,tfidf_knn_data)
sns.heatmap(cnf_tfidf,cmap=plt.cm.gray,annot=True)
plt.xlabel('Predicted Values')
plt.ylabel('Actual Values')
plt.title('Confusion Matrix')
plt.show()
cnf_tfidf<jupyter_output><empty_output><jupyter_text>## Conclusion:
### 1. The model predicts 8784 of the 12000 test cases correctly.
### 2. The accuracy of the model is 73.2%, which is better than the Bag of Words model.
tfidf_sum = {'Type':['Model','Hyper Parameter','Train Error','Test Error'],'Value':['TFIDF KNN',str(optimal_k_tfidf),str(100-score_t),str(100-score)]}
tfidf_sum_df = pd.DataFrame(tfidf_sum)
tfidf_sum_df<jupyter_output><empty_output><jupyter_text># Word2Vec<jupyter_code>#Filtering the sentences for using in word2vec
from gensim.models import Word2Vec
final_sentences = []
for sentence in balanced_sorted_data.Text.values:
sent = cleanHtml(sentence) #Cleaning the html tags from the sentences
filtered_sentence = [] #The filtered sentences to be stored in this list
    for word in sent.split(): #Looping through each word of the HTML-cleaned sentence
for cleaned_word in cleanPunc(word).split(): #Cleaning the punctuations from each word
if(cleaned_word.isalpha()):
filtered_sentence.append(cleaned_word.lower()) #Converting the cleaned word to lowercase
else:
continue
final_sentences.append(filtered_sentence) #Inserting each filtered sentence to the final list
len(final_sentences)
#Training Word2Vec model with 70% of the total sentences
train_sentences = final_sentences[:28000]
w2v_model = Word2Vec(train_sentences,size = 50, workers = 4)
words = set(w2v_model.wv.vocab)
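# Quick sanity check of the trained embeddings (illustrative sketch; the probe word 'great' is an
# assumption about the review vocabulary, so it is guarded with a membership test).
if 'great' in words:
    print(w2v_model.wv.most_similar('great', topn=5))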
len(words)<jupyter_output><empty_output><jupyter_text># Average Word2Vec<jupyter_code>#Building model for average word2vec
sent_vectors = []
for sentence in final_sentences:
sentence_vectors = np.zeros(50) #Building word vectors and initializing with 0
count = 0 #Counting the total number of words in the sentence
for word in sentence:
try:
            w2v = w2v_model.wv[word] #Applying word2vec for each word
            count+=1 #Counting only the words that are actually in the Word2Vec vocabulary
            sentence_vectors+=w2v #Adding word2vec of each word to sentence vectors
except:
pass
    if count != 0:
        sentence_vectors/=count #Computing average word2vec (guarding against sentences with no in-vocabulary words)
sent_vectors.append(sentence_vectors)
sent_vectors_df = pd.DataFrame(sent_vectors)
sent_vectors_df.shape
#Building 70% train data set
avgw2v_train_data = sent_vectors_df.iloc[0:28000,:]
avgw2v_train_label = balanced_data_score[0:28000]
print(avgw2v_train_data.shape)
avgw2v_train_label.shape
#Building the 30% test data set
avgw2v_test_data = sent_vectors_df.iloc[28000:40000,:]
avgw2v_test_label = balanced_data_score[28000:40000]
print(avgw2v_test_data.shape)
avgw2v_test_label.shape
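# Illustrative alternative (a sketch): the 70/30 split above is positional on sorted data; a shuffled split
# via sklearn's train_test_split is shown under new, hypothetical variable names so it does not overwrite
# the split actually used below.
from sklearn.model_selection import train_test_split
shuf_train_data, shuf_test_data, shuf_train_label, shuf_test_label = train_test_split(
    sent_vectors_df, balanced_data_score, test_size=0.3, random_state=42)
print(shuf_train_data.shape, shuf_test_data.shape)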
#Applying 10fold cross validation to find the optimal K
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
list_vals_avgw2v = list(range(0,50))
neighbors_avgw2v = list(filter(lambda x:x%2!=0,list_vals_avgw2v))
cv_scores=[]
for i in neighbors_avgw2v:
knn_classifier = KNeighborsClassifier(i)
scores = cross_val_score(knn_classifier,avgw2v_train_data,avgw2v_train_label,cv=10,scoring='accuracy')
cv_scores.append(scores.mean())
MSE = [1-x for x in cv_scores]
optimal_k_avgw2v = neighbors_avgw2v[MSE.index(min(MSE))]
optimal_k_avgw2v
print("The optimal k found for Average Word2Vec after 10 cross validations is --->"+str(optimal_k_avgw2v))
#Building the avgw2v model with optimal k
avgw2v_knn = KNeighborsClassifier(optimal_k_avgw2v)
avgw2v_knn.fit(avgw2v_train_data,avgw2v_train_label)
avgw2v_knn_data = avgw2v_knn.predict(avgw2v_test_data)
#Finding the accuracy score for test data in Average Word2Vec model
from sklearn.metrics import accuracy_score
score = accuracy_score(avgw2v_knn_data,avgw2v_test_label)*float(100)
print("The accuracy score for test data in Average Word2Vec model is --->"+str(score))
#Finding the train accuracy with 40% train data
avgw2v_knn_40 = avgw2v_train_data.iloc[0:8000,]
avgw2v_knn_label = avgw2v_train_label[0:8000]
#Accuracy score for train data
avgw2v_knn_trainl = avgw2v_knn.predict(avgw2v_knn_40)
score_t = accuracy_score(avgw2v_knn_trainl,avgw2v_knn_label)*float(100)
print("The accuracy score for train data in Average Word2Vec model is --->"+str(score_t))
#Train error on various k values
plt.scatter(neighbors_avgw2v,MSE)
plt.xlabel('Different K values')
plt.ylabel('MSE')
plt.title('Error with different k values')
plt.show()
#Building the confusion matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
cnf_avgw2v = confusion_matrix(avgw2v_test_label,avgw2v_knn_data)
sns.heatmap(cnf_avgw2v,cmap=plt.cm.gray,annot=True)
plt.xlabel('Predicted Values')
plt.ylabel('Actual Values')
plt.title('Confusion Matrix')
plt.show()
cnf_avgw2v<jupyter_output><empty_output><jupyter_text>## Conclusion:
### 1. It predicts 9249 values correctly out of 12000.
### 2. The accuracy is 77.075%.
avgw2v_sum = {'Type':['Model','Hyper Parameter','Train Error','Test Error'],'Value':['AVG W2V KNN',str(optimal_k_avgw2v),str(100-score_t),str(100-score)]}
avgw2v_sum_df = pd.DataFrame(avgw2v_sum)
avgw2v_sum_df<jupyter_output><empty_output><jupyter_text># Weighted TFIDF W2V<jupyter_code>#Re-modeling TfidfVectorizer on train data
from sklearn.feature_extraction.text import TfidfVectorizer
tf_idf_model = TfidfVectorizer() #Initializing the TfidfVectorizer
tf_idf_model_data = tf_idf_model.fit(balanced_train_data.Text.values) #Training the vectorizer
#Building the vectors for final data
tf_idf_model_final = tf_idf_model.transform(balanced_sorted_data.Text.values)
tf_idf_model_final.shape
#Building the Weighted TFIDF W2V
tf_features = tf_idf_model.get_feature_names() #Retrieving the tfidf feature names
weighted_sentences = [];
row = 0;
for sentence in final_sentences:
sum_vector = np.zeros(50) #Building word vectors and initializing with 0
weighted_sum = 0; #Initializing the weighted sum to 0
for word in sentence: #Looping through each word
try:
w2v = w2v_model.wv[word] #Applying the word2vec model on each word
tf_idf = tf_idf_model_final[row,tf_features.index(word)] #Retrieving the tfidf value for the corresponding word
sum_vector += (w2v*tf_idf) #Computing tfidf weighted w2v
weighted_sum += tf_idf #Computing the total tfidf weight
except:
pass
sum_vector/=weighted_sum #Final tfidf weighted w2v
weighted_sentences.append(sum_vector)
row += 1
#Converting the weighted sentences list to a DataFrame
weighted_df = pd.DataFrame(weighted_sentences)
weighted_df.shape
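# Performance note (illustrative sketch): tf_features.index(word) in the loop above is a linear scan over
# the whole vocabulary for every word. Precomputing a word -> column index mapping makes the weighting loop
# much faster; inside the loop the lookup would then be tf_idf_model_final[row, tf_feature_index[word]].
tf_feature_index = {word: idx for idx, word in enumerate(tf_features)}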
#Building 70% train data set
tfidfw2v_train_data = weighted_df.iloc[0:28000,:]
tfidfw2v_train_label = balanced_data_score[0:28000]
print(tfidfw2v_train_data.shape)
tfidfw2v_train_label.shape
#Building 30% test data set
tfidfw2v_test_data = weighted_df.iloc[28000:40000]
tfidfw2v_test_label = balanced_data_score[28000:40000]
print(tfidfw2v_test_data.shape)
tfidfw2v_test_label.shape
#Applying 10fold cross validation on tfidfw2v model to find the optimal K
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
list_vals_tfidfw2v = list(range(0,50))
neighbors_tfidfw2v = list(filter(lambda x:x%2!=0,list_vals_tfidfw2v))
cv_scores=[]
for i in neighbors_tfidfw2v:
knn_classifier = KNeighborsClassifier(i)
scores = cross_val_score(knn_classifier,tfidfw2v_train_data,tfidfw2v_train_label,cv=10,scoring='accuracy')
cv_scores.append(scores.mean())
MSE = [1-x for x in cv_scores]
optimal_k_tfidfw2v = neighbors_tfidfw2v[MSE.index(min(MSE))]
optimal_k_tfidfw2v
print("The optimal k found for Weighted TFIDF Word2Vec after 10 cross validations is --->"+str(optimal_k_tfidfw2v))
#Building the tfidfw2v model with optimal k
tfidfw2v_knn = KNeighborsClassifier(optimal_k_tfidfw2v)
tfidfw2v_knn.fit(tfidfw2v_train_data,tfidfw2v_train_label)
tfidfw2v_knn_data = tfidfw2v_knn.predict(tfidfw2v_test_data)
#Finding the accuracy score for test data in TFIDF model
from sklearn.metrics import accuracy_score
score = accuracy_score(tfidfw2v_knn_data,tfidfw2v_test_label)*float(100)
print("The accuracy score for test data in Weighted TFIDF Word2Vec model is --->"+str(score))
#Finding the train accuracy with 40% train data
tfidfw2v_knn_40 = tfidfw2v_train_data.iloc[0:8000,]
tfidfw2v_knn_label = tfidfw2v_train_label[0:8000]
#Accuracy score for train data
tfidfw2v_knn_trainl = tfidfw2v_knn.predict(tfidfw2v_knn_40)
score_t = accuracy_score(tfidfw2v_knn_trainl,tfidfw2v_knn_label)*float(100)
print("The accuracy score for train data in Average Word2Vec model is --->"+str(score_t))
#Train error on various k values
plt.scatter(neighbors_tfidfw2v,MSE)
plt.xlabel('Different K values')
plt.ylabel('MSE')
plt.title('Error with different k values')
plt.show()
#Building the confusion matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
cnf_tfidfw2v = confusion_matrix(tfidfw2v_test_label,tfidfw2v_knn_data)
sns.heatmap(cnf_tfidfw2v,cmap=plt.cm.gray,annot=True)
plt.xlabel('Predicted Values')
plt.ylabel('Actual Values')
plt.title('Confusion Matrix')
plt.show()
cnf_tfidfw2v<jupyter_output><empty_output><jupyter_text># Conclusion### 1.The Weighted TFIDF Word2Vec model on KNeighborsClassifier predicts 9114 times correctly out of 12000 times
### 2. The accuracy is about 75.95%.
tfidfw2v_sum = {'Type':['Model','Hyper Parameter','Train Error','Test Error'],'Value':['TFIDF W2V KNN',str(optimal_k_tfidfw2v),str(100-score_t),str(100-score)]}
tfidfw2v_sum_df = pd.DataFrame(tfidfw2v_sum)
tfidfw2v_sum_df<jupyter_output><empty_output><jupyter_text># Final Summary<jupyter_code>#Final Summary of all the models
summary = {'Model':['Bag Of Words','Tfidf','Average Word2Vec','Tfidf W2V KNN'],'Hyper Parameter(k)':['23','45','35','49'],'Train Error':['23.837500000000006','21.33749999999999','20.83749999999999','22.125'],'Test Error':['33.81666666666666','26.799999999999997','22.924999999999997','24.05000000000001']}
summary_df = pd.DataFrame(summary)
summary_df<jupyter_output><empty_output>
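# Illustrative sketch (assumes the four per-model summary frames from the earlier cells are still in memory):
# the same comparison table can be assembled from them instead of hard-coding the numbers.
combined_summary = pd.concat(
    [df.set_index('Type')['Value'] for df in [knn_summary_df, tfidf_sum_df, avgw2v_sum_df, tfidfw2v_sum_df]],
    axis=1, keys=['BOW KNN', 'TFIDF KNN', 'AVG W2V KNN', 'TFIDF W2V KNN'])
combined_summary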
|
no_license
|
/amazon_food_reviews_knn.ipynb
|
ashispnayak/Amazon_food_reviews
| 13 |
<jupyter_start><jupyter_text># Reproduce G. F. Tierney, et. al , Nanoscale Adv., 2019,1, 2546-2552
This notebook demonstrates reproducing the results of the paper with [Larch](https://xraypy.github.io/xraylarch/xafs/) using the corresponding published dataset (raw data).
George F. Tierney, Donato Decarolis, Norli Abdullah, Scott M. Rogers, Shusaku Hayama, Martha Briceno de Gutierrez, Alberto Villa, C. Richard A. Catlow, Paul Collier, Nikolaos Dimitratos and Peter P. Wells (2019) **Extracting structural information of Au colloids at ultra-dilute concentrations: identification of growth during nanoparticle immobilization.** Nanoscale Advances. V. 1. pp. 2546-2552. DOI: [10.1039/C9NA00159J](https://doi.org/10.1039/C9NA00159J).
- **Corresponding author**: Nikolaos Dimitratos
- **E-mail**: [email protected]
The data used for reproducing the results was published in the Southampton Instituional Repository DOI: [10.5258/SOTON/D0921](https://eprints.soton.ac.uk/431377/)
For more details about Larch, see [Larch Website](https://xraypy.github.io/xraylarch/xafs/)
|CDI Entity |Link |DOI|
|:-----------|:----------|:---|
|Publication |[cdi_pub: 267](http://cdi.ukcatalysishub.org/articles/47)|[10.1039/C9NA00159J](https://doi.org/10.1039/C9NA00159J) |
|Data Object |[cdi_do: 211](http://cdi.ukcatalysishub.org/datasets/211)| [10.5258/SOTON/D0921](https://eprints.soton.ac.uk/431377/)|## Reproducing XAS analysis
The XAS results of the paper are in [Figure 3](https://pubs.rsc.org/image/article/2019/na/c9na00159j/c9na00159j-f3_hi-res.gif): with the caption and the metadata indicate which data can be used for reproducing the results presented.
**Figure Caption**
> **Fig. 3** XAFS taken at the Au L3-edge of the colloidal Au samples; **(a & d) XANES spectra for the colloidal Au** detailing change as a result of increasing synthesis temperature and Au concentration, **(b & e) the normalised first derivative** of the absorption for the temperature and Au concentration influenced colloids respectively and (c & f) **experimental Fourier transform (FT) chi(k) data** of the corresponding EXAFS signals for colloidal Au showing the influence of temperature and Au concentration.
<jupyter_code># Library with the functions that replicate athena:
# normalisation, merging, re-binning, LCF
# and visualisation (plotting)
import lib.manage_athena as athenamgr
import lib.manage_fit as fitmgr
# File handling
from pathlib import Path
#plotting library
import matplotlib.pyplot as plt
# inline: shows plot in notebook
# tk: shows plot in popup
%matplotlib inline
# custom plot functions for paper
import paper03_plots as c_plot
gold_data = r".\wf_data\pub_267\DS0211\Compiled_XAS_data_Colloid_and_TiO2_supported_Au.prj"
# read the input file
data_prj = athenamgr.read_project(gold_data)
data_mappings={"A1":"d_0_1mM_Au_1C_Colloid",
"A2":"d_0_1mM_Au_25C_Colloid",
"A3":"d_0_1mM_Au_50C_Colloid",
"A4":"d_0_1mM_Au_75C_Colloid",
"B":"d_0_05mM_Au_1C_Colloid",
"C":"d_1_25mM_Au_1C_Colloid",
"Au Foil":"Au_foil",}
data_groups = {}
for a_mapping in data_mappings:
data_groups[a_mapping] = athenamgr.calc_with_defaults(athenamgr.get_group(data_prj, data_mappings[a_mapping]))
data_groups[a_mapping].filename = a_mapping
<jupyter_output><empty_output><jupyter_text>### Reproduce figure 3a
<jupyter_code>#define dict to set colours and line styles
plot_groups = {"A1":["b", "solid"], "A2":["y", "solid"],
"A3":['r', "solid"], "A4":["lime", "solid"],
"Au Foil":["black", "dashed"]}
plt = c_plot.plot_normal_w_inset(data_groups, plot_groups,(7,8),[11900, 11960],[0,1.1], [11940, 11955], [0.85,1.06])
plt.show()<jupyter_output><empty_output><jupyter_text>### Reproduce figure 3b
<jupyter_code> # large plot
plt = c_plot.plot_derivative(data_groups, plot_groups, (6,8),[11900, 11980],[-0.05,0.14])
plt.show()
<jupyter_output><empty_output><jupyter_text>### Reproduce figure 3c
<jupyter_code>#plot chi magnitude (FT)
plt = c_plot.plot_chi_magnitude(data_groups, plot_groups, (6,8),[0,6],[0,1.14])
plt.show()<jupyter_output><empty_output><jupyter_text>### Reproduce figure 3d
<jupyter_code>#define dict to set colours and line styles
plot_groups = {"A1":["b", "solid"], "B":["y", "solid"],
"C":['r', "solid"], "Au Foil":["black", "dashed"]}
plt = c_plot.plot_normal_w_inset(data_groups, plot_groups,(7,8),[11900, 11960],[0,1.1], [11940, 11955], [0.85,1.06])
plt.show()<jupyter_output><empty_output><jupyter_text>### Reproduce figure 3e
<jupyter_code> # large plot
plt = c_plot.plot_derivative(data_groups, plot_groups, (6,8),[11900, 11980],[-0.05,0.14])
plt.show()
<jupyter_output><empty_output><jupyter_text>### Reproduce figure 3f
<jupyter_code>#plot chi magnitude (FT)
plt = c_plot.plot_chi_magnitude(data_groups, plot_groups, (6,8),[0,6],[0,1.14])
plt.show()<jupyter_output><empty_output>
|
no_license
|
/psdi_phase_1/larch/Paper 03 Reproduce XAS.ipynb
|
scman1/XAS-Workflow-Demo
| 7 |
<jupyter_start><jupyter_text>## DataFrame: Data Extraction
1-The `.set_index()` and `.reset_index()` Methods
2-Retrieve Rows by Index Label with `.loc[]`
3-Retrieve Row(s) by Index Position with `iloc`
4-Second Argument to `loc` and `iloc` Accessors
5-Set New Values for a Specific Cell
6-Set Multiple Values in DataFrame
7-Rename Index Labels or Columns in a DataFrame
8-Delete Rows or Columns from a DataFrame
9-Create Random Sample
10-The `.nsmallest()` and `.nlargest()` Methods
11-Filtering with the `where` Method
12-Filtering with the `.query()` Method
13-A Review of the `.apply()` Method on Single Columns
14-The `.apply()` Method with Row Values
15-The `.copy()` Method<jupyter_code>import pandas as pd
bond = pd.read_csv("files/jamesbond.csv")
bond.head(3)
bond.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 26 entries, 0 to 25
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Film 26 non-null object
1 Year 26 non-null int64
2 Actor 26 non-null object
3 Director 26 non-null object
4 Box Office 26 non-null float64
5 Budget 26 non-null float64
6 Bond Actor Salary 18 non-null float64
dtypes: float64(3), int64(1), object(3)
memory usage: 1.5+ KB
<jupyter_text>#### The .set_index() and .reset_index() Methods<jupyter_code># bond = pd.read_csv("files/jamesbond.csv",index_col = "Film") or
bond.set_index(keys = "Film", inplace = True)
bond.head(3)
bond.reset_index().head(3)
bond.reset_index(drop = True) # drop=True discards the old index instead of adding it back as a column.
bond.reset_index(drop = False, inplace = True) # drop=False is the default, so this is the same as calling reset_index() with no arguments.
bond.head(3)
bond.set_index(keys = "Film", inplace = True)
bond.head(3)
# if we set a new index while another one is already set, set_index() drops the old index column by default.
# there is no Film column in the table below:
bond.set_index("Year").head(3)
# to keep the current index as a regular column, reset the index before setting a new one.
bond.reset_index(inplace = True)
bond.set_index("Year", inplace = True)
bond.head(3)<jupyter_output><empty_output><jupyter_text>#### Retrieve Rows by Index Label with .loc[]<jupyter_code>bond = pd.read_csv("files/jamesbond.csv", index_col= "Film")
bond.sort_index(inplace = True)
bond.head(3)
bond.loc["Goldfinger"]
bond.loc["GoldenEye"]
#if there are one more same name rows:
bond.loc["Casino Royale"]
bond.loc["Diamonds Are Forever" : "Licence to Kill"]
bond.loc[: "Die Another Day"]
bond.loc[["Octopussy", "Moonraker"]]
"Gold Bond" in bond.index<jupyter_output><empty_output><jupyter_text>#### Retrieve Row(s) by Index Position with iloc Accessor<jupyter_code>bond = pd.read_csv("files/jamesbond.csv")
bond.head(3)
bond.iloc[0]
bond.iloc[23]
bond.iloc[[15,21]]
bond.iloc[2:6]<jupyter_output><empty_output><jupyter_text>#### Second Argument to loc and iloc Accessors<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
# passing a column name as the second argument returns only that value for the selected row.
bond.loc["Moonraker","Actor"]
bond.loc["Moonraker","Director"]
# returns the Director and Box Office values for Moonraker.
bond.loc["Moonraker", ["Director", "Box Office"]]
# returns the Director and Box Office values for both Moonraker and A View to a Kill.
bond.loc[["Moonraker","A View to a Kill"], ["Director", "Box Office"]]
bond.loc["Moonraker", "Director" : "Budget"]
bond.iloc[14]
bond.iloc[14, 2:6]
bond.iloc[[14,17], 2:6]
bond.iloc[[14,17], [2,5]]
bond.iloc[:5, :3]<jupyter_output><empty_output><jupyter_text>#### Set New Values for a Specific Cell<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
bond.loc["Dr. No", "Actor"] = "Sir Sean Connery"
bond.loc["Dr. No"]
bond.loc["Dr. No", ["Box Office", "Budget", "Bond Actor Salary"]] = [448800000, 7000000, 600000]
bond.loc["Dr. No", "Budget"]<jupyter_output><empty_output><jupyter_text>#### Set Multiple Values in DataFrame<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
actor_is_sean_connery = bond["Actor"] == "Sean Connery"
bond.loc[actor_is_sean_connery,"Actor"] = "Sir Sean Connery"
bond.head(7)<jupyter_output><empty_output><jupyter_text>#### Rename Index Labels or Columns in a DataFrame<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
# to change column names:
bond.rename(columns = {"Year" : "Release Date",
"Box Office" : "Revenue"}, inplace = True)
#or
#bond.rename(mapper = {"Year" : "Release Date", "Box Office" : "Revenue"}, axis = 1)
#bond.rename(mapper = {"Year" : "Release Date", "Box Office" : "Revenue"}, axis = "columns")
bond.columns
# to change row names:
bond.rename(mapper = {"GoldenEye" : "Golden eye",
"Diamonds Are Forever" : "Diamonds are Forever"}, axis = 0).head(10)
#or bond.rename(mapper = {"GoldenEye" : "Golden eye","The World Is Not Enough" : "Best Bond Movie Ever"}, axis = 0)
#or bond.rename(mapper = {"GoldenEye" : "Golden eye","The World Is Not Enough" : "Best Bond Movie Ever"}, axis = "index")
# we can use the index parameter instead of mapper + axis.
bond.rename(index = {"GoldenEye" : "Golden eye",
"The World Is Not Enough" : "Best Bond Movie Ever"},inplace = True)
# to rename all columns at once by assigning a list:
bond.columns = ["Year of Release","Actor", "Director","Gross","Cost","Salary"]
bond.head(1)<jupyter_output><empty_output><jupyter_text>#### Delete Rows or Columns from a DataFrame<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
bond.drop("A View to a Kill").head(3)
# for dropping multiple rows:
bond.drop(["Casino Royale","Diamonds Are Forever"]).head()
# to drop a column:
bond.drop("Box Office", axis = 1).head(3)
# to drop multiple columns:
bond.drop(["Box Office","Actor"] , axis = "columns").head(3)
actor = bond.pop("Actor")
actor
del bond["Director"]
bond<jupyter_output><empty_output><jupyter_text>#### Create Random Sample<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
#picks a random sample (one row by default).
bond.sample()
# a random sample of 25% of all rows:
bond.sample(frac = .25)
# 3 random columns:
bond.sample(3, axis = 1).head()<jupyter_output><empty_output><jupyter_text>#### The .nsmallest() and .nlargest() Methods<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
bond.sort_values("Box Office", ascending = False).head(3)
bond.nlargest(3, columns = "Box Office")
bond.nsmallest(n = 2, columns = "Box Office")
bond["Box Office"].nlargest(8)
bond["Year"].nsmallest(2)<jupyter_output><empty_output><jupyter_text>#### Filtering with the where Method<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
mask = bond["Actor"] == "Sean Connery"
bond[mask]
bond.where(mask)
bond.where(bond["Box Office"] > 800)
mask2 = bond["Box Office"] > 800
bond.where(mask & mask2)<jupyter_output><empty_output><jupyter_text>#### Filtering with the .query() Method<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
bond.columns = [column_name.replace(" ", "_") for column_name in bond.columns]
bond.head(1)
bond.query('Actor == "Sean Connery"')
bond.query("Director == 'Terence Young'")
bond.query("Actor != 'Roger Moore'").head()
bond.query("Box_Office > 600")
bond.query("Actor == 'Roger Moore' or Director == 'John Glen'")
# with in and not in:
bond.query("Actor in ['Timothy Dalton', 'George Lazenby']")
bond.query("Actor not in ['Sean Connery', 'Roger Moore']").head()<jupyter_output><empty_output><jupyter_text>#### A Review of the .apply() Method on Single Columns<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
def convert_to_string_and_add_millions(number):
return str(number) + " MILLIONS!"
bond["Box Office"] = bond["Box Office"].apply(convert_to_string_and_add_millions)
bond["Budget"] = bond["Budget"].apply(convert_to_string_and_add_millions)
bond.head(3)
columns = ["Box Office", "Budget", "Bond Actor Salary"]
for col in columns:
bond[col] = bond[col].apply(convert_to_string_and_add_millions)
bond.head(3)<jupyter_output><empty_output><jupyter_text>#### The .apply() Method with Row Values<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
def good_movie(row):
actor = row[1]
budget = row[4]
if actor == "Pierce Brosnan":
return "The best"
elif actor == "Roger Moore" and budget > 40:
return "Enjoyable"
else:
return "I have no clue"
bond.apply(good_movie, axis = "columns")<jupyter_output><empty_output><jupyter_text>#### The .copy() Method<jupyter_code>bond = pd.read_csv("files/jamesbond.csv",index_col = "Film")
bond.sort_index(inplace = True)
bond.head(3)
directors = bond["Director"]
directors.head(3)
# assigning through directors["A View to a Kill"] = "Mister John Glen" without a copy raises a SettingWithCopyWarning and can also modify the original DataFrame.
directors = bond["Director"].copy() # we created a copy of our original data source.
directors.head(3)
directors["A View to a Kill"] = "Mister John Glen" # it will change just copy value.
directors.head(3)
bond.head(3) # original data source won't change. <jupyter_output><empty_output>
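# Illustrative sketch: when the goal is to change the original DataFrame rather than a copy,
# the unambiguous way is a single .loc assignment on bond itself.
bond.loc["A View to a Kill", "Director"] = "Mister John Glen"
bond.head(3)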
|
no_license
|
/3-DataFrame_Data_Extraction.ipynb
|
ElifKarakutukDinc/Data-Analysis-with-Pandas-and-Python-
| 16 |
<jupyter_start><jupyter_text># Preprocessing and Import Data<jupyter_code>admissions = pd.read_csv('binary.csv')
admissions.head()<jupyter_output><empty_output><jupyter_text>### Use pd.get_dummies to turn the rank column into boolean dummy variables<jupyter_code># Make dummy variables for rank
data = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)
data = data.drop('rank', axis=1)
data.head()<jupyter_output><empty_output><jupyter_text>### Standardize the gre and gpa columns<jupyter_code># Standardize features
for field in ['gre', 'gpa']:
mean, std = data[field].mean(), data[field].std()
data.loc[:,field] = (data[field]-mean)/std
data.head()<jupyter_output><empty_output><jupyter_text>### Split the data into train and test sets<jupyter_code># Split off random 10% of the data for testing
np.random.seed(42)
sample = np.random.choice(data.index, size=int(len(data)*0.9), replace=False)
data, test_data = data.loc[sample], data.drop(sample)<jupyter_output><empty_output><jupyter_text>### Separate features and targets<jupyter_code># Split into features and targets
features, targets = data.drop('admit', axis=1), data['admit']
features_test, targets_test = test_data.drop('admit', axis=1), test_data['admit']<jupyter_output><empty_output><jupyter_text># Gradient Descent<jupyter_code>def sigmoid(x):
return 1 / (1 + np.exp(-x))
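# Illustrative helper (a sketch, not used below): the gradient step later multiplies the error by
# output * (1 - output), which is exactly the derivative of the sigmoid evaluated at the unit's output.
def sigmoid_prime(x):
    s = sigmoid(x)
    return s * (1 - s)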
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1/n_features**.5, size=n_features)
# Neural Network parameters
epochs = 1000 # number of training iterations
learnrate = 0.5<jupyter_output><empty_output><jupyter_text>### Iteration<jupyter_code>for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
output = sigmoid(np.dot(x, weights))
# The error, the target minues the network output
error = y - output
# The gradient descent step, the error times the gradient times the inputs
del_w += error * output * (1 - output) * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss<jupyter_output>Train loss: 0.254524964515
Train loss: 0.209477560082
Train loss: 0.201394540624
Train loss: 0.198937362764
Train loss: 0.197970227125
Train loss: 0.197522225304
Train loss: 0.197291383221
Train loss: 0.197163425402
Train loss: 0.197088700389
Train loss: 0.197043358925
<jupyter_text>### Calculate accuracy on test data<jupyter_code>tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))<jupyter_output>Prediction accuracy: 0.725
|
permissive
|
/DeepLearning - Udacity/code/GradientDescent/GradientDescent.ipynb
|
Coder-AndyLee/Deep-Learning
| 8 |
<jupyter_start><jupyter_text>
[<< back to main index](../README.md)
# Logistic Regression in Python - Credit Card Approval (Demo)
### Overview
Instructor to demo this on screen.
### Builds on
None
### Run time
approx. 20-30 minutes
### Notes
Scikit-Learn has a logistic regression function called Logistic Regression.## Step 1 : Load imports<jupyter_code>%matplotlib inline
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
<jupyter_output><empty_output><jupyter_text>## Step 2 : Sigmoid Curve
In logistic regression, we often use a sigmoid activation function. Let's generate a sigmoid curve in python!
**=>TODO: complete the code to get a sigmoid! **<jupyter_code>import numpy as np
# plot sigmoid curve
x = np.arange(-10.,10.,1.)
b = 0 # intercept
m = 1 # slope
sigmoid = lambda x,b,m: np.exp((b + m*x)) / (1 + np.exp((b + m*x)))
y = sigmoid(x,b,m)
## hint : scatter (x,y)
plt.scatter(x,y)
plt.title("Sigmoid (Logistic) Function")<jupyter_output><empty_output><jupyter_text>
## Step 3: Credit Approval DataHere is the sample data we are looking at:
| score | approved |
|-------|----------|
| 550 | 0 |
| 750 | 1 |
| 680 | 1 |
| 650 | 0 |
| 450 | 0 |
| 800 | 1 |
| 775 | 1 |
| 525 | 0 |
| 620 | 0 |
| 705 | 0 |
| 830 | 1 |
| 610 | 1 |
| 690 | 0 |
## Step 4: Let's visualize the data<jupyter_code>credit_approval = pd.DataFrame({'score' : [550., 750., 680., 650., 450., 800., 775., 525., 620., 705., 830., 610., 690.],
'approved' : [0,1,1,0,0,1,1,0,0,0,1,1,0]
})
credit_approval<jupyter_output><empty_output><jupyter_text>## Step 5: Let us plot and visualize the sample data.
**=> Run a scatterplot with score on the X axis and approved on the y axis **<jupyter_code>## Hint x = credit_approval.score, y = credit_approval.approved
plt.scatter(credit_approval.score, credit_approval.approved)
plt.xlabel('score')
plt.ylabel('approved')
x = credit_approval.score.values.reshape(-1,1)
y = credit_approval.approved.values<jupyter_output><empty_output><jupyter_text>
## Step 7: Fit logistic regression
Now it's time to fit our logistic regression model. This is a linear model, so we will be getting the coefficients and intercept.
**=> Run Logistic Regression with 50 iterations. **
<jupyter_code>from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
# Fit the model
lrModel = lr.fit(credit_approval.score.values.reshape(-1,1),
credit_approval.approved.values)
lrModel.coef_[0],lrModel.intercept_[0]<jupyter_output><empty_output><jupyter_text>The output lists approval & estimated probabilities## Step 8 : Evaluate The Model
### 8.1 Confusion Matrix
**=> TODO : Can you interpret the confusion matrix?**<jupyter_code>from sklearn.metrics import confusion_matrix
predictions = lrModel.predict(credit_approval.score.values.reshape(-1,1))
confusion_matrix(credit_approval.approved.values, predictions)<jupyter_output><empty_output><jupyter_text>### 8.2 : ROC curve and AUC (Area Under Curve)
**=> TODO: What is the meaning of the ROC curve? How is the AUC calculated? **<jupyter_code>from sklearn.metrics import roc_curve, auc
fpr, tpr, thresholds = roc_curve(y, predictions)
roc_auc = auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b',
label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
from sklearn.metrics import accuracy_score
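# Illustrative sketch for the questions above (variable names are assumptions, not part of the demo):
# the confusion matrix can be unpacked into the usual classification metrics.
tn, fp, fn, tp = confusion_matrix(credit_approval.approved.values, predictions).ravel()
print("accuracy :", accuracy_score(credit_approval.approved.values, predictions))
print("precision:", tp / (tp + fp))
print("recall   :", tp / (tp + fn))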
<jupyter_output><empty_output><jupyter_text>## Step 9: Visualize data and logit model
Let's visualize the data and our model.<jupyter_code>plt.scatter(credit_approval.score, credit_approval.approved)
plt.xlabel('score')
plt.ylabel('approved')
<jupyter_output><empty_output><jupyter_text>## Step 10: Let's create some new test data and make predictions<jupyter_code>newdata = pd.DataFrame({'score' : [600., 700., 810.]
})
print(newdata)
lrModel.predict(newdata.values)<jupyter_output><empty_output><jupyter_text>**=> TODO: Try adding your own test data **
<jupyter_code># TODO: Enter your test data here and re=run.
<jupyter_output><empty_output>
|
non_permissive
|
/logistic-regression/logistic-1-credit-approval.ipynb
|
vdevigere/ml-labs-macys
| 10 |
<jupyter_start><jupyter_text>**Grubbs' test**<jupyter_code>import os
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
from scipy.stats import t, zscore
import random
def grubbs(X, test='two-tailed', alpha=0.05):
    '''
    Runs Grubbs' test recursively until the null hypothesis holds.
    Parameters
    ----------
    X : ndarray
        The numpy array of numbers in which to look for outliers.
    test : str
        Describes the type of outliers being sought. Can be 'min'
        (to look for unusually small outliers), 'max' (for unusually large ones),
        or 'two-tailed' (for both).
    alpha : float
        The significance level.
    Returns
    -------
    X : ndarray
        The original array with the outliers removed.
    outliers : ndarray
        An array of the outliers.
    '''
    Z = zscore(X, ddof=1) # Z-scores
    N = len(X) # Number of samples
    # Compute the extreme values and the critical value of the Student's t distribution
if test == 'two-tailed':
extreme_ix = lambda Z: np.abs(Z).argmax()
t_crit = lambda N: t.isf(alpha / (2.*N), N-2)
elif test == 'max':
extreme_ix = lambda Z: Z.argmax()
t_crit = lambda N: t.isf(alpha / N, N-2)
elif test == 'min':
extreme_ix = lambda Z: Z.argmin()
t_crit = lambda N: t.isf(alpha / N, N-2)
else:
raise ValueError ("Test must be 'min', 'max', or 'two-tailed'")
thresh = lambda N: (N - 1.) / np.sqrt(N) * \
np.sqrt(t_crit(N)**2 / (N - 2 + t_crit(N)**2))
    # Create an array to store the outliers
    outliers = np.array([])
    # Loop over the data array and remove the outliers
    while abs(Z[extreme_ix(Z)]) > thresh(N):
        # update the list of outliers
        outliers = np.r_[outliers, X[extreme_ix(Z)]]
        # remove the outlier from the array
        X = np.delete(X, extreme_ix(Z))
        # recompute the Z score
Z = zscore(X, ddof=1)
N = len(X)
return X, outliers
np.random.seed(1548)
x1 = np.random.normal(30, 10, 20)
x2 = np.random.randint(50, 100, 2)
x = np.append(x1, x2)
print(x)
grubbs(x)
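# Illustrative usage sketch: the same helper can look for only unusually large values,
# and the cleaned array and the detected outliers can be unpacked separately.
cleaned, detected = grubbs(x, test='max', alpha=0.05)
print(detected)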
import numpy as np
import pandas as pd
import plotnine
import statsmodels.formula.api as smf
from plotnine import ggplot, geom_point, aes, geom_abline
datos = pd.read_csv("medidas_cuerpo2.csv")
lm=smf.ols(formula = "Peso ~ Estatura", data = datos).fit()
lm.params
lm.rsquared
n=datos.shape[0]
p=2
influence = lm.get_influence()
<jupyter_output><empty_output><jupyter_text>**Studentized residuals**<jupyter_code>resid_student = influence.resid_studentized_external
datos[['outlier_residstud_rls']] = np.where(abs(resid_student) >= 2, 1, 0)
(ggplot(datos, aes('Estatura', 'Peso', color='factor(outlier_residstud_rls)'))
+ geom_point()
+ geom_abline(intercept = lm.params[0], slope = lm.params[1], color = "black"))
<jupyter_output>/usr/local/lib/python3.7/dist-packages/plotnine/utils.py:1246: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
if pdtypes.is_categorical(arr):
<jupyter_text>**Cook's distance**<jupyter_code>(cooks, p) = influence.cooks_distance
datos[['outlier_cook_rls']] = np.where(cooks >= 4 /(n-2-2), 1, 0)
(ggplot(datos, aes('Estatura', 'Peso', color='factor(outlier_cook_rls)'))
+ geom_point()
+ geom_abline(intercept = lm.params[0], slope = lm.params[1], color = "black"))
<jupyter_output>/usr/local/lib/python3.7/dist-packages/plotnine/utils.py:1246: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
if pdtypes.is_categorical(arr):
<jupyter_text>**DFFIT**<jupyter_code>(dffits, p) = influence.dffits
datos[['outlier_dffit_rls']] = np.where(dffits > 2 / np.sqrt(p/n), 1, 0)
(ggplot(datos, aes('Estatura', 'Peso', color='factor(outlier_dffit_rls)'))
+ geom_point()
+ geom_abline(intercept = lm.params[0], slope = lm.params[1], color = "black"))
<jupyter_output>/usr/local/lib/python3.7/dist-packages/plotnine/utils.py:1246: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
if pdtypes.is_categorical(arr):
<jupyter_text>**Leverage**<jupyter_code>leverage = influence.hat_matrix_diag
datos[['influential_hat']] = np.where(leverage >= 2*p/n, 1, 0)
(ggplot(datos, aes('Estatura', 'Peso', color='factor(influential_hat)'))
+ geom_point()
+ geom_abline(intercept = lm.params[0], slope = lm.params[1], color = "black"))
<jupyter_output>/usr/local/lib/python3.7/dist-packages/plotnine/utils.py:1246: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead
if pdtypes.is_categorical(arr):
<jupyter_text>**Multiple regression**<jupyter_code>datos = pd.read_csv("medidas_cuerpo2.csv")
lm2 = smf.ols(formula = "Peso~Estatura+circun_cuello+circun_muneca", data = datos).fit()
n=datos.shape[0]
p=4
lm2.params
influence_rlm = lm2.get_influence()
resid_student_rlm = influence_rlm .resid_studentized_external
(cooks_rlm, p_cooks_rlm) = influence_rlm.cooks_distance
(dffits_rlm, p_dffits_rlm) = influence_rlm.dffits
leverage_rlm = influence_rlm.hat_matrix_diag
datos[['outlier_residstud_rlm']] = np.where(abs(resid_student_rlm) >= 2, 1, 0)
datos[['outlier_cook_rlm']] = np.where(cooks_rlm >= 4 /(n-2-2), 1, 0)
datos[['outlier_dffit_rlm']] = np.where(dffits_rlm > 2 * np.sqrt(p/n), 1, 0)
datos[['influential_hat_rlm']] = np.where(leverage_rlm >= 2*p/n, 1, 0)
df_outliers = datos.query('outlier_residstud_rlm == 1 | outlier_cook_rlm == 1 | outlier_dffit_rlm == 1')
df_outliers
df_outliers = df_outliers.drop(['outlier_residstud_rlm','outlier_cook_rlm', 'outlier_dffit_rlm', 'influential_hat_rlm'], 1)
df_outliers
<jupyter_output><empty_output>
|
no_license
|
/sesion2/Anomalías_con_Outliers (1).ipynb
|
josezea/Anomalias
| 6 |
<jupyter_start><jupyter_text>---
_You are currently looking at **version 1.3** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
---# Assignment 1 - Introduction to Machine LearningFor this assignment, you will be using the Breast Cancer Wisconsin (Diagnostic) Database to create a classifier that can help diagnose patients. First, read through the description of the dataset (below).<jupyter_code>import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
#print(cancer.DESCR) # Print the data set description<jupyter_output><empty_output><jupyter_text>The object returned by `load_breast_cancer()` is a scikit-learn Bunch object, which is similar to a dictionary.<jupyter_code>cancer.keys()<jupyter_output><empty_output><jupyter_text>### Question 0 (Example)
How many features does the breast cancer dataset have?
*This function should return an integer.*<jupyter_code># You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
# This function returns the number of features of the breast cancer dataset, which is an integer.
# The assignment question description will tell you the general format the autograder is expecting
return len(cancer['feature_names'])
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero() <jupyter_output><empty_output><jupyter_text>### Question 1
Scikit-learn works with lists, numpy arrays, scipy-sparse matrices, and pandas DataFrames, so converting the dataset to a DataFrame is not necessary for training this model. Using a DataFrame does however help make many things easier such as munging data, so let's practice creating a classifier with a pandas DataFrame.
Convert the sklearn.dataset `cancer` to a DataFrame.
*This function should return a `(569, 31)` DataFrame with *
*columns = *
['mean radius', 'mean texture', 'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness', 'mean concavity',
'mean concave points', 'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error', 'perimeter error', 'area error',
'smoothness error', 'compactness error', 'concavity error',
'concave points error', 'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture', 'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness', 'worst concavity',
'worst concave points', 'worst symmetry', 'worst fractal dimension',
'target']
*and index = *
RangeIndex(start=0, stop=569, step=1)<jupyter_code>def answer_one():
cancer_df = pd.DataFrame(cancer.data, columns=cancer.feature_names)
cancer_df["target"] = pd.Series(cancer.target)
return cancer_df
answer_one()<jupyter_output><empty_output><jupyter_text>### Question 2
What is the class distribution? (i.e. how many instances of `malignant` (encoded 0) and how many `benign` (encoded 1)?)
*This function should return a Series named `target` of length 2 with integer values and index =* `['malignant', 'benign']`<jupyter_code>def answer_two():
cancerdf = answer_one()
result = cancerdf.target.value_counts()
result = result.rename({0: "malignant", 1: "benign"})
return result# Return your answer
answer_two()<jupyter_output><empty_output><jupyter_text>### Question 3
Split the DataFrame into `X` (the data) and `y` (the labels).
*This function should return a tuple of length 2:* `(X, y)`*, where*
* `X`*, a pandas DataFrame, has shape* `(569, 30)`
* `y`*, a pandas Series, has shape* `(569,)`.<jupyter_code>def answer_three():
cancerdf = answer_one()
X = cancerdf.drop("target", axis=1)
y = cancerdf["target"]
return X, y<jupyter_output><empty_output><jupyter_text>### Question 4
Using `train_test_split`, split `X` and `y` into training and test sets `(X_train, X_test, y_train, and y_test)`.
**Set the random number generator state to 0 using `random_state=0` to make sure your results match the autograder!**
*This function should return a tuple of length 4:* `(X_train, X_test, y_train, y_test)`*, where*
* `X_train` *has shape* `(426, 30)`
* `X_test` *has shape* `(143, 30)`
* `y_train` *has shape* `(426,)`
* `y_test` *has shape* `(143,)`<jupyter_code>from sklearn.model_selection import train_test_split
def answer_four():
X, y = answer_three()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Your code here
return X_train, X_test, y_train, y_test<jupyter_output><empty_output><jupyter_text>### Question 5
Using KNeighborsClassifier, fit a k-nearest neighbors (knn) classifier with `X_train`, `y_train` and using one nearest neighbor (`n_neighbors = 1`).
*This function should return a * `sklearn.neighbors.classification.KNeighborsClassifier`.<jupyter_code>from sklearn.neighbors import KNeighborsClassifier
def answer_five():
X_train, X_test, y_train, y_test = answer_four()
knn = KNeighborsClassifier(n_neighbors = 1)
knn.fit(X_train, y_train)
# Your code here
return knn# Return your answer<jupyter_output><empty_output><jupyter_text>### Question 6
Using your knn classifier, predict the class label using the mean value for each feature.
Hint: You can use `cancerdf.mean()[:-1].values.reshape(1, -1)` which gets the mean value for each feature, ignores the target column, and reshapes the data from 1 dimension to 2 (necessary for the predict method of KNeighborsClassifier).
*This function should return a numpy array either `array([ 0.])` or `array([ 1.])`*<jupyter_code>def answer_six():
cancerdf = answer_one()
means = cancerdf.mean()[:-1].values.reshape(1, -1)
knn = answer_five()
prediction = knn.predict(means)
# Your code here
return prediction# Return your answer<jupyter_output><empty_output><jupyter_text>### Question 7
Using your knn classifier, predict the class labels for the test set `X_test`.
*This function should return a numpy array with shape `(143,)` and values either `0.0` or `1.0`.*<jupyter_code>def answer_seven():
X_train, X_test, y_train, y_test = answer_four()
knn = answer_five()
prediction = knn.predict(X_test)
# Your code here
return prediction# Return your answer<jupyter_output><empty_output><jupyter_text>### Question 8
Find the score (mean accuracy) of your knn classifier using `X_test` and `y_test`.
*This function should return a float between 0 and 1*<jupyter_code>def answer_eight():
X_train, X_test, y_train, y_test = answer_four()
knn = answer_five()
result = knn.score(X_test, y_test)
# Your code here
return result# Return your answer<jupyter_output><empty_output><jupyter_text>### Optional plot
Try using the plotting function below to visualize the differet predicition scores between training and test sets, as well as malignant and benign cells.<jupyter_code>def accuracy_plot():
import matplotlib.pyplot as plt
%matplotlib notebook
X_train, X_test, y_train, y_test = answer_four()
# Find the training and testing accuracies by target value (i.e. malignant, benign)
mal_train_X = X_train[y_train==0]
mal_train_y = y_train[y_train==0]
ben_train_X = X_train[y_train==1]
ben_train_y = y_train[y_train==1]
mal_test_X = X_test[y_test==0]
mal_test_y = y_test[y_test==0]
ben_test_X = X_test[y_test==1]
ben_test_y = y_test[y_test==1]
knn = answer_five()
scores = [knn.score(mal_train_X, mal_train_y), knn.score(ben_train_X, ben_train_y),
knn.score(mal_test_X, mal_test_y), knn.score(ben_test_X, ben_test_y)]
plt.figure()
# Plot the scores as a bar chart
bars = plt.bar(np.arange(4), scores, color=['#4c72b0','#4c72b0','#55a868','#55a868'])
# directly label the score onto the bars
for bar in bars:
height = bar.get_height()
plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90, '{0:.{1}f}'.format(height, 2),
ha='center', color='w', fontsize=11)
# remove all the ticks (both axes), and tick labels on the Y axis
plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')
# remove the frame of the chart
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.xticks([0,1,2,3], ['Malignant\nTraining', 'Benign\nTraining', 'Malignant\nTest', 'Benign\nTest'], alpha=0.8);
plt.title('Training and Test Accuracies for Malignant and Benign Cells', alpha=0.8)<jupyter_output><empty_output><jupyter_text>Uncomment the plotting function to see the visualization.
**Comment out** the plotting function when submitting your notebook for grading. <jupyter_code>accuracy_plot() <jupyter_output><empty_output>
|
permissive
|
/03_Applied Machine Learning in Python/Week_1/Assignment+1_Solved.ipynb
|
vblacklion/03_Applied-Data-Science-with-Python-Specialization
| 13 |
<jupyter_start><jupyter_text># Otro grupo<jupyter_code>with open("Modelos/NN_f_mas_p2.pkl", "rb") as f:
model_loaded = pickle.load(f)
model_loaded.best_params_
model_loaded.best_score_
prediction=model_loaded.predict_proba(walmart_df_test)
np.savetxt('Datos/pred_nn2.csv',prediction, delimiter=',')
#gana el de alpha 100<jupyter_output><empty_output>
|
no_license
|
/codigos_finales/mejor_modelo_redes.ipynb
|
urielmminon/marmol-final
| 1 |
<jupyter_start><jupyter_text># Decimals<jupyter_code>import decimal
from decimal import Decimal
decimal.getcontext()
decimal.getcontext().rounding
decimal.getcontext().prec = 6
decimal.getcontext()
g_ctx = decimal.getcontext()
type(g_ctx)
g_ctx.rounding = decimal.ROUND_HALF_UP
decimal.ROUND_HALF_UP
decimal.getcontext()
import decimal
decimal.getcontext().prec = 6
decimal.getcontext()
decimal.getcontext()
type(decimal.localcontext())
type(decimal.getcontext())
x = Decimal('1.25')
y = Decimal('1.35')
with decimal.localcontext() as ctx: # with is the equivalent of using statement in C#
ctx.prec = 6
ctx.rounding = decimal.ROUND_HALF_UP
print(round(x, 1))
print(round(y, 1))
print(round(x, 1))
print(round(y, 1))<jupyter_output>1.3
1.4
1.2
1.4
|
no_license
|
/Part 1 - Functional/Section 4 - Numeric Types/Decimals.ipynb
|
omarfq/Python3-Deep-Dive-Series
| 1 |
<jupyter_start><jupyter_text>## Домашнее задание №1
В этом домашнем задании вы познакомитесь с pytorch сами и сможете попрактиковаться в его применении.
#### План:
1. Простейшие операции в pytorch
2. Пишем Adam и применяем его к ручной модели
3. Обучаем свою первую нейросеть<jupyter_code>import math
import os
import random
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>### Простейшие операции на pytorch (2 балла)**Task**: Cоздайте два случайных тензора (двумерных, не квадратных):<jupyter_code>x = # your code here
y = # your code here <jupyter_output><empty_output><jupyter_text>**Task**: Умножьте их друг на друга, результат запишите в третий тензор без использования оператора `=`, для создания третьего тензора предлагается использовать `torch.empty`:<jupyter_code># your code here<jupyter_output><empty_output><jupyter_text>**Task**: Реализуйте ReLU использую только pytorch, примените его к тензору `x` (запрещено использование модулей torch.nn и его подмодулей, а также функции torch.relu)<jupyter_code>def relu_forward(x):
pass
assert torch.all(F.relu(x) == relu_forward(x))
# your code here<jupyter_output><empty_output><jupyter_text>**Task**: Сделайте тоже самое c ELU (запрещено использование модулей torch.nn и его подмодулей):<jupyter_code>def elu_forward(x):
pass
assert torch.allclose(F.elu(x), elu_forward(x), 1e-4)<jupyter_output><empty_output><jupyter_text>**Task**: LeakyReLU (запрещено использование модулей torch.nn и его подмодулей):<jupyter_code>def lrelu_forward(x, alpha):
pass
assert torch.all(F.leaky_relu(x, 0.01) == lrelu_forward(x, 0.01))<jupyter_output><empty_output><jupyter_text>**Task**: Теперь перейдем к немного более современным функциям активаций, например Mish, напомним как она выглядит:
$$x * tanh(ln(1+e^x))$$
(запрещено использование модулей torch.nn и его подмодулей)<jupyter_code>def mish(x):
pass
assert torch.allclose(
mish(torch.tensor([1, 1, 1], dtype=torch.float32)),
torch.tensor([0.8651, 0.8651, 0.8651]),
atol=1e-4,
)
assert torch.allclose(
mish(torch.tensor([0.6376, 0.4021, 0.6656, 0.3726], dtype=torch.float64)),
torch.tensor([0.5014, 0.2908, 0.5280, 0.2663], dtype=torch.float64),
atol=1e-4,
)<jupyter_output><empty_output><jupyter_text>**Task**: Теперь реализуем swish, напомним как она выглядит:
$$x * \sigma(x)$$
(запрещено использование модулей torch.nn и его подмодулей)<jupyter_code>def swish(x):
pass
assert torch.allclose(
swish(torch.tensor([1, 1, 1], dtype=torch.float32)),
torch.tensor([0.7311, 0.7311, 0.7311]),
atol=1e-4,
)
assert torch.allclose(
swish(torch.tensor([0.6376, 0.4021, 0.6656, 0.3726], dtype=torch.float64)),
torch.tensor([0.4171, 0.2409, 0.4396, 0.2206], dtype=torch.float64),
atol=1e-4,
)<jupyter_output><empty_output><jupyter_text>### Пишем Adam и применяем его к логистической регрессии (4 балла)
В данной секции вам нужно сделать две вещи:
1. Написать свой собственный оптимизатор подобно тому, который мы писали на семинаре
2. Обучить логистическую регрессию побатчево на картинках из датасета
#### Adam
$$
\begin{eqnarray}
g &=& \frac{1}{m}\nabla_w \sum_i L(f(x_{i};w), y_{i}) \\
m &=& \beta_1 m + (1 - \beta_1) g \\
v &=& \beta_2 v + (1 - \beta_2) diag(gg^{T}) \\
\hat{m} &=& \frac{m}{1 - \beta_1^{t}} \\
\hat{v} &=& \frac{v}{1 - \beta_2^{t}} \\
w &=& w - \frac{\eta}{\sqrt{\hat{v} + \epsilon}} \odot \hat{m}
\end{eqnarray}
$$<jupyter_code># абстрактный класс, не обращайте внимания
from torch.optim import Optimizer
class InClassOptimizer(Optimizer):
def step(self):
"""Perform single optimization step."""
with torch.no_grad(): # выключим градиенты
for group in self.param_groups:
self._group_step(group)
def _group_step(self, group):
# group ~ dict[str, ...]
"""
Private helper function to perform
single optimization step on model parameters.
"""
raise NotImplementedError()<jupyter_output><empty_output><jupyter_text>**Task**: Напишите свою реализацию Adam:<jupyter_code>class Adam(InClassOptimizer):
def __init__(self, params, lr=0.001, eps=1e-13, beta_1=0.9, beta_2=0.999):
defaults = dict(lr=lr, eps=eps, beta_1=beta_1, beta_2=beta_2)
super().__init__(params, defaults)
def _group_step(self, group):
# One group contains information about values passed in init
# and model parameters to update
lr = group["lr"]
eps = group["eps"]
beta_1 = group["beta_1"]
beta_2 = group["beta_2"]
for param in filter(lambda x: x.grad is not None, group["params"]):
pass
def _get_adam_buffer(self, param):
"""
Get accumulated gradients for Adam.
Parameters
----------
param : `torch.Tensor`, required
Model parameter to get accumulated gradeints for Adagrad.
Returns
-------
Accumulated Adam gradients for parameter.
"""
pass
def _init_adam_buffer(self, param):
"""
Initialize accumulated gradeints for adam.
Parameters
----------
param : `torch.Tensor`, required
Model parameter to get accumulated gradeints for adam.
"""
pass<jupyter_output><empty_output><jupyter_text>**Task**: Создайте параметры для обучения логистической регрессии, сделаем Xavier ициализацию, которая выглядит следующим образом:
$$w \sim U[-\frac{\sqrt{6}}{\sqrt{n_{in} + n_{out}}}, \frac{\sqrt{6}}{\sqrt{n_{in} + n_{out}}}]$$
где:
* $n_{in}$ -- размер входа (в нейронах)
* $n_{out}$ -- размер выхода (в нейронах)
Подумайте над выбором $n_{in}$ и $n_{out}$ самостоятельно. <jupyter_code>weights = # your code here
intercept = # your code here
optimizer = Adam([weights, intercept])
loss = nn.CrossEntropyLoss()<jupyter_output><empty_output><jupyter_text>Загрузим данные и создадим даталоадеры:<jupyter_code>fashion_mnist_train = torchvision.datasets.FashionMNIST(
'./data',
download=True,
transform=transforms.Compose([transforms.ToTensor()])
)
train_dataloader = # your code here
fashion_mnist_eval = torchvision.datasets.FashionMNIST(
'./data',
train=False,
download=True,
transform=transforms.Compose([transforms.ToTensor()])
)
eval_dataloader = # your code here<jupyter_output><empty_output><jupyter_text>Напишите функцию для тренировки логистической регрессии, она должна:
* Делать предсказания
* Считать лосс
* Подсчитывать градиенты
* Делать шаг оптимизации
* Обнулять посчитанные градиенты
* Считать метрики
* Возвращать полученные метрики
После этого предусмотрите возможность визуализировать метрики, чтобы нарисовать картинки, а именно от вас требуется визуализировать:
* Зависимость лосса от количества итераций
* Зависимость доли правильных ответов от количества итераций<jupyter_code>def train_logistic_regression(weights, bias, batch, loss, optimizer):
pass
for epoch in range(1, 100):
for batch in train_dataloader:
metrics = train_logistic_regression(weights, bias, batch, loss, optimizer)<jupyter_output><empty_output><jupyter_text>#### Вопросы к секции:
* Своими словами и без математики объясните благодаря чему Adam дает несмещенную оценку на квадрат градиента
* Когда модель начала переобучаться? Как вы это поняли? Сделайте визуализацию и докажите свою точку зрения.### Моя первая нейросеть (4 балла)В данной секции вам нужно сделать следующие вещи:
* Реализовать три разных архитектуры нейросетей. Эти архитектуры должны принципиально отличаться друг от друга. Разрешается одной из архитекур брать полностью полносвязную модель. Остальные две должны быть сверточными и сильно отличаться друг от друга. К примеру, одна из таких архитектур может быть VGG подобная сеть, а другая ResNet подобная архитектура.
* Написать цикл для обучения которым можно обучать все три модели без изменений кода
* Попробовать каждую модель с двумя оптимизаторами: RMSprop и SGD with momentum
* Визуализировать результаты перфоманса каждой модели (две метрики минимум для каждого сетапа, например, лосс и долю правильных ответов). В данном пункте мы ждем от вас визуализацию зависимости метрики от номера итерации обучения.
* Сделать выводы какие были модели были лучше и как вы думаете почему?<jupyter_code>def train_model(model, dataloader, optimizer):
pass
class FirstModel(nn.Module):
def __init__(self):
super(Model, self).__init__()
pass
def forward(self, x):
pass
second_model = nn.Sequential(
# your code here
)
third_model = torch.nn.ModuleDict(
{
# your code here
}
)
# train and visualize and write summary down there<jupyter_output><empty_output>
|
no_license
|
/2020/hw1/hw1.ipynb
|
hse-ds/iad-deep-learning
| 14 |
<jupyter_start><jupyter_text>### This notebook was created as a requirement of the Applied Data Science Capstone peer graded assignment<jupyter_code>import pandas as pd
import numpy as np
print("Hello Capstone Project Course!")<jupyter_output>Hello Capstone Project Course!
|
no_license
|
/Data Science Capstone Project.ipynb
|
pcssubashree/Coursera_Capstone
| 1 |
<jupyter_start><jupyter_text>#### below codes ran for two folders : clinical trial and katsanos folder
input for clinical trial list: input\\ClinicalTrial || output filename is CT_parse_69.csv<jupyter_code># get the elements from the XML file into a dictionary
import xml.etree.ElementTree as ET
#check if a file exist -- when parse xml error, the xml file will be moved to another folder
import os.path
from os import path
import re  # used below to collapse repeated whitespace in the eligibility text
# function to get the primary outcome and secondary outcome
def CT_tag(element):
element_count = len(root.findall(element))
xml_dict[element+'_count'] = element_count
# primary/secondary outcome info
Out_list=[]
Out_measure=""
Out_time=""
Out_desc=""
for Out in root.iter(element):
if Out.find("measure") is not None:
Out_measure = Out[0].text
if Out.find("time_frame") is not None:
Out_time = Out[1].text
        if Out.find("description") is not None:
Out_desc = Out[2].text
Out_list.append(Out_measure+"|"+Out_time+"|"+Out_desc)
xml_dict[element+'_list'] = Out_list
#function to get the arm_group label and description
def CT_arm(element):
element_count = len(root.findall(element))
xml_dict[element+'_count'] = element_count
arm_list=[]
arms_label=""
arms_description=""
for arms in root.iter(element):
if arms.find("arm_group_label") is not None:
arms_label = arms[0].text
if arms.find("description") is not None:
arms_description = arms[2].text
arms_description=arms_description.replace("\n","")
arm_list.append(arms_label+"|"+arms_description)
xml_dict[element+"_list"]=arm_list
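# Note: CT_tag and CT_arm read children by position (Out[0], Out[1], arms[2]), which assumes
# the XML child elements always appear in the same order. A hedged alternative sketch
# (not used below) that looks elements up by tag name instead:
# def ct_tag_by_name(element):
#     out_list = []
#     for out in root.iter(element):
#         measure = out.findtext("measure", default="")
#         time_frame = out.findtext("time_frame", default="")
#         description = out.findtext("description", default="")
#         out_list.append(measure + "|" + time_frame + "|" + description)
#     xml_dict[element + '_list'] = out_list
#     xml_dict[element + '_count'] = len(root.findall(element))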
filepath = input('filepath for list of XML files: use \"folderundercurrentdirectory\\subfolder\":')
outfilename = input('please give the filename for output: ')
outputfile = "output\\"+outfilename
for i in NCTID_list:
xml_dict={}
filename = filepath+"\\"+i+".xml"
if path.exists(filename):
print ("processing "+filename)
xml_dict['NCTID']=i
with open(filename, mode='rt', errors='ignore') as xml:
tree = ET.parse(xml)
root = tree.getroot()
#oversight_info
if root.findall('oversight_info/has_dmc'):
for dmc in root.findall('oversight_info/has_dmc'):
if dmc != None:
xml_dict['dmc_oversight']=dmc.text
else:
xml_dict['dmc_oversight']="n/a"
else:
xml_dict['dmc_oversight']="n/a"
#primary and secondary outcome
CT_tag('primary_outcome')
CT_tag('secondary_outcome')
#arm info
CT_arm('arm_group')
#eligibility
for inclusion_exclusion in root.iter('eligibility'):
for text in inclusion_exclusion.findall('criteria/textblock') :
content = text.text
content=str(content)
content=content.replace("\n","")
content = re.sub(' +', ' ', content)
xml_dict['inclusion_exclusion']=content
#mesh_term
mesh_term_list = []
if root.find('condition_browse') is not None:
for mesh_term in root.findall('condition_browse/mesh_term'):
mesh_term_list.append(mesh_term.text)
if root.find('intervention_browse') is not None:
for mesh_term in root.findall('intervention_browse/mesh_term'):
mesh_term_list.append(mesh_term.text)
xml_dict['mesh_term'] = mesh_term_list
print (i+" parsed successfully!")
#print (xml_dict)
# write the dictionary into a csv file, each key:value is a cell, separated by ";". one NCTID in one row
with open(outputfile, 'a') as f:
#writer = csv.writer(f)
for key, value in xml_dict.items():
if type(xml_dict[key]) == list:
f.write(str(key)+":")
for t in xml_dict[key]:
f.write(str(t)+",")
f.write("||")
else:
f.write(str(key)+":"+str(value))
f.write("||")
f.write("\n")
f.close() <jupyter_output>filepath for list of XML files: use "folderundercurrentdirectory\subfolder":input\\ClinicalTrial
please give the filename for output: CT_69_parse_1227.csv
processing input\\ClinicalTrial\NCT02599389.xml
NCT02599389 parsed successfully!
processing input\\ClinicalTrial\NCT02710656.xml
NCT02710656 parsed successfully!
processing input\\ClinicalTrial\NCT01221610.xml
NCT01221610 parsed successfully!
processing input\\ClinicalTrial\NCT01867736.xml
NCT01867736 parsed successfully!
processing input\\ClinicalTrial\NCT00696956.xml
NCT00696956 parsed successfully!
processing input\\ClinicalTrial\NCT00930813.xml
NCT00930813 parsed successfully!
processing input\\ClinicalTrial\NCT01594684.xml
NCT01594684 parsed successfully!
processing input\\ClinicalTrial\NCT02498080.xml
NCT02498080 parsed successfully!
processing input\\ClinicalTrial\NCT01558505.xml
NCT01558505 parsed successfully!
processing input\\ClinicalTrial\NCT03175744.xml
NCT03175744 parsed successfully!
processing input\\ClinicalTrial\NCT[...]<jupyter_text>#### Katsanos list now:
Input for the Katsanos list: input\\katsanos || output file name: katsanos.csv
Note: the Katsanos list contains more XML files than are present in the katsanos folder. Records not found in the katsanos folder should be copied from the ClinicalTrial output (they are the overlapping trials).<jupyter_code>#read the Katsanos list
NCTID_list = []
df = pd.read_excel('input\\katsanos.xlsx', sheet_name='Sheet1')
NCTID_list = df['NCT Number'].dropna().tolist()
print (len(NCTID_list))
#this is a repeated cell, just wanted to keep the results for Katsanos
# get the elements from the XML file into a dictionary
import xml.etree.ElementTree as ET
#check if a file exist -- when parse xml error, the xml file will be moved to another folder
import os.path
from os import path
# function to get the primary outcome and secondary outcome
def CT_tag(element):
element_count = len(root.findall(element))
xml_dict[element+'_count'] = element_count
# primary/secondary outcome info
Out_list=[]
Out_measure=""
Out_time=""
Out_desc=""
for Out in root.iter(element):
if Out.find("measure") is not None:
Out_measure = Out[0].text
if Out.find("time_frame") is not None:
Out_time = Out[1].text
        if Out.find("description") is not None:
Out_desc = Out[2].text
Out_list.append(Out_measure+"|"+Out_time+"|"+Out_desc)
xml_dict[element+'_list'] = Out_list
#function to get the arm_group label and description
def CT_arm(element):
element_count = len(root.findall(element))
xml_dict[element+'_count'] = element_count
arm_list=[]
arms_label=""
arms_description=""
for arms in root.iter(element):
if arms.find("arm_group_label") is not None:
arms_label = arms[0].text
if arms.find("description") is not None:
arms_description = arms[2].text
arms_description=arms_description.replace("\n","")
arm_list.append(arms_label+"|"+arms_description)
xml_dict[element+"_list"]=arm_list
filepath = input('filepath for list of XML files: use \"folderundercurrentdirectory\\subfolder\":')
outfilename = input('please give the filename for output: ')
outputfile = "output\\"+outfilename
for i in NCTID_list:
xml_dict={}
filename = filepath+"\\"+i+".xml"
if path.exists(filename):
print ("processing "+filename)
xml_dict['NCTID']=i
with open(filename, mode='rt', errors='ignore') as xml:
tree = ET.parse(xml)
root = tree.getroot()
#oversight_info
if root.findall('oversight_info/has_dmc'):
for dmc in root.findall('oversight_info/has_dmc'):
if dmc != None:
xml_dict['dmc_oversight']=dmc.text
else:
xml_dict['dmc_oversight']="n/a"
else:
xml_dict['dmc_oversight']="n/a"
#primary and secondary outcome
CT_tag('primary_outcome')
CT_tag('secondary_outcome')
#arm info
CT_arm('arm_group')
#eligibility
for inclusion_exclusion in root.iter('eligibility'):
for text in inclusion_exclusion.findall('criteria/textblock') :
content = text.text
content=str(content)
content=content.replace("\n","")
content = re.sub(' +', ' ', content)
xml_dict['inclusion_exclusion']=content
#mesh_term
mesh_term_list = []
if root.find('condition_browse') is not None:
for mesh_term in root.findall('condition_browse/mesh_term'):
mesh_term_list.append(mesh_term.text)
if root.find('intervention_browse') is not None:
for mesh_term in root.findall('intervention_browse/mesh_term'):
mesh_term_list.append(mesh_term.text)
xml_dict['mesh_term'] = mesh_term_list
# write the dictionary into a csv file, each key:value is a cell, separated by ";". one NCTID in one row
print (i+" parsed successfully!")
#print (xml_dict)
with open(outputfile, 'a') as f:
#writer = csv.writer(f)
for key, value in xml_dict.items():
if type(xml_dict[key]) == list:
f.write(str(key)+":")
for t in xml_dict[key]:
f.write(str(t)+",")
f.write("||")
else:
f.write(str(key)+":"+str(value))
f.write("||")
f.write("\n")
f.close()
# add from the CT_parse_69.csv to katsanos_parse.csv what is not in the input folder
CT_list =[]
for i in NCTID_list:
if path.exists("input\\katsanos\\"+i+".xml"):
print (i+".xml file is already parsed." )
else:
CT_list.append(i)
print (len(CT_list))<jupyter_output>NCT01816412.xml file is already parsed.
NCT02540018.xml file is already parsed.
NCT01083030.xml file is already parsed.
NCT01850056.xml file is already parsed.
NCT01450722.xml file is already parsed.
NCT00156624.xml file is already parsed.
NCT00120406.xml file is already parsed.
NCT03023098.xml file is already parsed.
12
|
non_permissive
|
/ClinicalTrialParseAll.ipynb
|
linikujp/ClinicalTrialPaclitaxel
| 2 |
<jupyter_start><jupyter_text>Count number of ASE / Imprinted genes overlapping ASTADs and compare to random sample<jupyter_code>import sys
sys.path.append("..")
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import norm
from pybedtools import BedTool
import matplotlib.pyplot as plt
from utilities import readDiffTAD
from collections import defaultdict
from itertools import product
from utilities import formatP, formatCell, defaultPlotting, sampleByGroup
def setASTAD(x):
return 'ASTAD' if x > 2 else 'non-ASTAD'
def setImprinted(x):
if x == 'Not Imprinted':
return 'non-Imprinted'
else:
return 'Imprinted'
def setASE(x):
if x in ['Monoallelic', 'Paternal', 'Maternal']:
return 'ASE'
else:
return 'non-ASE'
drop = ([
'score', 'strand', 'ID', 'symbol', 'Imprinting Status',
'TAD Score (GM12878)', 'TAD Score (IMR-90)', 'TAD Score (H1-hESC)',
'ASE (GM12878)', 'ASE (IMR-90)', 'ASE (H1-hESC)', 'Imprinted'
])
names = ['chrom', 'start', 'end', 'geneType', 'ASTAD', 'ref']
geneStats = pd.read_pickle('allGeneStats.pkl')
geneStats['Imprinted'] = geneStats['Imprinting Status'].apply(setImprinted)
results = {}
cells = ['GM12878', 'IMR90', 'H1hESC']
for cell, status in list(product(cells, ['ASE', 'Imprinted'])):
cell = formatCell(cell)
df = geneStats.copy()
df['ASTAD'] = df[f'TAD Score ({cell})'].apply(setASTAD)
if status == 'ASE':
ref = 'ASE'
df['ref'] = df[f'ASE ({cell})'].apply(setASE)
else:
df['ref'] = df['Imprinted']
ref = 'Imprinted'
# For GM12878, ASE only consider informative genes (since we have non-monoallelic information)
if (cell == 'GM12878') and (status == 'ASE'):
        df = df.loc[~df['ASE (GM12878)'].isin(['Unknown'])]
df = df.drop(drop, axis=1)
# Consider only genes that overlap valid TADs
tads = readDiffTAD(
cell, diffOnly=False, X=False, normalOnly=True, excludeLowMap=True,
excludeHighSignal=True, pyBed=True).sort().merge()
df = BedTool.from_dataframe(df).intersect(tads, wa=True, u=True).to_dataframe(names=names)
sampleGroups = defaultdict(int, df.loc[df['ref']==ref].groupby(['geneType']).size().to_dict())
exp = []
obs = ((df['ASTAD'] == 'ASTAD') & (df['ref'] == ref)).sum()
for i in range(10_000):
sample = sampleByGroup(df, ['geneType'], sampleGroups).reset_index(drop=True)
exp.append((sample['ASTAD'] == 'ASTAD').sum())
results[(cell, status)] = (obs, np.array(exp))
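# Note (added sketch): the plots below summarise the permutation null with a normal
# approximation (z-score). An assumption-free alternative is the empirical p-value,
# e.g. for one (cell, status) pair:
# obs, exp = results[(cell, status)]
# empirical_p = (np.sum(exp >= obs) + 1) / (len(exp) + 1)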
defaultPlotting(size=9, width=90, ratio=1)
for status in ['ASE', 'Imprinted']:
for i, cell in enumerate(cells):
fig, ax = plt.subplots()
cell = formatCell(cell)
obs, exp = results[(cell, status)]
z = (obs - exp.mean()) / exp.std()
p = formatP(1 - norm.cdf(z))
sns.histplot(exp, discrete=True, ax=ax)
ax.axvline(x=obs, color='red', ls='--')
title = f'{cell} ({status}), z = {z:.2f}, p {p}'
ax.set_title(title, loc='left')
ax.set_ylabel('')
ax.set_xlabel(f'Expected Random Overlap')
fig.tight_layout()
fig.savefig(f'plots/ASGeneASTADpermute-{cell}-{status}.svg')
fig, ax = plt.subplots()
ax.axis('off')
fig.savefig('plots/blankPlot.svg')<jupyter_output><empty_output>
|
no_license
|
/1.ASE_and_Imprinted/.ipynb_checkpoints/2.chiSqEnrichment-checkpoint.ipynb
|
StephenRicher/AS-HiC-Analysis
| 1 |
<jupyter_start><jupyter_text># 13.2 Creating Model Descriptions with Patsy
Patsy is a Python library for describing statistical models (especially linear models) with a string-based formula syntax, inspired by the formula syntax of the R and S languages.
A Patsy formula is a specially formatted string, such as:
y ~ x0 + x1
The syntax a + b does not mean adding a and b; rather, these are terms in the design matrix created for the model. The patsy.dmatrices function takes a formula string together with a dataset (a DataFrame or a dict) and produces design matrices for a linear model:<jupyter_code>import numpy as np
import pandas as pd
data = pd.DataFrame({'x0': [1, 2, 3, 4, 5],
'x1': [0.01, -0.01, 0.25, -4.1, 0.],
'y': [-1.5, 0., 3.6, 1.3, -2.]})
data
import patsy
y, X = patsy.dmatrices('y ~ x0 + x1', data)<jupyter_output><empty_output><jupyter_text>We get:<jupyter_code>y
X<jupyter_output><empty_output><jupyter_text>These Patsy DesignMatrix instances are NumPy ndarrays with additional metadata attached:<jupyter_code>np.asarray(y)
np.asarray(X)<jupyter_output><empty_output><jupyter_text>You may wonder where the Intercept in X comes from. This is a convention of linear models such as ordinary least squares regression. The intercept can be dropped by adding the term `+ 0` to the model:<jupyter_code>patsy.dmatrices('y ~ x0 + x1 + 0', data)[1]<jupyter_output><empty_output><jupyter_text>These Patsy objects can be passed directly to an algorithm such as numpy.linalg.lstsq to compute an ordinary least squares regression:<jupyter_code>coef, resid, _, _ = np.linalg.lstsq(X, y)
coef
coef = pd.Series(coef.squeeze(), index=X.design_info.column_names)
coef<jupyter_output><empty_output><jupyter_text># 1 Data Transformations in Patsy Formulas
Python code can be mixed into Patsy formulas. When evaluating the formula, the library will try to find the functions you use in the enclosing scope:<jupyter_code>y, X = patsy.dmatrices('y ~ x0 + np.log(np.abs(x1) + 1)', data)
X<jupyter_output><empty_output><jupyter_text>Some commonly used variable transformations include standardizing (mean 0, variance 1) and centering (subtracting the mean). Patsy has built-in functions for these:<jupyter_code>y, X = patsy.dmatrices('y ~ standardize(x0) + center(x1)', data)
X<jupyter_output><empty_output><jupyter_text>As part of modeling, you may fit a model on one dataset and then evaluate it on another. When transformations such as centering or standardizing are involved, you must be careful when using the model to make predictions on new data. These are called stateful transformations, because the mean and standard deviation computed on the original training set must be applied to the new dataset.
By saving the information from the original dataset, the patsy.build_design_matrices function can apply the transformations to new data:<jupyter_code>new_data = pd.DataFrame({
'x0': [6, 7, 8, 9],
'x1': [3.1, -0.5, 0, 2.3],
'y': [1, 2, 3, 4]})
new_X = patsy.build_design_matrices([X.design_info], new_data)
new_X<jupyter_output><empty_output><jupyter_text>Because the plus sign does not mean addition in Patsy formulas, columns that you want to add together by name must be wrapped in the I function:<jupyter_code>y, X = patsy.dmatrices('y ~ I(x0 + x1)', data)
X<jupyter_output><empty_output><jupyter_text>Patsy has a number of other built-in transformations in the patsy.builtins module. See the documentation for more information.
Categorical data has a special class of transformations, described next.
# 2 Categorical Data and Patsy
Non-numeric data can be turned into a model design matrix in several ways. This is a large topic; only a brief introduction is given here.
When non-numeric terms are used in a Patsy formula, they are converted to dummy variables by default. If there is an intercept, one of the levels is dropped to avoid collinearity:<jupyter_code>data = pd.DataFrame({'key1': ['a', 'a', 'b', 'b', 'a', 'b', 'a', 'b'],
'key2': [0, 1, 0, 1, 0, 1, 0, 0],
'v1': [1, 2, 3, 4, 5, 6, 7, 8],
'v2': [-1, 0, 2.5, -0.5, 4.0, -1.2, 0.2, -1.7] })
y, X = patsy.dmatrices('v2 ~ key1', data)
X<jupyter_output><empty_output><jupyter_text>If the intercept is omitted from the model, columns for each category level are included in the design matrix:<jupyter_code>y, X = patsy.dmatrices('v2 ~ key1 + 0', data)
X<jupyter_output><empty_output><jupyter_text>Numeric columns can be treated as categorical with the C function:<jupyter_code>y, X = patsy.dmatrices('v2 ~ C(key2)', data)
X<jupyter_output><empty_output><jupyter_text>Things get more complicated when multiple categorical terms are used in a model. Interaction terms of the form `key1:key2` can be included, which is useful, for example, in analysis of variance (ANOVA) models:<jupyter_code>data['key2'] = data['key2'].map({0: 'zero', 1: 'one'})
data
y, X = patsy.dmatrices('v2 ~ key1 + key2', data)
X
y, X = patsy.dmatrices('v2 ~ key1 + key2 + key1:key2', data)
X<jupyter_output><empty_output>
|
permissive
|
/数据分析/代码与笔记/pydata-notebook-master/Chapter-13/13.2 Creating Model Descriptions with Patsy(利用Patsy创建模型描述).ipynb
|
Nick17t/SDJU-Course-Material
| 13 |
<jupyter_start><jupyter_text># RNN and Time Series Data PredictionThis notebook uses the LSTM/GRU layers of an RNN.# Natural Language Processing and RNN
Using RNN architectures for NLP tasks<jupyter_code>inputs = np.random.random([32, 10, 8]).astype(np.float32)
simple_rnn = tf.keras.layers.SimpleRNN(4)
output = simple_rnn(inputs) # The output has shape `[32, 4]`.
simple_rnn = tf.keras.layers.SimpleRNN(
4, return_sequences=True, return_state=True)
# whole_sequence_output has shape `[32, 10, 4]`.
# final_state has shape `[32, 4]`.
whole_sequence_output, final_state = simple_rnn(inputs)
whole_sequence_output.shape
final_state.shape
shakespeare_url = "https://homl.info/shakespeare" # shortcut URL
filepath = keras.utils.get_file("shakespeare.txt", shakespeare_url)
with open(filepath) as f:
shakespeare_text = f.read()
word_tokenizer = keras.preprocessing.text.Tokenizer(char_level=True)
word_tokenizer.fit_on_texts(shakespeare_text)
max_id = len(word_tokenizer.word_index)
[encoded] = word_tokenizer.texts_to_sequences([shakespeare_text[:]])
len(word_tokenizer.word_index)
# word_tokenizer.document_count
[encoded] = np.array(word_tokenizer.texts_to_sequences([shakespeare_text])) - 1
encoded<jupyter_output><empty_output><jupyter_text><jupyter_code>dataset_size = word_tokenizer.document_count
train_size = dataset_size * 90 // 100
dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])
n_steps = 100
window_length = n_steps + 1 # target = input shifted 1 character ahead
dataset = dataset.window(window_length, shift=1, drop_remainder=True)
dataset
dataset = dataset.flat_map(lambda window: window.batch(window_length))
batch_size=40
# before
for i in dataset.take(1):
first = i
# after suffling and taking batch tensor of size 100 or
dataset = dataset.shuffle(10000).batch(batch_size)
dataset = dataset.map(lambda window: (window[:,:-1],window[:,1:]))
dataset = dataset.map(lambda X_batch,y_batch: (tf.one_hot(X_batch,depth=max_id),y_batch))
dataset = dataset.prefetch(1)
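# Descriptive note on the pipeline above: window(window_length, shift=1) yields overlapping
# windows of 101 character ids; flat_map(... batch(window_length)) turns each window dataset
# into a single tensor; after shuffling and batching, window[:, :-1] is the input sequence and
# window[:, 1:] is the same sequence shifted one character ahead (the prediction target),
# with the inputs one-hot encoded to depth max_id.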
# Training this character-level model takes a very long time; it is revisited later, but it is important
model = keras.Sequential([
keras.layers.GRU(39,return_sequences=True,dropout=0.2,recurrent_dropout=0.2,input_shape=[None,max_id]),
keras.layers.GRU(39,return_sequences=True,dropout=0.2,recurrent_dropout=0.2),
keras.layers.TimeDistributed(keras.layers.Dense(max_id,activation="softmax"))
])
model.compile(optimizer="adam",loss='sparse_categorical_crossentropy')
model.fit(dataset,epochs=5)
# print(word_tokenizer.sequences_to_texts([first_map[0].numpy()])[0])
train_size = dataset_size * 40 // 100
dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])
dataset = dataset.window(window_length, shift=n_steps, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_length))
dataset = dataset.batch(1)
dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:]))
dataset = dataset.map(
lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))
dataset = dataset.prefetch(1)
model = keras.models.Sequential([
keras.layers.GRU(128, return_sequences=True, stateful=True,
dropout=0.2, recurrent_dropout=0.2,
batch_input_shape=[1, None, max_id]),
keras.layers.GRU(128, return_sequences=True, stateful=True,
dropout=0.2, recurrent_dropout=0.2),
keras.layers.TimeDistributed(keras.layers.Dense(max_id,
activation="softmax"))
])
class ResetStatesCallback(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.model.reset_states()
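# Descriptive note: with stateful=True the GRU layers carry their hidden state across batches,
# so consecutive windows must follow each other in the text (hence shift=n_steps and batch
# size 1 above); this callback resets that state at the start of every epoch.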
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
model.fit(dataset, epochs=5, callbacks=[ResetStatesCallback()])
print(word_tokenizer.sequences_to_texts([first.numpy()])[0])
# loading imdb movie rating data which has 25,000 training and 25000 test all are reviews stored as list indec by their frequency
import tensorflow_datasets as tfds
datasets, info = tfds.load("imdb_reviews", as_supervised=True, with_info=True)
train_size = info.splits["train"].num_examples
datasets
def preprocess(X_batch, y_batch):
X_batch = tf.strings.substr(X_batch, 0, 300)
X_batch = tf.strings.regex_replace(X_batch, b"<br\\s*/?>", b" ")
X_batch = tf.strings.regex_replace(X_batch, b"[^a-zA-Z']", b" ")
X_batch = tf.strings.split(X_batch)
return X_batch.to_tensor(default_value=b"<pad>"), y_batch
from collections import Counter
vocab = Counter()
for X_batch,y_batch in datasets['train'].batch(32).map(preprocess):
for review in X_batch:
vocab.update(list(review.numpy()))
vocab
vocab.most_common()[:3]
vocab_size = 10000
truncate_vocab = [word for word,count in vocab.most_common()[:vocab_size]]
len(truncate_vocab)
words = tf.constant(truncate_vocab)
word_ids = tf.range(len(words),dtype=tf.int64)
vocab_init = tf.lookup.KeyValueTensorInitializer(words, word_ids)
num_oov_buckets = 1000
table = tf.lookup.StaticVocabularyTable(vocab_init, num_oov_buckets)
table.lookup(tf.constant([b"I hate this movie".split()]))
def encode_word(X_batch,y_batch):
return table.lookup(X_batch),y_batch
train_set = datasets['train'].batch(32).map(preprocess)
train_set = train_set.map(encode_word).prefetch(1)
embed_size = 128
model = keras.models.Sequential([
keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size,
input_shape=[None]),
keras.layers.GRU(128, return_sequences=True),
keras.layers.GRU(128),
keras.layers.Dense(1, activation="sigmoid")
])
model.compile(loss="binary_crossentropy", optimizer="adam",
metrics=["accuracy"])
history = model.fit(train_set, epochs=5)
model.save("/content/mydrive/MyDrive/imdb_model/")
model = keras.Sequential([
hub.KerasLayer("https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1",
dtype=tf.string, input_shape=[], output_shape=[50]),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(1, activation="sigmoid")
])
model.compile(loss="binary_crossentropy", optimizer="adam",
metrics=["accuracy"])
datasets, info = tfds.load("imdb_reviews", as_supervised=True, with_info=True)
train_size = info.splits["train"].num_examples
batch_size = 32
train_set = datasets["train"].batch(batch_size).prefetch(1)
history = model.fit(train_set, epochs=5)
train_set
test_dataset = datasets["test"].batch(batch_size).prefetch(1)
model.evaluate(test_dataset)
for i in test_dataset.take(1):
print(len(i),i[0].numpy()[0])
ts = i[0].numpy()
ts = tf.constant(ts)
pred = np.argmax(model.predict(ts), axis=-1)
print("review:\t{}\nprediction\t{} \n\t".format(tf.compat.as_str(ts[1].numpy())[:60], pred[0]))
tf.compat.as_str(ts[0].numpy())
def plot_graphs(history, metric):
plt.plot(history.history[metric])
if 'val_'+metric in history.history:
plt.plot(history.history['val_'+metric], '')
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, 'val_'+metric])
plot_graphs(history,'accuracy')
# history.history
for i in train_dataset.take(1):
print(len(i),i['label'],(i['text'])[0])
for example, label in train_dataset.take(1):
print('texts: ', example.numpy()[:3])
print()
print('labels: ', label.numpy()[:3])
VOCAB_SIZE = 1000
encoder = tf.keras.layers.experimental.preprocessing.TextVectorization(
max_tokens=VOCAB_SIZE)
encoder.adapt(train_dataset.map(lambda text, label: text))
example
model = keras.Sequential([encoder,
keras.layers.Embedding(input_dim=len(encoder.get_vocabulary()),output_dim=64,mask_zero=True),
keras.layers.Bidirectional(keras.layers.LSTM(64)),
keras.layers.Dense(64,"relu"),
keras.layers.Dense(1)
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
model.fit(train_dataset,epochs=5,validation_data=test_dataset,validation_steps=30)
sample_text = ('i just like this movie')
# predictions = model.predict(np.array([sample_text]))
predictions = np.argmax(model.predict(test_dataset),axis=-1)
print(predictions)
datasets,info = tfds.load("fashion_mnist",with_info=True,as_supervised=True)
example = info.splits['train'].num_examples
train_dataset = datasets['train'].shuffle(example).batch(32).prefetch(1)
valid_dataset = datasets['test'].shuffle(example).batch(32).prefetch(1)
X_train.shape,X_valid.shape,y_train.shape,y_valid.shape
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.0
stacked_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(100, activation="selu"),
keras.layers.Dense(30, activation="selu"),
])
stacked_decoder = keras.models.Sequential([
keras.layers.Dense(100, activation="selu", input_shape=[30]),
keras.layers.Dense(28 * 28, activation="sigmoid"),
keras.layers.Reshape([28, 28])
])
stacked_ae = keras.models.Sequential([stacked_encoder, stacked_decoder])
stacked_ae.compile(loss=keras.losses.binary_crossentropy,
optimizer=keras.optimizers.SGD(learning_rate=1.5))
history = stacked_ae.fit(X_train, X_train, epochs=10
)
stacked_ae.summary()
stacked_ae.summary()
history.history
fig,ax = plt.subplots(2,6,figsize=(12,5))
i,ct=0,0
# while i<2:
for j in range(1,7):
r = stacked_ae.predict(X_train[(j-1):j]).reshape(28,28)
ax[0][j-1].imshow(r,cmap="gray")
ax[0][j-1].set_title(y_train[j-1])
ax[0][j-1].axis("off")
for j in range(6):
# r = stacked_ae.predict().reshape(28,28)
o = X_train[j].reshape(28,28)
ax[1][j].imshow(o,cmap="gray")
ax[1][j].set_title(y_train[j])
ax[1][j].axis("off")
cfir = keras.datasets.cifar10
(X_train_full, y_train_full), (X_test, y_test) = cfir.load_data()
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.0
stacked_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[32, 32,3]),
keras.layers.Dense(200, activation="selu"),
keras.layers.Dense(200, activation="selu"),
keras.layers.Dense(100, activation="selu"),
])
stacked_decoder = keras.models.Sequential([
keras.layers.Dense(100, activation="selu", input_shape=[100]),
keras.layers.Dense(200, activation="selu"),
keras.layers.Dense(32 *32*3 , activation="sigmoid"),
keras.layers.Reshape([32, 32,3])
])
stacked_ae = keras.models.Sequential([stacked_encoder, stacked_decoder])
stacked_ae.compile(loss=keras.losses.binary_crossentropy,
optimizer=keras.optimizers.Nadam())
history = stacked_ae.fit(X_train, X_train, epochs=20 )
stacked_ae.summary()
def plot_image_owner(model):
    # Plot 6 model reconstructions (top row) against the original images (bottom row)
    fig, ax = plt.subplots(2, 6, figsize=(12, 5))
    for j in range(1, 7):
        r = model.predict(X_train[(j-1):j]).reshape(32, 32, 3)
        ax[0][j-1].imshow(r, cmap="gray")
        ax[0][j-1].set_title(y_train[j-1])
        ax[0][j-1].axis("off")
    for j in range(6):
        o = X_train[j].reshape(32, 32, 3)
        ax[1][j].imshow(o, cmap="gray")
        ax[1][j].set_title(y_train[j])
        ax[1][j].axis("off")
# more clean code for image plot and compare
def plot_image(idx):
plt.imshow(idx,cmap="gray")
plt.axis("off")
def show_reconstructions(model, n_images=5):
reconstructions = model.predict(X_valid[:n_images])
fig = plt.figure(figsize=(n_images *2, 3))
for image_index in range(n_images):
plt.subplot(2, n_images, 1 + image_index)
plot_image(X_valid[image_index])
plt.subplot(2, n_images, 1 + n_images + image_index)
plot_image(reconstructions[image_index])
show_reconstructions(stacked_ae, n_images=5)
from sklearn.manifold import TSNE
stacked_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[32, 32,3]),
keras.layers.Dense(200, activation="selu"),
keras.layers.Dense(50, activation="selu"),
])
X_valid_compressed = stacked_encoder.predict(X_valid)
tsne_comp = TSNE()
X_valid_2D = tsne_comp.fit_transform(X_valid_compressed)
fig,ax = plt.subplots(1,figsize=(15,10))
plt.scatter(X_valid_2D[:, 0], X_valid_2D[:, 1], c=y_valid, s=10, cmap="tab10")
tf.multiply(4,5)
class DenseTranspose(keras.layers.Layer):
def __init__(self, dense, activation=None, **kwargs):
self.dense = dense
self.activation = keras.activations.get(activation)
super().__init__(**kwargs)
def build(self, batch_input_shape):
self.biases = self.add_weight(name="bias", initializer="zeros",
shape=[self.dense.input_shape[-1]])
super().build(batch_input_shape)
def call(self, inputs):
z = tf.matmul(inputs, self.dense.weights[0], transpose_b=True)
return self.activation(z + self.biases)
# Same structure as above, but the decoder reuses (ties) the encoder's weights,
# so roughly half as many parameters are learned; training is faster and the loss is comparable
dense_1 = keras.layers.Dense(300, activation="selu")
dense_2 = keras.layers.Dense(100, activation="selu")
tied_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[32,32,3]),
dense_1,
dense_2
])
tied_decoder = keras.models.Sequential([
DenseTranspose(dense_2, activation="selu"),
DenseTranspose(dense_1, activation="sigmoid"),
keras.layers.Reshape([32,32,3])
])
tied_ae = keras.models.Sequential([tied_encoder, tied_decoder])
tied_ae.compile(loss=keras.losses.binary_crossentropy,
optimizer=keras.optimizers.Nadam())
history= tied_ae.fit(X_train,X_train,epochs=10,validation_data=[X_valid,X_valid])
stacked_encoder.weights[0]
conv_encoder = keras.models.Sequential([
keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]),
keras.layers.Conv2D(16, kernel_size=3, padding="same", activation="selu"),
keras.layers.MaxPool2D(pool_size=2),
keras.layers.Conv2D(32, kernel_size=3, padding="same", activation="selu"),
keras.layers.MaxPool2D(pool_size=2),
keras.layers.Conv2D(64, kernel_size=3, padding="same", activation="selu"),
keras.layers.MaxPool2D(pool_size=2)
])
conv_decoder = keras.models.Sequential([
keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding="valid",
activation="selu",
input_shape=[3, 3, 64]),
keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding="same",
activation="selu"),
keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding="same",
activation="sigmoid"),
keras.layers.Reshape([28, 28])
])
conv_ae = keras.models.Sequential([conv_encoder, conv_decoder])
conv_ae.compile(optimizer=keras.optimizers.Nadam(),loss=keras.losses.binary_crossentropy,metrics=['accuracy'])
# mnist = keras.datasets
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.0
conv_ae.fit(X_train,X_train,epochs=10,validation_data=(X_valid,X_valid))
show_reconstructions(conv_ae,n_images=20)
recurrent_encoder = keras.models.Sequential([
keras.layers.LSTM(100, return_sequences=True, input_shape=[None, 28]),
keras.layers.LSTM(30)
])
recurrent_decoder = keras.models.Sequential([
keras.layers.RepeatVector(28, input_shape=[30]),
keras.layers.LSTM(100, return_sequences=True),
keras.layers.TimeDistributed(keras.layers.Dense(28, activation="sigmoid"))
])
recurrent_ae = keras.models.Sequential([recurrent_encoder, recurrent_decoder])
recurrent_ae.compile(optimizer=keras.optimizers.Nadam(),loss=keras.losses.binary_crossentropy,metrics=['accuracy'])
dropout_encoder = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dropout(0.5),
keras.layers.Dense(100, activation="selu"),
keras.layers.Dense(30, activation="selu")
])
dropout_decoder = keras.models.Sequential([
keras.layers.Dense(100, activation="selu", input_shape=[30]),
keras.layers.Dense(28 * 28, activation="sigmoid"),
keras.layers.Reshape([28, 28])
])
dropout_ae = keras.models.Sequential([dropout_encoder, dropout_decoder])
dropout_ae.compile(optimizer=keras.optimizers.Nadam(),loss=keras.losses.binary_crossentropy,metrics=['accuracy'])
K = keras.backend
class Sampling(keras.layers.Layer):
def call(self, inputs):
mean, log_var = inputs
return K.random_normal(tf.shape(log_var)) * K.exp(log_var / 2) + mean
codings_size = 10
inputs = keras.layers.Input(shape=[28, 28])
z = keras.layers.Flatten()(inputs)
z = keras.layers.Dense(150, activation="selu")(z)
z = keras.layers.Dense(100, activation="selu")(z)
codings_mean = keras.layers.Dense(codings_size)(z) # μ
codings_log_var = keras.layers.Dense(codings_size)(z) # γ
codings = Sampling()([codings_mean, codings_log_var])
variational_encoder = keras.Model(
inputs=[inputs], outputs=[codings_mean, codings_log_var, codings])
decoder_inputs = keras.layers.Input(shape=[codings_size])
x = keras.layers.Dense(100, activation="selu")(decoder_inputs)
x = keras.layers.Dense(150, activation="selu")(x)
x = keras.layers.Dense(28 * 28, activation="sigmoid")(x)
outputs = keras.layers.Reshape([28, 28])(x)
variational_decoder = keras.Model(inputs=[decoder_inputs], outputs=[outputs])
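# A minimal sketch (not in the original notebook): wiring the encoder and decoder into a
# full variational autoencoder and adding the standard KL-divergence latent loss,
# scaled by 28*28 = 784 to match the per-pixel reconstruction loss.
reconstructions = variational_decoder(codings)
variational_ae = keras.Model(inputs=[inputs], outputs=[reconstructions])
latent_loss = -0.5 * K.sum(
    1 + codings_log_var - K.exp(codings_log_var) - K.square(codings_mean), axis=-1)
variational_ae.add_loss(K.mean(latent_loss) / 784.)
variational_ae.compile(loss="binary_crossentropy", optimizer="rmsprop")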
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.0
# Fashion mnist data
codings_size = 30
# This is the generator: it learns to produce fake images that resemble the training images -- Generative Adversarial Network (GAN)
generator = keras.models.Sequential([
keras.layers.Dense(100, activation="selu", input_shape=[codings_size]),
keras.layers.Dense(150, activation="selu"),
keras.layers.Dense(28 * 28, activation="sigmoid"),
keras.layers.Reshape([28, 28])
])
# This is the discriminator: its usual job is to decide whether an image is fake or real -- GAN
discriminator = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(150, activation="selu"),
keras.layers.Dense(100, activation="selu"),
keras.layers.Dense(1, activation="sigmoid")
])
gan = keras.models.Sequential([generator, discriminator])
discriminator.compile(loss="binary_crossentropy", optimizer="rmsprop")
discriminator.trainable = False
gan.compile(loss="binary_crossentropy", optimizer="rmsprop")
batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices(X_train).shuffle(1000)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
def train_gan(gan, dataset, batch_size, codings_size, n_epochs=50):
generator, discriminator = gan.layers
for epoch in range(n_epochs):
for X_batch in dataset:
# phase 1 - training the discriminator
noise = tf.random.normal(shape=[batch_size, codings_size])
generated_images = generator(noise)
# generated_images = tf.cast(generated_images,tf.float32)
X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
discriminator.trainable = True
discriminator.train_on_batch(X_fake_and_real, y1)
# phase 2 - training the generator
noise = tf.random.normal(shape=[batch_size, codings_size])
plt.imshow(noise,cmap="gray")
y2 = tf.constant([[1.]] * batch_size)
discriminator.trainable = False
gan.train_on_batch(noise, y2)
train_gan(gan, dataset, batch_size, codings_size)
codings_size = 100
generator = keras.models.Sequential([
keras.layers.Dense(7 * 7 * 128, input_shape=[codings_size]),
keras.layers.Reshape([7, 7, 128]),
keras.layers.BatchNormalization(),
keras.layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding="same",
activation="selu"),
keras.layers.BatchNormalization(),
keras.layers.Conv2DTranspose(1, kernel_size=5, strides=2, padding="same",
activation="tanh")
])
discriminator = keras.models.Sequential([
keras.layers.Conv2D(64, kernel_size=5, strides=2, padding="same",
activation=keras.layers.LeakyReLU(0.2),
input_shape=[28, 28, 1]),
keras.layers.Dropout(0.4),
keras.layers.Conv2D(128, kernel_size=5, strides=2, padding="same",
activation=keras.layers.LeakyReLU(0.2)),
keras.layers.Dropout(0.4),
keras.layers.Flatten(),
keras.layers.Dense(1, activation="sigmoid")
])
gan = keras.models.Sequential([generator, discriminator])
X_train = X_train.reshape(-1, 28, 28, 1) * 2. - 1. # reshape and rescale
def preprocess_fn(data):
image = data['image']
# Normalize [0, 255] to [0, 1]
image = tf.cast(image, tf.float32)
image = image / 255.
# Resize the images to 224 x 224
image = tf.image.resize(image, (224, 224))
data['image'] = image
return data
def preprocess_fn(data):
image = keras.preprocessing.image.load_img(data)
image = keras.preprocessing.image.img_to_array(image)
# Normalize [0, 255] to [0, 1]
image = tf.cast(image, tf.float32)
image = image / 255.
# Resize the images to 224 x 224
image = tf.image.resize(image, (224, 224))
return image
img = preprocess_fn("/content/train-cbb-4.jpg")
tf.constant(img).batch(1)
keras.preprocessing.image.load_img()
<jupyter_output><empty_output><jupyter_text># exercisesLoading all datasets<jupyter_code>datasets = tfds.load('cassava',as_supervised=True)
words = ["mongo","mongoose","tea","mhjds","nmsda"]
[[x for x in i] for i in words]<jupyter_output><empty_output><jupyter_text>state, action, rewards <jupyter_code>def preprocess(data,label):
image = data
image = tf.cast(image,tf.float32)
image = image/255.0
image = tf.image.resize(image,[224,224])
# data['image'] = image
return image,label
train_datasets= datasets['train'].map(preprocess).shuffle(10000).batch(64).prefetch(1)
valid_datasets= datasets['validation'].map(preprocess).batch(64).prefetch(1)
test_datasets = datasets['test'].map(preprocess).batch(64).prefetch(1)
model = keras.models.Sequential([
keras.layers.Conv2D(32, 7, activation="relu", padding="same",
input_shape=[224,224,3]),
keras.layers.MaxPooling2D(2),
keras.layers.Conv2D(64, 4, activation="relu", padding="same"),
keras.layers.Conv2D(64, 4, activation="relu", padding="same"),
keras.layers.MaxPooling2D(2),
# keras.layers.Conv2D(128, 6, activation="relu", padding="same"),
# keras.layers.Conv2D(256, 6, activation="relu", padding="same"),
# keras.layers.MaxPooling2D(2),
keras.layers.Flatten(),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dropout(0.5),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dropout(0.5),
keras.layers.Dense(5, activation="softmax")
])
starter_learning_rate = 0.01
end_learning_rate = 0.0001
decay_steps = 10000
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
checkpoint_path = "/content/drive/MyDrive/cassava_model"
early_stop = keras.callbacks.EarlyStopping(patience=6,restore_best_weights=True)
save_model = keras.callbacks.ModelCheckpoint(checkpoint_path,save_best_only=True)
model.compile(optimizer=tf.keras.optimizers.Nadam(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_datasets,epochs=30,validation_data = valid_datasets,callbacks=[early_stop,save_model])
model.load_weights(checkpoint_path)
model.evaluate(valid_datasets)
from sklearn.datasets import load_sample_images
flower = load_sample_images()
flower = (flower['images'])
flower = tf.image.resize(flower,[224,224])
flw = tf.data.Dataset.from_tensor_slices(flower)
flw= flw.batch(2)
flower_iter =flw.as_numpy_iterator()
first = next(flower_iter)
input_shape = (4, 28, 28, 3)
x = tf.random.normal(input_shape)
plt.imshow(x[0])
y = tf.keras.layers.Conv2D(
filters= 3,kernel_size=6, activation=None, input_shape=[28,28,3])(x)
plt.imshow(y[0])
input_shape = [2,224,224,3]
# x = tf.random.normal(input_shape)
fig,ax = plt.subplots(1,2,figsize=(15,15))
y = tf.keras.layers.Conv2D(
3,(3,3),strides= (2,2), activation='elu', padding="same", input_shape=[224,224,3])(first)
print(y.shape)
ax[1].imshow(y[0])
ax[0].imshow(y[1])
example = train_datasets.as_numpy_iterator()
example = next(example)
<jupyter_output><empty_output><jupyter_text># Fine tuning pre-trained model (Mobilenet_v3) <jupyter_code>path = "/content/train-cbb-1.jpg"
image = keras.preprocessing.image.load_img(path)
image = keras.preprocessing.image.img_to_array(image)
image,label = preprocess(image,np.array([1]))
datasets,info = tfds.load("cats_vs_dogs",with_info=True,as_supervised=True)
simple_model = keras.layers.Dense(4)
simple_model.build((None,4))
simple_model.trainable =False
print("weight",len( simple_model.weights))
print("trainable_weights",len(simple_model.trainable_weights))
print("non-trainable_weights",len(simple_model.non_trainable_weights))
layer_norm = keras.layers.BatchNormalization()
layer_norm.build((None,4))
print()
print("weight",len( layer_norm.weights))
print("trainable_weights",len(layer_norm.trainable_weights))
print("non-trainable_weights",len(layer_norm.non_trainable_weights))
layer1 = keras.layers.Dense(3,"relu")
layer2 = keras.layers.Dense(3,"sigmoid")
model = keras.Sequential([keras.layers.InputLayer((3,)),layer1,layer2])
layer1.trainable = False  # freeze layer1 -- its initially assigned weights will not change
initial_layer1_weights_values = layer1.get_weights()
model.compile(optimizer = "adam",loss="mse")
model.fit(np.random.random((2,3)),np.random.random((2,3)))
# Check that the weights of layer1 have not changed during training
final_layer1_weights_values = layer1.get_weights()
np.testing.assert_allclose(
initial_layer1_weights_values[0], final_layer1_weights_values[0]
)
np.testing.assert_allclose(
initial_layer1_weights_values[1], final_layer1_weights_values[1]
)
initial_layer1_weights_values,final_layer1_weights_values
inner_model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(3, activation="relu"),
keras.layers.Dense(3, activation="relu"),
]
)
model = keras.Sequential(
[keras.Input(shape=(3,)), inner_model, keras.layers.Dense(3, activation="sigmoid"),]
)
inner_model.trainable = False # Freeze the outer model
model.compile(optimizer = "adam",loss="mse")
model.fit(np.random.random((2,3)),np.random.random((2,3)))
# assert inner_model.trainable == False # All layers in `model` are now frozen
# assert inner_model.layers[0].trainable == False # `trainable` is propagated recursively
inner_model.trainable,model.trainable,inner_model.layers[0].trainable
# pretrained model --Mobilenet_v2 or v3 or v1
base_model_mobilenet = tf.keras.applications.MobileNet(
input_shape=[224,224,3],
include_top=False,
weights="imagenet",
)
base_model_xception = keras.applications.Xception(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(224, 224, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
# mobilenet_model.summary()
base_model_mobilenet.trainable = False
# Freezing weight for both model -do not change pre trained weight of base model
inputs = keras.Input(shape=(224, 224, 3))
# We make sure that the base_model is running in inference mode here,
# by passing `training=False`. This is important for fine-tuning, as you will
# learn in a few paragraphs.
x = base_model_mobilenet(inputs, training=False)
# Convert features of shape `base_model.output_shape[1:]` to vectors
x = keras.layers.GlobalAveragePooling2D()(x)
# A Dense classifier with a single unit (binary classification)
outputs = keras.layers.Dense(5)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer=keras.optimizers.Nadam(),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
checkpoint_path = "/content/drive/MyDrive/cassava_model"
early_stop = keras.callbacks.EarlyStopping(patience=6)
save_model = keras.callbacks.ModelCheckpoint(checkpoint_path,save_best_only=True)
model.fit(train_datasets, epochs=20, callbacks= [early_stop,save_model], validation_data=valid_datasets)
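# A hedged sketch (not in the original notebook) of the usual second fine-tuning phase:
# unfreeze the base model and keep training with a much smaller learning rate so the
# pre-trained weights are only nudged. The epoch count and learning rate are assumptions.
# base_model_mobilenet.trainable = True
# model.compile(optimizer=keras.optimizers.Adam(1e-5),
#               loss="sparse_categorical_crossentropy",
#               metrics=["accuracy"])
# model.fit(train_datasets, epochs=5, validation_data=valid_datasets)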
model.output
dataset_cov = pd.read_csv("/content/drive/MyDrive/datasets/covid-19/who_covid_19_sit_rep_time_series/who_covid_19_sit_rep_time_series.csv")
dataset_cov_diff = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/datasets/owid-covid-data.csv")
dataset_cov_diff.info()
<jupyter_output><empty_output><jupyter_text># Training Efficient model
The EfficientNet family has 8 models, from B0 to B7,
available with ImageNet weights; here it is tried on the flowers, CIFAR-10, CIFAR-100, and cassava disease datasets.
EfficientNetB0 expects an input shape of [224, 224, 3]
and uses comparatively few parameters.<jupyter_code># Loading all datasets used for training the EfficientNetB0 model
# standford dogs dataset
(ds_train, ds_test), ds_info = tfds.load(
"stanford_dogs", split=["train", "test"], with_info=True, as_supervised=True
)
(ds_train1, ds_test1), ds_info1 = tfds.load(
"cifar10", split=["train", "test"], with_info=True, as_supervised=True
)
(ds_train2, ds_test2), ds_info2 = tfds.load(
"cifar100", split=["train", "test"], with_info=True, as_supervised=True
)
(ds_train3, ds_test3), ds_info3 = tfds.load(
"cassava", split=["train", "test"], with_info=True, as_supervised=True
)
IMG_SIZE = 224
BATCH_SIZE = 64
DATASET = "stanford_dogs"
SHUFFLE_SIZE = ds_info.splits['train'].num_examples
NUM_CLASSES = ds_info.features["label"].num_classes
def preprocess(data, label):
    # Resize to 224x224 and scale pixel values to [0, 1], then return the processed image
    image = tf.image.resize(data, [224, 224])
    image = image / 255
    return image, label
size = (IMG_SIZE, IMG_SIZE)
ds_train = ds_train.map(lambda image, label: (tf.image.resize(image, size), label))
ds_test = ds_test.map(lambda image, label: (tf.image.resize(image, size), label))
import tensorflow as tf
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection
print("Running on TPU ", tpu.cluster_spec().as_dict()["worker"])
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
print("Not connected to a TPU runtime. Using CPU/GPU strategy")
strategy = tf.distribute.MirroredStrategy()
def show_image():
def format_label(label):
string_label = label_info.int2str(label)
return string_label.split("-")[1]
label_info = ds_info.features["label"]
for i, (image, label) in enumerate(ds_train.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("uint8"))
plt.title("{}".format(format_label(label)))
plt.axis("off")
show_image()
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
img_augmentation = Sequential(
[
preprocessing.RandomRotation(factor=0.15),
preprocessing.RandomTranslation(height_factor=0.1, width_factor=0.1),
preprocessing.RandomFlip(),
preprocessing.RandomContrast(factor=0.1),
],
name="img_augmentation",
)
label_info = ds_info.features['label']
for image, label in ds_train.take(2):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
aug_img = img_augmentation(tf.expand_dims(image, axis=0))
plt.imshow(aug_img[0].numpy().astype("uint8"))
plt.title("{}".format(format_label(label)))
plt.axis("off")
ds_train = ds_train.batch(BATCH_SIZE).shuffle(SHUFFLE_SIZE).prefetch(1)
ds_valid = ds_test.batch(BATCH_SIZE).shuffle(SHUFFLE_SIZE).prefetch(1)
ds_valid,ds_train
# # model training eficientnetb0 from scratch
# from keras.applications.efficientnet import EfficientNetB0
# with strategy.scope():
# inputs = keras.Input(shape=(IMG_SIZE,IMG_SIZE,3))
# X = img_augmentation(inputs)
# outputs = EfficientNetB0(include_top=True,weights=None,classes=NUM_CLASSES)(X)
# model = keras.Model(inputs,outputs)
# model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["accuracy"])
# model.summary()
# history = model.fit(ds_train,epochs=50,validation_data=ds_test)
from tensorflow.keras.applications import EfficientNetB0
with strategy.scope():
inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
x = img_augmentation(inputs)
outputs = EfficientNetB0(include_top=True, weights=None, classes=NUM_CLASSES)(x)
model = tf.keras.Model(inputs, outputs)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.summary()
epochs = 10
from keras.layers.experimental import preprocessing
def build(num_classes):
inputs = keras.Input(shape=(IMG_SIZE,IMG_SIZE,3))
x = img_augmentation(inputs)
    model = EfficientNetB0(include_top=False,input_tensor=x,weights='imagenet')
# freeze pretrained model layer or using image net weights
model.trainable = False
x = keras.layers.GlobalAveragePooling2D(name='ang_pool')(model.output)
x = keras.layers.BatchNormalization()(x)
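    # A possible completion of this function (hedged sketch, not in the original notebook);
    # the dropout rate and layer names are illustrative assumptions:
    # x = keras.layers.Dropout(0.2, name="top_dropout")(x)
    # outputs = keras.layers.Dense(num_classes, activation="softmax", name="pred")(x)
    # model = keras.Model(inputs, outputs, name="EfficientNetB0_transfer")
    # model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
    #               metrics=["accuracy"])
    # return model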
from PIL import Image
# classifier_model ="https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4"
classifier_model = "https://tfhub.dev/google/cropnet/classifier/cassava_disease_V1/2"
IMAGE_SHAPE = (224, 224)
classifier = tf.keras.Sequential([
hub.KerasLayer(classifier_model, input_shape=IMAGE_SHAPE+(3,))
])
grace_path = "/content/train-cbb-1.jpg"
grace_hopper = Image.open(grace_path).resize(IMAGE_SHAPE)
grace_hopper
grace_hopper = np.array(grace_hopper)/255.0
grace_hopper.shape
result = classifier.predict(grace_hopper[np.newaxis, ...])
result.shape
predicted_class = np.argmax(result[0], axis=-1)
predicted_class = labels[predicted_class]
name_map[predicted_class]
name_map = dict(
cmd='Mosaic Disease',
cbb='Bacterial Blight',
cgm='Green Mite',
cbsd='Brown Streak Disease',
healthy='Healthy',
unknown='Unknown')
labels = list(name_map.keys())
labels
keras.models.save_model(classifier,"/content/model",)
model = keras.models.load_model("/content/model")
result = model.predict(grace_hopper[np.newaxis, ...])
predicted_class = np.argmax(result[0], axis=-1)
predicted_class = labels[predicted_class]
name_map[predicted_class]
<jupyter_output><empty_output><jupyter_text># Question -duplicate data using sklearn and RNN<jupyter_code>path = "/content/drive/MyDrive/datasets/zip data files/quora_duplicate_questions.tsv"
datasets = pd.read_csv(path,sep="\t", header=0)
# datasets[['question1','question2']]= datasets.select_dtypes(include='O').convert_dtypes(pd.StringDtype())
datasets.dropna(inplace=True)
train_dataset,valid_dataset = datasets.iloc[:int(datasets.shape[0]*0.8),:],datasets.iloc[int(datasets.shape[0]*0.8):,:]
datasets.to_csv("/content/csv_quora_data.csv")
train_dataset.to_csv("/content/csv_quora_data_train.csv")
valid_dataset.to_csv("/content/csv_quora_data_valid.csv")
datasets.head(1)
datasets.info()
train_dataset.shape,valid_dataset.shape
tf_datasets = tf.data.experimental.CsvDataset("/content/csv_quora_data.csv", [tf.int32, tf.int32, tf.string,tf.string,tf.int32],header=True, select_cols=[2,3,4,5,6])
tf_datasets
train_tf_datasets = tf.data.experimental.CsvDataset("/content/csv_quora_data_train.csv", [tf.string,tf.string,tf.int32],header=True, select_cols=[4,5,6])
valid_tf_datasets = tf.data.experimental.CsvDataset("/content/csv_quora_data_valid.csv", [tf.string,tf.string,tf.int32],header=True, select_cols=[4,5,6])
train_tf_datasets; valid_tf_datasets
import regex as re
pat = re.compile("[.!@#$%^&*()><:;""''?/\*0-9]")
def tf_preprocess(input_data):
input_data =input_data.map(lambda c,d,x: (tf.strings.lower(c),tf.strings.lower(d),x))
data = input_data.map(lambda a,c,d: (tf.strings.regex_replace(a,"[.!@#$%^&*()><:;""''?/\*0-9]",""),tf.strings.regex_replace(c,"[.!@#$%^&*()><:;""''?/\*0-9]",""),d))
data = data.cache().shuffle(10**5).batch(64).prefetch(1)
return data
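# Descriptive note: tf_preprocess lower-cases both question columns, strips punctuation and
# digits with tf.strings.regex_replace, then caches, shuffles, batches (64) and prefetches
# the dataset; it is applied below via dataset.apply(tf_preprocess).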
# datasets.dropna(inplace=True)
# datasets['question1'] = datasets['question1'].apply(preprocess)
# datasets['question2'] = datasets['question2'].apply(preprocess)
tf_datasets= tf_datasets.apply(tf_preprocess)
train_tf_datasets= train_tf_datasets.apply(tf_preprocess); valid_tf_datasets = valid_tf_datasets.apply(tf_preprocess)
def spliting_data():
datasets_valid = datasets.iloc[:int(0.2*datasets.shape[0]),:]
datasets_train = datasets.iloc[int(0.2*datasets.shape[0]):,:]
return datasets_train,datasets_valid
datasets_train,datasets_valid = spliting_data()
# files saving for keras data preprocessing
path0 ="/content/drive/MyDrive/train_quora/0/train0.csv"
path1 ="/content/drive/MyDrive/train_quora/1/train1.csv"
datasets_train[datasets_train['is_duplicate']==0].to_csv(path0)
datasets_train[datasets_train['is_duplicate']==1].to_csv(path1)
path0 ="/content/drive/MyDrive/validation_qoura/0/valid0.csv"
path1 ="/content/drive/MyDrive/validation_qoura/1/valid1.csv"
datasets_valid[datasets_valid['is_duplicate']==0].to_csv(path0)
datasets_valid[datasets_valid['is_duplicate']==1].to_csv(path1)
AUTOTUNE = tf.data.AUTOTUNE
def pd_to_tensor():
d = datasets['question1']
df2 = tf.ragged.constant(d,dtype=tf.string)
df = tf.ragged.constant(datasets['question2'],dtype=tf.string)
datasets['question1'] = df2
datasets['question2'] = df
return datasets
datasets = pd_to_tensor()
tf_text_dataset = tf_datasets.map(lambda qid1,qid2,y: ((qid1,qid2),y))
tf_text_dataset= tf_text_dataset.cache().shuffle(10**5).prefetch(1)
valid_tf_dataset = valid_tf_datasets.map(lambda qid1,qid2,y: ((qid1,qid2),y))
valid_tf_dataset = valid_tf_dataset.cache().shuffle(10**5).prefetch(1)
train_tf_dataset = train_tf_datasets.map(lambda qid1,qid2,y: ((qid1,qid2),y))
train_tf_dataset = train_tf_dataset.cache().shuffle(10**5).prefetch(1)
tf_text_new = tf_datasets.map(lambda qid1, qid2,y: (qid1))
tf_text_new = tf_text_new.cache().prefetch(1)
tf_text_new2 = tf_datasets.map(lambda qid1, qid2,y: (qid2))
tf_text_new2 = tf_text_new2.cache().prefetch(1)
text_df = tf_text_new.concatenate(tf_text_new2)
text_df = text_df.cache().prefetch(1)
VOCAB_SIZE = 1000
encoder = tf.keras.layers.experimental.preprocessing.TextVectorization(
max_tokens=VOCAB_SIZE)
encoder.adapt(text_df.map(lambda text: text))
vocab = np.array(encoder.get_vocabulary())
for example, label in tf_text_dataset.take(1):
print("text:", example[0][0], example[1][0])
print()
print("label:",label[0])
encoded_example = encoder(example[0])[:3].numpy()
encoded_example.shape
for n in range(3):
print("Original: ", example[1][n].numpy())
print("Round-trip: ", " ".join(vocab[encoded_example[n]]))
print()
# (encoded_example).shape
model = keras.Sequential([
encoder,
keras.layers.Embedding(input_dim=len(encoder.get_vocabulary()),mask_zero=True,output_dim=64
),
keras.layers.Bidirectional(keras.layers.LSTM(64)),
keras.layers.Dense(64,activation="relu"),
keras.layers.Dense(1)
])
print([layer.supports_masking for layer in model.layers])
sample_text = ('what is name president'
'how do I become seo expert' )
predictions = model.predict(np.array([sample_text]))
print(predictions[0])
padding = "the " * 2000
predictions = model.predict(np.array([sample_text, padding]))
print(predictions[0])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
history = model.fit(tf_text_dataset, epochs=10,
# validation_data=test_dataset,
validation_steps=30)
import matplotlib.pyplot as plt
def plot_graphs(history, metric):
plt.plot(history.history[metric])
# plt.plot(history.history['val_'+metric], '')
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, 'val_'+metric])
plot_graphs(history,'accuracy')
model.evaluate(valid_tf_dataset)
for (text1,text2),label in valid_tf_dataset.take(1):
print(len(text1), label.shape)
print(text1[0], text2[0], label[0])
np.argmax(model.predict((text1,text2)), axis=-1), label
m1 = model.predict_classes((text1,text2)).reshape(1,-1)[0]; m2 = label
# sns.lineplot(x = np.arange(m1[0].shape[1]), y=m1[0])
mean_squared_error(m2.numpy(),m1)
# m1.shape,m2.shape
m2.numpy(),m1
<jupyter_output><empty_output>
|
no_license
|
/Time_series_and_RNN.ipynb
|
sourabh69733/ML-notebooks
| 7 |
<jupyter_start><jupyter_text>## Function 1 - hello world
Make a simple function called greet that returns the most-famous "hello world!".
Style Points
Sure, this is about as easy as it gets. But how clever can you be to create the most creative hello world you can think of? What is a "hello world" solution you would want to show your friends?<jupyter_code>def greet():
return 'hello world!'<jupyter_output><empty_output>
|
no_license
|
/Function 1 - hello world.ipynb
|
shimotsulyu/codewars
| 1 |
<jupyter_start><jupyter_text># Intro to Flask: Lesson 1## Introduction
No matter how much experience you have with programming, you've probably used a website at some point. From Safari on iPhones, to Chrome on Android phones, and to Firefox on a laptop, everyone uses websites in their day to day lives.
Flask is a Python module that lets us write websites using Python. But first, we need to set up our programming environment to be able to use Flask.
Please READ EVERYTHING before getting started, in this and further lessons. Do not try to run commands without knowing what they mean!
## Setting Up Your Environment
When writing a Python program, such as a web application using Flask, we tend to use external libraries. These external libraries need to be imported into our program using `import`, and they are not part of the standard Python library.
We use _virtual environments_ to create an environment for each of our Python projects. That way, we can use different external libraries (and different versions of those libraries!) for different projects on our computer. Each new environment is essentially a new folder in which libraries can be installed. This is helpful for internal consistency; if you have two versions of the same library on your computer, for example, virtual environments can specify which ones to install and use for your project.
We'll be using _Anaconda_ as our virtual environment manager.
### Creating the EnvironmentWe'll use Anaconda to create a new environment named `venv`, and we'll install Flask in the environment. Here's an example Anaconda command to create the environment `venv`:<jupyter_code>conda create --name venv<jupyter_output><empty_output><jupyter_text>We can also create an environment (here named `myenvname`) and install the library `numpy` within it:<jupyter_code>conda create --name myenvname numpy<jupyter_output><empty_output><jupyter_text>Or, we can ask Anaconda to create an environment named `myenvname` using Python version 3.5:<jupyter_code>conda create --name myenvname python=3.5<jupyter_output><empty_output><jupyter_text>Or, both install `numpy` and use Python 3.5 at the same time!<jupyter_code>conda create --name myenvname python=3.5 numpy<jupyter_output><empty_output><jupyter_text>Type the command below into your terminal. This command will create a virtual environment named `venv` and install the Flask package in our environment:<jupyter_code>## For us:
## On command line
conda create -n venv flask<jupyter_output><empty_output><jupyter_text>When you type this into your terminal, you will be prompted with a yes or no question: `(y/n?)`
Answer yes by typing `y` so that Anaconda knows to download the various packages required to install Flask.
Now, we need to activate our environment to be able to use it. Type the following into your terminal if you're running Windows:<jupyter_code>activate venv<jupyter_output><empty_output><jupyter_text>Or, if you're running macOS:<jupyter_code>source activate venv<jupyter_output><empty_output><jupyter_text>Other commands you might want to use:
- Install a package: `conda install <package-name>`
- Deactivate the virtual environment: `conda deactivate` (older versions of conda used plain `deactivate`)
- List all environments: `conda env list`## Your First Web PageLet's create your first web page! We'll want to display `Hello World!` to a user when they visit your site.
First, create a folder to hold your web application. You can name it `flask-lesson-1` if you like. Then, create a text file named `app.py`. Your folder should look like this:
```
flask-lesson-1
|
+-- app.py
```
Then, inside `app.py`, type the following:<jupyter_code>from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return '<h1>Hello World!</h1>'
if __name__ == '__main__':
app.run()<jupyter_output><empty_output><jupyter_text>Let's talk about this `app.py` program line by line. The first line:<jupyter_code>from flask import Flask<jupyter_output><empty_output><jupyter_text>Imports a `class` named `Flask` from the package named `flask` that we installed earlier in this lesson. Note that the first `flask` has a lowercase `f`, while the second `Flask` has an uppercase `F`.
The next line,<jupyter_code>app = Flask(__name__)<jupyter_output><empty_output><jupyter_text>Creates a new `instance` of the class `Flask` and assigns it to the variable `app`. The variable `__name__` is a built-in variable in Python that is the name of the current module. Because the name of our file is `app.py`, then the name of the current module `__name__` is `app`.
The next few lines are the "meat and potatoes" of our little `app.py`:<jupyter_code>@app.route('/')
def index():
return '<h1>Hello World!</h1>'<jupyter_output><empty_output><jupyter_text>`@app.route('/')` tells Flask that whenever a user goes to `/`, run the function `index()`. For example, if our website is hosted at `localhost:5000`, then Flask will run the function `index()` when the user goes to `localhost:5000/`.
This function is called `index()` because the root of a website (in this case, `localhost:5000/`) is called the site _index_.
When the user goes to `/`, `index()` will return `<h1>Hello World!</h1>`. The `<h1>` and `</h1>` are HTML tags to format the text, and `Hello World!` is the text to be displayed to the user.
The last few lines tell Python to run the Flask application when `app.py` is run with Python:<jupyter_code>if __name__ == '__main__':
app.run()<jupyter_output><empty_output><jupyter_text>Let's run our first Flask application! In your terminal, type the following command:<jupyter_code>python app.py<jupyter_output><empty_output><jupyter_text>When you run this command, Flask will give you a URL that you can visit to test your web application. By default, this will be `http://localhost:5000`. Go to that website in a browser, and you should see your `Hello World!` message:## Adding More RoutesA web application isn't very helpful if a user can only go to the index (`/`). We can add more routes by using `@app.route()`. The syntax for `@app.route()` is:
```python
@app.route('<route>')
```
Where `<route>` is the route portion of a URL. For example, if you want to add a function to your app to display a message when a user goes to `localhost:5000/weather`, you would add the following to `app.py`:
```python
@app.route('/weather')
def weather():
return 'The weather is currently cloudy'
```
Note the slash at the front of `/weather`. In another example, if our Flask website is http://myapp.com, and our `app.py` includes this function:
```python
@app.route('/example_page')
def example_page():
return 'This is an example'
```
Then the URL for the function `example_page()` is http://myapp.com/example_page. **However**, during testing, the URL will be http://localhost:5000/example_page.Let's do another example. Add the following code for `info()` to your `app.py`:<jupyter_code>from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "<h1>Hello World!</h1>"
@app.route('/information')
def info():
return "<h1>A different page!</h1>"
if __name__ == '__main__':
app.run()<jupyter_output><empty_output><jupyter_text>Now run the Flask app again:
```
python app.py
```
Now you can test two different URLs for your app: the first is `http://localhost:5000/`, which should display `Hello World!` again, and `http://localhost:5000/information`, which will display a new message.
Try going to `http://localhost:5000/chicago`, and you'll see you get a _404 error_. This error means that the page does not exist.## Dynamic RoutingNow that we know how to add routes to our app, we can talk about _dynamic routing_.
Dynamic routing lets us create URL route extensions that aren't hard-coded. A common way for web applications to operate is to have URL extensions that are specific to particular situations, such as unique profile pages for each user. For example, if we have two users with usernames `bob` and `laura`, we can display different information when Bob visits `site.com/user/bob` and when Laura visits `site.com/user/laura`.To do this, you need two things in the route function:
1. A variable in the app decorator `@app.route()`
2. A parameter to the function being decorated that is the same as the variable aboveLet's add a dynamic route to our `app.py`:<jupyter_code>from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "<h1>Hello World!</h1>"
@app.route('/information')
def info():
return "<h1>A different page!</h1>"
@app.route('/user/<name>')
def profile(name):
return '<h1>Hello, {}!</h1>'.format(name)
if __name__ == '__main__':
app.run(debug=True)<jupyter_output> * Serving Flask app "__main__" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: on
<jupyter_text>Note that the variable we've added is called `name`, so we had to include it in two different places:
1. `<name>` at the end of `@app.route('/user/<name>')`
2. `name` as an argument to `profile(name)`
Now, inside the function `profile(name)`, we can use the variable `name`. For example, if a user goes to `/user/Rachel`, then `name = 'Rachel'`.
If someone named Anne went to `localhost:5000/user/Anne`, she would get:## Debugging in FlaskThe Flask app starts when you run it, and by default it must be restarted manually after every change you make to the code.
If you run the app in debug mode, the Flask server will automatically detect changes in your code and reload the app, so you don't have to restart it yourself. In addition, you will be able to see a console in the browser.
When you run the Flask app in debug mode, Flask will give you a debug pin. To access the console, copy the debug pin, go to the website (such as `http://localhost:5000`), click on the console, and paste in the pin.
To turn debug mode on:<jupyter_code>## For the last line of the above code:
app.run(debug = True)<jupyter_output><empty_output><jupyter_text>You should never have debug mode on when you actually launch your app to production, as then actual users will see the specific errors and parts of your code. Always make sure to turn off debug mode when you "publish" your website for real users to use.## TemplatesSo far, our `app.py` returns HTML content to a user using `return` and raw strings, like `Hello World!`. We can use _templates_ to create .html files that our Flask app will load and return instead. By using templates, we can separate our code for running the app and our HTML to display content to a user.
First, create a new folder called `templates`, and open a file named `index.html`:
```
flask-lesson-1/
|
+-- app.py
+-- templates/
|
+-- index.html
```
Flask will automatically look for template .html files in the `templates` folder. The easiest way to return template .html files in our `app.py` is by using `render_template()`.
### Example
Let's try out our first template. In `index.html`, write the following:
```
<h1>Welcome to My Site!</h1>
<p>The quick brown fox jumps over the lazy dog.</p>
```
Now to display this HTML content when a user goes to the index (`/`), we'll modify the function `index()` in our `app.py`:<jupyter_code>from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/information')
def info():
return "<h1>A different page!</h1>"
@app.route('/user/<name>')
def profile(name):
return '<h1>Hello, {}!</h1>'.format(name)
if __name__ == '__main__':
app.run(debug=True)<jupyter_output><empty_output>
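As a small extension of the template example, `render_template()` can also pass Python values into a template, where Jinja2 placeholders such as `{{ ... }}` render them. A minimal sketch, assuming a hypothetical `templates/profile.html` file that is not part of the original lesson files:
```python
# app.py (sketch): pass a variable into a template
from flask import Flask, render_template

app = Flask(__name__)

@app.route('/user/<name>')
def profile(name):
    # templates/profile.html could contain: <h1>Hello, {{ username }}!</h1>
    return render_template('profile.html', username=name)

if __name__ == '__main__':
    app.run(debug=True)
```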
|
non_permissive
|
/11) Intro to Flask/Intro to Flask Lesson 1.ipynb
|
Rusah1129/Develop_Curriculum
| 17 |
<jupyter_start><jupyter_text># Neural Networks with Keras<jupyter_code>from __future__ import print_function
import random
import numpy as np
import pandas as pd
from math import sin
# pip install keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD # Stochastic Gradient Descent
from sklearn.metrics import accuracy_score, confusion_matrix, mean_squared_error
import sklearn.cross_validation as cv
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams["figure.figsize"] = (12, 8)<jupyter_output>Using TensorFlow backend.
/Users/Stefan/.virtualenvs/ga_dat/lib/python2.7/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
"This module will be removed in 0.20.", DeprecationWarning)
<jupyter_text>## Neural Network Regression
### Polynomial Regression
Let's train a neural network on a few different shapes. First we start with a polynomial (a cubic).<jupyter_code># Create some data
def f(x):
return x ** 3 - 5 * x + 12 + random.random()
X = np.linspace(-1, 1, 1000).reshape(-1, 1)
y = np.array(list(map(f, X)))
print(X.shape, y.shape)
# Define a Feed Forward NN
model = Sequential()
model.add(Dense(input_dim=1, output_dim=5))
model.add(Activation('tanh'))
model.add(Dense(input_dim=5, output_dim=1))
model.add(Activation('linear'))
# lr: learning rate
model.compile(loss='mse', optimizer=SGD(lr=0.01))
# Train the model
print('Training...')
loss = model.fit(X, y, nb_epoch=500, validation_split=0.1,
batch_size=128, verbose=False)
loss.history['loss'][-1]
print("Done")
# Plot the predictions
predictions = model.predict(X)
plt.scatter(X, y)
plt.plot(X, predictions, color='r')
plt.show()
print("MSE", mean_squared_error(predictions, y))<jupyter_output><empty_output><jupyter_text>### Sine Regression<jupyter_code># Sine data
X = np.linspace(0, 2 * np.pi, 500).reshape(-1,1)
y = np.sin(X)
print(X.shape, y.shape)
# Create the model
model = Sequential()
model.add(Dense(input_dim=1, output_dim=5))
model.add(Activation('tanh'))
model.add(Dense(input_dim=5, output_dim=1))
model.add(Activation('linear'))
# lr: learning rate
model.compile(loss='mse', optimizer=SGD(lr=0.1))<jupyter_output><empty_output><jupyter_text>### Train the Model<jupyter_code>print('Training..')
loss = model.fit(X, y, nb_epoch=150, validation_split=0.1,
batch_size=128, verbose=False)
print(loss.history['loss'][-1])
print('Complete')
# Plot the predictions
predictions = model.predict(X)
plt.scatter(X, y)
plt.plot(X, predictions, color='r')
plt.show()
print("MSE", mean_squared_error(predictions, y))
# Plot the error over time
plt.scatter(range(len(loss.history['loss'])), loss.history['loss'])
# plt.scatter(range(len(loss.history['val_loss'])), loss.history['val_loss'], color='red')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.title('MSE by Epoch');<jupyter_output><empty_output><jupyter_text>### Train longer
If we train for more epochs, we can get a better regression.<jupyter_code>X = np.linspace(0, 2 * np.pi, 1000).reshape(-1,1)
y = np.sin(X)
print(X.shape, y.shape)
model = Sequential()
model.add(Dense(input_dim=1, output_dim=5))
model.add(Activation('tanh'))
model.add(Dense(input_dim=5, output_dim=1))
model.add(Activation('linear'))
# lr: learning rate
model.compile(loss='mse', optimizer=SGD(lr=0.05))
print('Training..')
loss = model.fit(X, y, nb_epoch=15000, validation_split=0.1,
batch_size=128, verbose=False)
print(loss.history['loss'][-1])
print('Complete')
# Plot
predictions = model.predict(X)
plt.scatter(X, y)
plt.plot(X, predictions, color='r')
plt.show()
print("MSE", mean_squared_error(predictions, y))<jupyter_output><empty_output><jupyter_text>We can take a closer look at the error per training epoch.<jupyter_code># Plot the error over time
plt.scatter(range(len(loss.history['loss'])), loss.history['loss'])
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.title('MSE by Epoch');<jupyter_output><empty_output><jupyter_text>### Exercise: Perform regression on the following data
Hints:
* Try adding a hidden layer
* Try lowering the learning rate and using more epochs<jupyter_code>def f(x):
return x ** 2 * np.sin(x**2)
# Sine data
X = np.linspace(2, np.pi, 1000).reshape(-1,1)
y = np.array(list(map(f, X)))
print(X.shape, y.shape)
## Solution
model = Sequential([
Dense(input_dim=1, output_dim=5, activation='tanh'),
Dense(output_dim=5, activation='tanh'),
Dense(output_dim=5, activation='tanh'),
Dense(output_dim=1, activation='linear')
])
# lr: learning rate
model.compile(loss='mse', optimizer=SGD(lr=0.005))
print('Training..')
loss = model.fit(X, y, nb_epoch=10000, validation_split=0.1, batch_size=128, verbose=False)
print(loss.history['loss'][-1])
print('Complete')
# Plot
predictions = model.predict(X)
plt.scatter(X, y)
plt.plot(X, predictions, color='r');
print("MSE", mean_squared_error(predictions, y))<jupyter_output>Training..
0.00466147823777
Complete
MSE 1.16221023583
<jupyter_text>## Classification
We'll start with the Iris dataset (of course).<jupyter_code>import sklearn.datasets as datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Break each output into indicator cols
y_cat = pd.get_dummies(y).values
print(X.shape, y_cat.shape)
# Define a model
model = Sequential()
# input_dim = number of neurons in previous layer.
# output_dim = number of neurons in current layer.
# First layer - input_dim=k features.
model.add(Dense(input_dim=4, output_dim=8))
model.add(Activation("tanh"))
# Output layer - output_dim=# of output per point (in y).
# Use 'softmax' for class probability. 'linear' for regression
model.add(Dense(input_dim=8, output_dim=3))
model.add(Activation("softmax"))
# Uses Mean Squared Error and Stochastic Gradient Descent
model.compile(loss='mse', optimizer=SGD(lr=0.01))
# Train the model
print('Training...')
loss = model.fit(X, y_cat,
validation_split=0.1, nb_epoch=5000, batch_size=16, verbose=False)
print(loss.history['loss'][-1]) # displays MSE at last iteration
print("Training complete")
# Model evaluation
pred_y = model.predict(X, verbose=False)
preds = model.predict_classes(X, verbose=False)
print('ACCURACY: ', accuracy_score(y, preds))
print('CONFUSION MATRIX:\n', confusion_matrix(y, preds))
# Plot the error over time
plt.scatter(range(len(loss.history['loss'])), loss.history['loss'])
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.title('MSE by Epoch');<jupyter_output><empty_output><jupyter_text>## Abalone data set<jupyter_code>columns = ["Sex", "Length", "Diameter", "Height", "Whole Weight",
"Shucked weight", "Viscera weight", "Shell weight", "Rings" ]
df = pd.read_csv("../data/abalone.data", names=columns)
df.head()
import seaborn as sns
sns.pairplot(data=df, vars=columns[1:], hue="Sex");
d = {'M': 0, 'F': 1, 'I': 2}
df["Sex"] = df["Sex"].apply(lambda x: d[x])
X = np.array(df[columns[1:]])
y = np.array(df["Sex"])
y_cat = pd.get_dummies(y).values
print(X.shape, y_cat.shape)
# Define a model
model = Sequential()
# input_dim = number of neurons in previous layer.
# output_dim = number of neurons in current layer.
# First layer - input_dim=k features.
model.add(Dense(input_dim=8, output_dim=6))
model.add(Activation("tanh"))
model.add(Dense(input_dim=6, output_dim=6))
model.add(Activation("tanh"))
# Output layer - output_dim=# of output per point (in y).
# Use 'softmax' for class probability. 'linear' for regression
model.add(Dense(input_dim=6, output_dim=3))
model.add(Activation("softmax"))
# Uses Mean Squared Error and Stochastic Gradient Descent
model.compile(loss='mse', optimizer=SGD(lr=0.1))
# Train the model
print('Training...')
loss = model.fit(X, y_cat,
validation_split=0.1, nb_epoch=1000,
batch_size=16, verbose=False)
print(loss.history['loss'][-1]) # displays MSE at last iteration
print("Training complete")
# Model evaluation
pred_y = model.predict(X, verbose=False)
preds = model.predict_classes(X, verbose=False)
print('ACCURACY: ', accuracy_score(y, preds))
print('CONFUSION MATRIX:\n', confusion_matrix(y, preds))
# Plot the error over time
plt.scatter(range(len(loss.history['loss'])), loss.history['loss'])
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.title('MSE by Epoch');
confusion_matrix?<jupyter_output><empty_output><jupyter_text>## Exercise
Classify the following data ([source](https://archive.ics.uci.edu/ml/datasets/MAGIC+Gamma+Telescope)). You'll need to translate the classes into integers and make dummies. Design a neural network to classify the data and evaluate the results.<jupyter_code>names = "fLength fWidth fSize fConc fConc1 fAsym fM3Long fM3Trans fAlpha fDist class".split()
df = pd.read_csv("../data/magic04.data", names=names)
df.head()
d = {'g': 0, 'h': 1}
df["class"] = df["class"].apply(lambda x: d[x])
X = np.array(df[df.columns[:-1]])
y = np.array(df["class"])
y_cat = pd.get_dummies(y).values
print(X.shape, y_cat.shape)
# Define a model
model = Sequential()
# input_dim = number of neurons in previous layer.
# output_dim = number of neurons in current layer.
# First layer - input_dim=k features.
model.add(Dense(input_dim=10, output_dim=6))
model.add(Activation("tanh"))
model.add(Dense(input_dim=6, output_dim=6))
model.add(Activation("tanh"))
# Output layer - output_dim=# of output per point (in y).
# Use 'softmax' for class probability. 'linear' for regression
model.add(Dense(input_dim=6, output_dim=2))
model.add(Activation("softmax"))
# Uses Mean Squared Error and Stochastic Gradient Descent
model.compile(loss='mse', optimizer=SGD(lr=0.01))
# Train the model
print('Training...')
loss = model.fit(X, y_cat,
validation_split=0.2, nb_epoch=1000,
batch_size=256, verbose=False)
print(loss.history['loss'][-1]) # displays MSE at last iteration
print("Training complete")
# Model evaluation
pred_y = model.predict(X, verbose=False)
preds = model.predict_classes(X, verbose=False)
print('ACCURACY: ', accuracy_score(y, preds))
print('CONFUSION MATRIX:\n', confusion_matrix(y, preds))
# Plot the error over time
plt.scatter(range(len(loss.history['loss'])), loss.history['loss'])
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.title('MSE by Epoch');<jupyter_output><empty_output>
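Note that this notebook uses the legacy Keras 1.x argument names (`output_dim`, `nb_epoch`) and the standalone `keras` package. For readers on a current TensorFlow install, a rough `tf.keras` sketch of the Iris model above would look like the following; treat it as an illustration of the renamed arguments, not as code that was run here:
```python
# Rough tf.keras (TensorFlow 2.x) equivalent of the Iris classifier above
import tensorflow as tf

modern_model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='tanh', input_shape=(4,)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
modern_model.compile(loss='mse', optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
# modern_model.fit(X, y_cat, validation_split=0.1, epochs=5000, batch_size=16, verbose=0)
```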
|
no_license
|
/19-neural-networks/code/Neural Networks-Solutions.ipynb
|
jpbatz/DS_Review_2019
| 10 |
<jupyter_start><jupyter_text>### Data Analysis and Data Cleaning<jupyter_code>sales_df['COUNTRY'].value_counts().index
sales_df['COUNTRY'].value_counts()
sales_df['COUNTRY'].unique()
def barplot_visualization(x):
fig = plt.Figure(figsize = (12, 6))
fig = px.bar(x = sales_df[x].value_counts().index, y = sales_df[x].value_counts(), color = sales_df[x].value_counts().index, height = 600)
fig.show()
barplot_visualization('COUNTRY')
barplot_visualization('STATUS')
sales_df.drop(columns='STATUS', inplace=True)
sales_df
barplot_visualization('PRODUCTLINE')
barplot_visualization('DEALSIZE')
def dummies(x):
dummy = pd.get_dummies(sales_df[x])
sales_df.drop(columns = x, inplace= True)
return pd.concat([sales_df, dummy], axis=1)
sales_df = dummies('COUNTRY')
sales_df
sales_df = dummies('PRODUCTLINE')
sales_df
sales_df = dummies('DEALSIZE')
sales_df
y = pd.Categorical(sales_df['PRODUCTCODE'])
y
y = pd.Categorical(sales_df['PRODUCTCODE']).codes
y
#should use one-hot encoding, this is shortcut
sales_df['PRODUCTCODE'] = pd.Categorical(sales_df['PRODUCTCODE']).codes
sales_df
# Group data by order date
sales_df_group = sales_df.groupby(by = "ORDERDATE").sum()
sales_df_group
barplot_visualization('ORDERDATE')
fig = px.line(x = sales_df_group.index, y = sales_df_group.SALES, title = 'Sales')
fig.show()
sales_df.drop("ORDERDATE", axis = 1, inplace = True)
sales_df.shape
plt.figure(figsize=(25,25))
corr_matrix = sales_df.iloc[:, :10].corr()
sns.heatmap(corr_matrix, annot=True)
# drop either QTR_ID or MONTH_ID
sales_df.drop("QTR_ID", axis = 1, inplace = True)
sales_df.shape
# Distplot shows the (1) histogram, (2) kde plot and (3) rug plot.
# (1) Histogram: it's a graphical display of data using bars with various heights. Each bar groups numbers into ranges and taller bars show that more data falls in that range.
# (2) Kde Plot: Kernel Density Estimate is used for visualizing the Probability Density of a continuous variable.
# (3) Rug plot: plot of data for a single quantitative variable, displayed as marks along an axis (one-dimensional scatter plot).
import plotly.figure_factory as ff
plt.figure(figsize = (10, 10))
for i in range(8):
if sales_df.columns[i] != 'ORDERLINENUMBER':
fig = ff.create_distplot([sales_df[sales_df.columns[i]].apply(lambda x: float(x))], ['distplot'])
fig.update_layout(title_text = sales_df.columns[i])
fig.show()
# Visualize the relationship between variables using pairplots
plt.figure(figsize = (15, 15))
fig = px.scatter_matrix(sales_df,
dimensions = sales_df.columns[:8], color = 'MONTH_ID')
fig.update_layout(
title = 'Sales Data',
width = 1100,
height = 1100,
)
fig.show()
# A trend exists between 'SALES' and 'QUANTITYORDERED'
# A trend exists between 'MSRP' and 'PRICEEACH'
# A trend exists between 'PRICEEACH' and 'SALES'
# It seems that sales growth exists as we move from 2013 to 2014 to 2015 ('SALES' vs. 'YEAR_ID')
# zoom in into 'SALES' and 'QUANTITYORDERED', you will be able to see the monthly information color coded on the graph<jupyter_output><empty_output><jupyter_text>### Finding Optimal Number of Clusters using Elbow Method<jupyter_code># Scale the data
scaler = StandardScaler()
sales_df_scaled = scaler.fit_transform(sales_df)
sales_df_scaled.shape
scores = []
range_values = range(1, 15)
for i in range_values:
kmeans = KMeans(n_clusters = i)
kmeans.fit(sales_df_scaled)
    scores.append(kmeans.inertia_) # inertia is the sum of squared distances of samples to their closest cluster center
plt.plot(scores, 'bx-')
plt.title('Finding right number of clusters')
plt.xlabel('Clusters')
plt.ylabel('scores')
plt.show()
# From this we can observe that the 5th cluster seems to form the elbow of the curve.
# Note that the curve can change every time we run the cell<jupyter_output><empty_output><jupyter_text>## Applying K-Means<jupyter_code>kmeans = KMeans(5)
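# Note: KMeans uses random initialization, which is one reason the elbow plot above
# can change between runs; passing random_state (e.g., KMeans(5, random_state=42))
# would make the scores and the clusters below reproducible.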
kmeans.fit(sales_df_scaled)
labels = kmeans.labels_
labels
kmeans.cluster_centers_.shape
sales_df_scaled.shape
cluster_centers = pd.DataFrame(data= kmeans.cluster_centers_, columns= [sales_df.columns])
cluster_centers
cluster_centers = scaler.inverse_transform(cluster_centers)
cluster_centers = pd.DataFrame(data= cluster_centers, columns= [sales_df.columns])
cluster_centers
labels.shape
labels.max()
labels.min()
y_kmeans = kmeans.fit_predict(sales_df_scaled)
y_kmeans
y_kmeans.shape
sale_df_cluster = pd.concat([sales_df, pd.DataFrame({'cluster':labels})], axis = 1)
sale_df_cluster
sales_df['ORDERLINENUMBER'] = sales_df['ORDERLINENUMBER'].apply(lambda x: float(x))
for i in sales_df.columns[:8]:
plt.figure(figsize = (30, 6))
for j in range(5):
plt.subplot(1, 5, j+1)
cluster = sale_df_cluster[sale_df_cluster['cluster'] == j]
cluster[i].hist()
plt.title('{} \nCluster - {} '.format(i,j))
plt.show()<jupyter_output><empty_output><jupyter_text>### Applying PCA<jupyter_code>pca = PCA(n_components = 3)
principal_comp = pca.fit_transform(sales_df_scaled)
principal_comp
pca_df = pd.DataFrame(data = principal_comp, columns = ['pca1', 'pca2', 'pca3'])
pca_df.head()
pca_df = pd.concat([pca_df, pd.DataFrame({'cluster':labels})], axis = 1)
pca_df
fig = px.scatter_3d(pca_df, x = 'pca1', y= 'pca2', z= 'pca3',color = 'cluster', symbol = 'cluster', size_max = 18, opacity = 0.7)
fig.update_layout(margin = dict(l = 0, r = 0, b = 0, t = 0))<jupyter_output><empty_output><jupyter_text>### Auto Encoders
<jupyter_code>input_df = Input(shape = (37,))
x = Dense(50, activation = 'relu')(input_df)
x = Dense(500, activation = 'relu', kernel_initializer = 'glorot_uniform')(x)
x = Dense(500, activation = 'relu', kernel_initializer = 'glorot_uniform')(x)
x = Dense(2000, activation = 'relu', kernel_initializer = 'glorot_uniform')(x)
encoded = Dense(8, activation = 'relu', kernel_initializer = 'glorot_uniform')(x)
''' Decoding segment of Auto-Encoder (not needed here)
x = Dense(2000, activation = 'relu', kernel_initializer = 'glorot_uniform')(encoded)
x = Dense(500, activation = 'relu', kernel_initializer = 'glorot_uniform')(x)
decoded = Dense(37, kernel_initializer = 'glorot_uniform')(x)'''
# encoder - used for dimensionality reduction
encoder = Model(input_df, encoded)
## autoencoder - not required here
#autoencoder = Model(input_df, decoded)
#autoencoder.compile(optimizer = 'adam', loss='mean_squared_error')
#autoencoder.fit(sales_df, sales_df, batch_size = 128, epochs = 500, verbose = 3)
#autoencoder.save_weights('autoencoder_1.h5')
pred = encoder.predict(sales_df_scaled)
scores = []
range_values = range(1, 15)
for i in range_values:
kmeans = KMeans(n_clusters = i)
kmeans.fit(pred)
scores.append(kmeans.inertia_)
plt.plot(scores, 'bx-')
plt.title('Finding right number of clusters')
plt.xlabel('Clusters')
plt.ylabel('scores')
plt.show()
# as per graph, optimal k value is 3
kmeans = KMeans(3)
kmeans.fit(pred)
labels = kmeans.labels_
y_kmeans = kmeans.fit_predict(sales_df_scaled)
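# Note: fit_predict here re-fits KMeans on the full scaled feature matrix (the
# autoencoder input space), so cluster_centers_ used below lives in that space and
# can go through scaler.inverse_transform, while `labels` above came from the
# 8-dimensional encoded representation - two different fits.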
df_cluster_dr = pd.concat([sales_df, pd.DataFrame({'cluster':labels})], axis = 1)
df_cluster_dr.head()
cluster_centers = pd.DataFrame(data = kmeans.cluster_centers_, columns = [sales_df.columns])
cluster_centers
cluster_centers = scaler.inverse_transform(cluster_centers)
cluster_centers = pd.DataFrame(data = cluster_centers, columns = [sales_df.columns])
cluster_centers
for i in sales_df.columns[:8]:
plt.figure(figsize = (30, 6))
for j in range(3):
plt.subplot(1, 3, j+1)
cluster = df_cluster_dr[df_cluster_dr['cluster'] == j]
cluster[i].hist()
plt.title('{} \nCluster - {} '.format(i,j))
plt.show()
pca = PCA(n_components = 3)
prin_comp = pca.fit_transform(sales_df_scaled)
pca_df = pd.DataFrame(data = prin_comp, columns = ['pca1', 'pca2', 'pca3'])
pca_df.head()
pca_df = pd.concat([pca_df, pd.DataFrame({'cluster':labels})], axis = 1)
pca_df.head()
fig = px.scatter_3d(pca_df, x = 'pca1', y = 'pca2', z = 'pca3',
color='cluster', symbol = 'cluster', size_max = 10, opacity = 0.7)
fig.update_layout(margin = dict(l = 0, r = 0, b = 0, t = 0))<jupyter_output><empty_output>
|
no_license
|
/Marketing_Targeted_Ads.ipynb
|
Sarjhana/AI-in-Marketing
| 5 |
<jupyter_start><jupyter_text># DIY calibration with hk and recharge
Here you will modify the pest control file to make calibration-period recharge a parameter (name it ``rch_0``), re-run pestpp, and analyze the results. To help, the two code blocks below set up the model working directory for you. Feel free to look back at other activities - that's not cheating. Some steps:
- create a template file for "freyberg.rch" with a parameter named ``rch_0`` for recharge in the first stress period
- add ``rch_0`` to the pest control file
- add "freyberg.rch.tpl" to the control file
- rerun pestpp
- analyze the results<jupyter_code>%matplotlib inline
import os
import sys
sys.path.append("..")
import shutil
import pandas as pd
import matplotlib.pyplot as plt
import pyemu
import freyberg_setup as fs
pst_name = fs.PST_NAME_UN
working_dir = fs.WORKING_DIR_UN
fs.setup_pest_un_bareass()
pst = pyemu.Pst(os.path.join(working_dir,pst_name))
pst.control_data.noptmax = 0
pst.write(os.path.join(working_dir,pst_name))<jupyter_output>['.DS_Store', 'botm.ref', 'extract_zone_array.py', 'forecasts_true.csv', 'freyberg.bas', 'freyberg.dbf', 'freyberg.dis', 'freyberg.hds', 'freyberg.heads', 'freyberg.heads_potobs.ins', 'freyberg.hyd', 'freyberg.list', 'freyberg.locations', 'freyberg.mpbas', 'freyberg.mpenpt', 'freyberg.mplist', 'freyberg.mpnam', 'freyberg.mppthln', 'freyberg.mpsim', 'freyberg.oc', 'freyberg.pcg', 'freyberg.rivflux', 'freyberg.shp', 'freyberg.shx', 'freyberg.travel', 'freyberg.truth.lpf', 'freyberg.truth.nam', 'freyberg.truth.rch', 'freyberg.truth.riv', 'freyberg.truth.wel', 'hk.truth.ref', 'hk.zones', 'ibound.ref', 'kzone.ref', 'mpath.in', 'obs_loc.csv', 'potobs_group.csv', 'Process_output.py', 'run_true_model.py', 'strt.ref', 'Weights_and_best_PHI.xlsx']
changing model workspace...
freyberg_un
WARNING: unit 31 of package UPW already in use
FloPy is using the following executable to run the model: /Users/jwhite/Dev/GW1876/activities/freyberg_k_and_r/freyberg_un/mfnwt
[...]
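One possible way to start on the template-file step is sketched below. It relies only on plain Python and the generic PEST template-file convention (a `ptf ~` header line, then the model input file with `~  rch_0  ~` markers in place of the value to estimate). The line index and token position used here are assumptions - inspect `freyberg.rch` first and adjust them - and registering `rch_0` and the tpl/input file pair in the control file (by hand or with pyemu) is left as part of the exercise:
```python
# Hedged sketch: build freyberg.rch.tpl from freyberg.rch for a rch_0 parameter
import os

rch_file = os.path.join(working_dir, "freyberg.rch")
tpl_file = os.path.join(working_dir, "freyberg.rch.tpl")

with open(rch_file) as f:
    lines = f.readlines()

# ASSUMPTION: stress-period-1 recharge is the last token on this line; check the
# actual file and adjust the index / token position before using this.
recharge_line_idx = 2
tokens = lines[recharge_line_idx].split()
tokens[-1] = "~    rch_0     ~"
lines[recharge_line_idx] = "  ".join(tokens) + "\n"

with open(tpl_file, "w") as f:
    f.write("ptf ~\n")  # PEST template header: '~' is the parameter marker delimiter
    f.writelines(lines)
```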
|
no_license
|
/activities/freyberg_k_and_r/freyberg_k_r_DIY.ipynb
|
mnfienen-usgs/GW1876
| 1 |
<jupyter_start><jupyter_text># Operations
There are lots of operations with pandas that will be really useful to you, but don't fall into any distinct category. Let's show them here in this lecture:<jupyter_code>import pandas as pd
df = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})
df.head()<jupyter_output><empty_output><jupyter_text>### Info on Unique Values
- df[].unique()
- df[].nunique() - number of unique values
- df[].value_counts()<jupyter_code>df['col2'].value_counts()<jupyter_output><empty_output><jupyter_text>### Applying Functions<jupyter_code>def times2(x):
return x*2
df['col1'].apply(times2)
df['col3'].apply(len)
df['col1'].sum()
sum(df['col1'])<jupyter_output><empty_output><jupyter_text>** Permanently Removing a Column**<jupyter_code>del df['col1']
df<jupyter_output><empty_output><jupyter_text>** Get column and index names: **<jupyter_code>df.columns
df.index<jupyter_output><empty_output><jupyter_text>** Sorting and Ordering a DataFrame:**<jupyter_code>df
df.sort_values(by='col2') #inplace=False by default<jupyter_output><empty_output><jupyter_text>** Find Null Values or Check for Null Values**<jupyter_code>df.isnull()
# Drop rows with NaN Values
df.dropna()<jupyter_output><empty_output><jupyter_text>** Filling in NaN values with something else: **<jupyter_code>import numpy as np
df = pd.DataFrame({'col1':[1,2,3,np.nan],
'col2':[np.nan,555,666,444],
'col3':['abc','def','ghi','xyz']})
df.head()
df.fillna('FILL')
data = {'A':['foo','foo','foo','bar','bar','bar'],
'B':['one','one','two','two','one','one'],
'C':['x','y','x','y','x','y'],
'D':[1,3,2,5,4,1]}
df = pd.DataFrame(data)
df
df.pivot_table(values='D',index=['A', 'B'],columns=['C'])<jupyter_output><empty_output>
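The `pivot_table` call above aggregates `D` (using the mean, which is pandas' default `aggfunc`) for each `(A, B)` pair against the values of `C`. For this data the same table can be built with a groupby, shown here only as a cross-check:
```python
# Equivalent result via groupby + unstack (mean is pivot_table's default aggfunc)
df.groupby(['A', 'B', 'C'])['D'].mean().unstack('C')
```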
|
no_license
|
/completed/Data-preprocessing/Pandas/04 - Operations.ipynb
|
AdityaSP/sasken-ml-nov17
| 8 |
<jupyter_start><jupyter_text>## Social Media Analytics: Twitch### Twitch Stats<jupyter_code>pip install twitchapi
pip install -r ./stats/requirements.txt
import csv
import os
import time
from datetime import datetime
from shutil import move as move_file
import twitchapi
cycle_delay = 30 # seconds
game_configurations = [
{
'url_name': 'Elite:%20Dangerous',
'full_name': ['Elite: Dangerous', 'Elite Dangerous'],
'shorthand': 'ED'
},
{
'url_name': 'Planet%20Coaster',
'full_name': ['Planet Coaster', 'Planet: Coaster'],
'shorthand': 'PC'
},
]
def pause(amount=5):
for pause_tick in range(amount, 0, -1):
print('Paused for {} seconds '.format(pause_tick), end='\r')
time.sleep(1)
print(' ', end='\r')
def write_to_file(file_name, rows):
with open(file_name, 'a', newline='') as csvfile:
writer = csv.writer(csvfile, quotechar='"')
for row in rows:
writer.writerow(row)
csvfile.flush()
print('Written {} rows to {}'.format(len(rows), file_name))
def get_current_date_string():
previous_day, previous_month, previous_year = datetime.now().day, datetime.now().month, datetime.now().year
return '{}_{}_{}'.format(previous_day, previous_month, previous_year)
def get_twitch_client_id():
with open('stats/client_id.txt', 'r') as id_file:
return id_file.readline().strip()
def main():
client_id = get_twitch_client_id()
current_date_string = get_current_date_string()
while True:
# Scrape the data for each game
for game_configuration in game_configurations:
# if a new day has started, move the completed data to its respective subfolder
new_date_string = get_current_date_string()
if not current_date_string == new_date_string:
data_folder = os.path.join(os.getcwd(), 'data', game_configuration['shorthand'], file_name)
print('Moving {} to: {}'.format(file_name, data_folder))
move_file(src=file_name, dst=data_folder)
current_date_string = new_date_string
print('Scraping data for: {}'.format(game_configuration['full_name'][0]))
# Get the data for the current game by invoking the twitchapi module
api = twitchapi.APIStreamsRequest(
game_url_name=game_configuration['url_name'],
game_full_names=game_configuration['full_name'],
client_id=client_id)
try:
api.request_all_game_data()
except Exception as e:
print(e)
time.sleep(5)
# move onto the next game
continue
returned_data = api.return_required_data()
# if any returned data is available, then write to to the CSV
file_name = game_configuration['shorthand'] + '_' + current_date_string + '.csv'
if returned_data is not None and len(returned_data) > 0:
write_to_file(
file_name=file_name,
rows=returned_data)
else:
print('No rows written for: {}'.format(game_configuration['full_name']))
pause(cycle_delay)
if __name__ == '__main__':
main()
<jupyter_output><empty_output><jupyter_text>### Flask Application[Watch Video](https://www.youtube.com/watch?v=nASfFE9kIas)
[Register Twitch Application](https://dev.twitch.tv/console/apps/create)<jupyter_code>pip install -r ./api/requirements.txt<jupyter_output>Collecting attrs==19.1.0
Downloading attrs-19.1.0-py2.py3-none-any.whl (35 kB)
Collecting certifi==2019.3.9
Downloading certifi-2019.3.9-py2.py3-none-any.whl (158 kB)
[K |████████████████████████████████| 158 kB 1.7 MB/s eta 0:00:01
[?25hRequirement already satisfied: chardet==3.0.4 in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (from -r requirements.txt (line 3)) (3.0.4)
Collecting Click==7.0
Downloading Click-7.0-py2.py3-none-any.whl (81 kB)
[K |████████████████████████████████| 81 kB 5.4 MB/s eta 0:00:011
[?25hCollecting cycler==0.10.0
Using cached cycler-0.10.0-py2.py3-none-any.whl (6.5 kB)
Collecting decorator==4.4.0
Using cached decorator-4.4.0-py2.py3-none-any.whl (8.3 kB)
Collecting Flask==1.0.3
Downloading Flask-1.0.3-py2.py3-none-any.whl (92 kB)
[K |████████████████████████████████| 92 kB 6.0 MB/s eta 0:00:011
[?25hCollecting flask-ngrok==0.0.25
Downloading flask_ngrok-0.0.25-py3-none-any.whl (3.1 kB)
Collecting idna=[...]<jupyter_text>### Twitch Integration <jupyter_code>import requests, json
BASE_URL = 'https://qlx.services/institute/'
HEADERS = {'Client-ID': 'o73nmk3eyw6rv4hh4fu7xw1zzf3d7w'}
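# Note: BASE_URL above points at a custom host rather than Twitch itself. The public
# Twitch Helix endpoints matching these queries normally live under
# https://api.twitch.tv/helix/ and, besides Client-ID, also require an OAuth token
# sent as an 'Authorization: Bearer <token>' header.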
INDENT = 2
# get response from twitch API call
def get_response(query):
url = BASE_URL + query
response = requests.get(url, headers=HEADERS)
return response
# used for debugging the result
def print_response(response):
response_json = response.json()
print_response = json.dumps(response_json, indent=INDENT)
print(print_response)
# get the current live stream info, given a username
def get_user_streams_query(user_login):
return 'streams?user_login={0}'.format(user_login)
def get_user_query(user_login):
return 'users?login={0}'.format(user_login)
def get_user_videos_query(user_id):
return 'videos?user_id={0}&first=50'.format(user_id)
def get_games_query():
return 'games/top'<jupyter_output><empty_output><jupyter_text>## Flask Application: data.py<jupyter_code>import requests, json, sys
BASE_URL = 'https://www.twitch.tv/enterlifeonline'
CLIENT_ID = 'o73nmk3eyw6rv4hh4fu7xw1zzf3d7w'
HEADERS = {'Client-ID': CLIENT_ID}
INDENT = 2
# query = 'streams?game_id=33214'
# url = BASE_URL + query
# response = requests.get(url, headers=PARAMS)
# print(json.dumps(response.json(), indent=2))
# parsed = json.loads(response.text)
# print(json.dumps(parsed, indent=2))
# Takes a custom query from user and gets the response object
def get_response(query):
url = BASE_URL + query
response = requests.get(url, headers=HEADERS)
return response
# Takes a response object and prints it on the console with proper format
def print_response(response):
response_json = response.json()
print_response = json.dumps(response_json, indent=INDENT)
print(print_response)
return response.json()
# if __name__ == "__main__":
# user = sys.argv[1]
# query = 'users?login={0}'.format(user)
# print_response(get_response(query))
if __name__ == "__main__":
login = sys.argv[1]
# user_query = 'users?login={0}'.format(login)
# response = get_response(user_query)
# response_json = response.json()
# user_id = response_json['data'][0]['id']
streams_query = 'streams?user_login={0}'.format(login)
response = get_response(streams_query)
# DEBUG
print_response(response)<jupyter_output><empty_output><jupyter_text>## Flask Application: api/app.py <jupyter_code>from flask import Flask, request, render_template, redirect, url_for
from flask_ngrok import run_with_ngrok
from wtforms import Form, StringField, validators
import twitch_integration
import json, time, datetime
app = Flask(__name__)
app.debug = True
# run_with_ngrok(app)
class InputForm(Form):
user_login = StringField(validators=[validators.InputRequired()])
@app.route('/', methods=['POST', 'GET'])
def home():
form = InputForm(request.form)
user_login = form.user_login.data
user_query = twitch_integration.get_user_query(user_login)
user_info = twitch_integration.get_response(user_query)
twitch_integration.print_response(user_info)
try:
user_id = user_info.json()['data'][0]['id']
img_url = user_info.json()['data'][0]['profile_image_url']
user_videos_query = twitch_integration.get_user_videos_query(user_id)
videos_info = twitch_integration.get_response(user_videos_query)
twitch_integration.print_response(videos_info)
videos_info_json = videos_info.json()
videos_info_json_data = videos_info_json['data']
print('BEFORE!!!', videos_info_json_data)
# videos_info_json_data = list(videos_info_json_data.reverse())
videos_info_json_data_reversed = videos_info_json_data[::-1]
print('AFTER!!!', videos_info_json_data_reversed)
# sorted_video_data = videos_info_json_data.sort((a, b))
# videos_info_json_data_sorted = sorted(videos_info_json_data, key=lambda x: (videos_info_json_data[]))
line_labels = []
line_values = []
title = user_login + '\'s Video Stats'
for item in videos_info_json_data_reversed:
if (len(item['title']) == 0):
line_labels.append('No Name')
elif (len(item['title']) > 20):
line_labels.append(item['title'][:20] + '...')
else:
line_labels.append(item['title'])
line_values.append(item['view_count'])
return render_template('line_chart.html', title=title, max=max(line_values) + 10, labels=line_labels,values=line_values, img_url=img_url)
except:
return render_template("display.html", form=form)
# user_videos_query = twitch_integration.get_response(user_info['data']['user_id'])
# response = twitch_integration.get_response(user_videos_query)
# response_json = response.json()
# twitch_integration.print_response(response)
# return render_template("display.html", form=form, response_json=videos_info.json())
@app.route('/dfdf', methods=['POST', 'GET'])
def main():
form = InputForm(request.form)
user_login = form.user_login.data
# if request.method == 'POST':
# return redirect(url_for('graph', user_login=user_login))
query = twitch_integration.get_user_streams_query(user_login)
# query = twitch_integration.get_games_query()
response = twitch_integration.get_response(query)
response_json = response.json()
twitch_integration.print_response(response)
return render_template("display.html", form=form, response_json=response_json)
@app.route('/graph', methods=['POST', 'GET'])
def graph():
user_login = request.args.get('user_login')
query = twitch_integration.get_user_streams_query(user_login)
response = twitch_integration.get_response(query)
response_json = response.json()
twitch_integration.print_response(response)
line_labels = []
line_values = []
title = None
# return render_template("display.html", form=form, response_json=response_json)
for i in range(5):
query = twitch_integration.get_user_streams_query(user_login)
response = twitch_integration.get_response(query)
response_json = response.json()
current_time = datetime.datetime.now()
time_list = [current_time.hour,current_time.minute,current_time.second]
# try:
# print(response_json['data'][0]['viewer_count'])
# viewer_count = response_json['data'][0]['viewer_count']
# if title is None:
# title = response_json['data'][0]['user_name'] + ' - ' + response_json['data'][0]['title']
# t = ':'.join(str(e) for e in time_list)
# line_labels.append(t)
# line_values.append(viewer_count)
# except:
# pass
# # line_values.append(response_json['data'][0]['viewer_count'])
# # line_values.append(i * 1000)
# time.sleep(2)
# print(line_labels, line_values)
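    # Note: with the collection loop above left commented out, line_labels and
    # line_values are still empty at this point, so max(line_values) below raises a
    # ValueError before the template renders; re-enable the loop or guard the empty case.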
return render_template('line_chart.html', title=title, max=max(line_values) + 10, labels=line_labels,values=line_values)
# if len(response_json['data']) > 0:
# line_labels=['time', 'time']
# line_values=['1', '1000']
# return render_template('line_chart.html', title='Twitch Live Stream Info', max=17000, labels=line_labels, values=line_values)
# return render_template("display.html", form=form, response_json=response_json)
if __name__ == '__main__':
app.run()
pip install Flask<jupyter_output>Requirement already satisfied: Flask in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (1.1.2)
Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (from Flask) (2.11.2)
Requirement already satisfied: Werkzeug>=0.15 in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (from Flask) (1.0.1)
Requirement already satisfied: itsdangerous>=0.24 in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (from Flask) (1.1.0)
Requirement already satisfied: click>=5.1 in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (from Flask) (7.1.2)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/Cellar/jupyterlab/2.2.5/libexec/lib/python3.8/site-packages (from Jinja2>=2.10.1->Flask) (1.1.1)
[33mWARNING: You are using pip version 20.2.2; however, version 20.2.3 is available.
You should consider upgrading via the '/usr/local/Cellar/jupyterlab/2.2[...]<jupyter_text>#### Alternatively Running Via Command Line<jupyter_code>export FLASK_APP=./api/app.py
flask run
* Running on http://127.0.0.1:5000/<jupyter_output><empty_output><jupyter_text>#### Running the Application in Jupyter Notebook<jupyter_code>import subprocess as sp
# Flask app
server = sp.Popen("FLASK_APP=./api/app.py flask run", shell=True)
server<jupyter_output><empty_output>
|
no_license
|
/socialmedia/twitch/.ipynb_checkpoints/index-checkpoint.ipynb
|
lordhriley/Exercises
| 7 |
<jupyter_start><jupyter_text># Feature engineeringCreating new features is a labor-intensive process that can significantly improve model accuracy when feature-engineering methods are used correctly. The biggest accuracy gains usually come after the data scientist digs into the subject area of the problem, because that makes it possible to create new features meaningfully.
Despite this, there are automatic/semi-automatic methods for creating new features, which we will discuss below.
To explore the topic of feature creation, we will use the `vis_data` dataset and enrich it with new information:<jupyter_code>import numpy as np
import pandas as pd
%matplotlib inline
vis_data = pd.read_csv("./data/train.csv",
encoding = 'ISO-8859-1',
low_memory = False)
vis_data = vis_data.drop(['violation_zip_code', 'clean_up_cost'], axis=1)
latlons = pd.read_csv("./data/latlons.csv")
vis_data = pd.concat([vis_data, latlons], axis=1)<jupyter_output><empty_output><jupyter_text>## Manual feature creation
Manual feature creation requires diving into the subject area of the problem and does not always guarantee a positive result.
Data often arrives without any explanation, and all we have is the tabular data itself and, if we are lucky, the feature names. We are lucky. Let's look at the data and at the feature names:<jupyter_code>vis_data.head(2)
vis_data.columns<jupyter_output><empty_output><jupyter_text>Judging by the variable names, this is data about offenders who have to pay a fine for some violation.
There is a variable compliance that takes the values `0`, `1`, and `None`. Since there is no description, we can assume that 0 means the offender paid the fine and 1 means they did not. What None means in this context is hard to tell; these may be missing values that should simply be dropped.
Besides the original features, we are also given latitude and longitude. There is also a `city` feature. What can we do with such a set of features? For example, we can look at the distance from the violation location to the center of the city where the violation occurred. Let's first explore which cities are represented in the sample:<jupyter_code># We can look at value_counts without limiting the output,
# to see how many different values there are
vis_data.city.value_counts()[:10]<jupyter_output><empty_output><jupyter_text>We can see that the overwhelming majority of the values are `DETROIT`. A bit further down we also see `Detroit` and `detroit`.
The name of the city with the largest number of violations is written in different ways in the data. Perhaps some other cities are also recorded in mixed case. <jupyter_code>vis_data.city = vis_data.city.apply(lambda x: x.lower())
vis_data.city.value_counts()[:10]<jupyter_output><empty_output><jupyter_text>Now we can see more clearly that most of the violations (among those represented in the sample) are committed in Detroit.
The distance from the violation location to the center of Detroit may be an informative feature.
Now let's compute, for every violation that has a latitude and longitude, its distance to the center of Detroit with the following coordinates:
- latitude: 42.331429
- longitude: -83.045753<jupyter_code>from math import radians, sin, cos, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2) ** 2
c = 2 * asin(sqrt(a))
earth_radius = 6371
return c * earth_radius
vis_data['distance'] = vis_data.apply(lambda row:
haversine(lon1 = -83.045753,
lat1 = 42.331429,
lon2 = row['lon'],
lat2 = row['lat']),
axis=1)
vis_data['distance'][vis_data['distance'] < 100].hist(bins=100);<jupyter_output><empty_output><jupyter_text>## Creating polynomial features
Now let's look at more mechanical, less knowledge-driven ways to create features. New features can be created simply by multiplying and dividing existing features by each other.
Besides that, we can take a single feature and, for example, square it or take its square root. More generally, we can create a new feature from an old one by raising all of its values to a fractional power.<jupyter_code>from sklearn.preprocessing import PolynomialFeatures<jupyter_output><empty_output><jupyter_text>The `PolynomialFeatures` class from the `sklearn` library takes a set of features as input and returns polynomial features generated from them, with degree less than or equal to the specified one.
For example, for the features `[a, b]` and degree `2`, it returns `[1, a, b, a^2, ab, b^2]`.<jupyter_code>pf = PolynomialFeatures(2)
poly_features = pf.fit_transform(vis_data[['balance_due', 'payment_amount']])
poly_features
poly_features.shape<jupyter_output><empty_output><jupyter_text>## Dummy variables
When we talk about categorical features, there are two main cases:
- The categorical feature contains strings; no model will accept them as input, so some transformation is required
- The categorical feature contains numbers. In this case we could, in principle, leave the feature as it is, but that would be wrong from the point of view of how the model learns.
Consider the second case: there is an "animal type" feature that takes `n` different values: 0 if the animal is a cat, 1 if it is a tiger, 2 if it is a panther. Unlike `continuous` features, which have some ordering, categorical features have no such ordering - we cannot strictly compare a tiger and a panther, they are simply different entities. Some algorithms can work with categorical features, some cannot.
A natural preprocessing step for such features is creating dummy variables from the categorical feature, that is, creating `n` indicator features. Each indicator equals 1 when the corresponding value is selected in the original example and 0 otherwise.
Let's convert the `state` feature into dummy variables and look at its `shape`:<jupyter_code>pd.get_dummies(vis_data.state).shape<jupyter_output><empty_output><jupyter_text>Now let's convert the `city` feature into dummy variables. To do this, we first need a bit of preprocessing to reduce the number of unique values in this feature. To start, let's find the cities that describe the majority (`90%`) of the examples:<jupyter_code>cities_with_freqs = list(vis_data.city.value_counts())
top_cities_count = int(np.percentile(cities_with_freqs, 90))
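# Note: this takes the 90th percentile of the per-city frequency values and reuses
# that number as how many top cities to keep - a rough heuristic rather than a
# guarantee that the kept cities cover exactly 90% of the rows.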
top_cities_count<jupyter_output><empty_output><jupyter_text>Now let's keep only these top cities and label all the remaining cities as `other`:<jupyter_code>all_cities = vis_data.city.value_counts().index
top_cities = list(all_cities)[:top_cities_count]
cities_to_throw_away = list(set(all_cities) - set(top_cities))
vis_data.loc[vis_data['city'].isin(cities_to_throw_away),
             'city'] = 'other'<jupyter_output><empty_output><jupyter_text>Now the number of unique values in the `city` feature has been reduced to:<jupyter_code>len(vis_data.city.value_counts())<jupyter_output><empty_output><jupyter_text>We can now create dummy features for this variable:<jupyter_code>pd.get_dummies(vis_data.city, drop_first=True).head()<jupyter_output><empty_output><jupyter_text>## Working with dates/times
`Pandas` supports working with dates.
[More on working with datetimes in pandas](https://codeburst.io/dealing-with-datetimes-like-a-pro-in-pandas-b80d3d808a7f)
Let's take a feature that contains a timestamp, drop the missing values, and convert it to the special datetime format:<jupyter_code>datetime_vals = pd.to_datetime(vis_data.payment_date.dropna())
datetime_vals.head()<jupyter_output><empty_output><jupyter_text>Now we can access these values to get the day, month, or year:<jupyter_code>datetime_vals[1].month<jupyter_output><empty_output><jupyter_text>Besides that, we can extract features such as the day of the week:<jupyter_code>datetime_vals[1].weekday()<jupyter_output><empty_output><jupyter_text>Counting starts from Monday and from zero, so we can tell this is a Thursday.
Why might this be an important feature? For example, in the current dataset we can use a weekday/weekend indicator to determine when violations are committed more often:<jupyter_code>dt_issued_date = pd.to_datetime(vis_data.ticket_issued_date)
vis_data['is_weekend'] = dt_issued_date.dt.weekday > 4
vis_data['wd'] = dt_issued_date.dt.weekday<jupyter_output><empty_output>
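As a quick follow-up to the weekday features just created, a short sketch (using only the columns defined above) that counts tickets per day of the week, with 0 standing for Monday:
```python
# Violations per day of week and weekday/weekend split
print(vis_data['wd'].value_counts().sort_index())
print(vis_data['is_weekend'].value_counts())
```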
|
no_license
|
/m2_part4_feature_engineering.ipynb
|
mbovkush/studying
| 16 |
<jupyter_start><jupyter_text>## A list stores many values in a single structure.* Doing calculations with a hundred variables called `pressure_001`, `pressure_002`, etc., would be at least as slow as doing them by hand.
* Use a *list* to store many values together.
* Contained within square brackets `[...]`.
* Values separated by commas `,`.
* Use `len` to find out how many values are in a list.<jupyter_code>pressures = [0.273, 0.275, 0.277, 0.275, 0.276]
print('pressures:', pressures)
print('length:', len(pressures))<jupyter_output>pressures: [0.273, 0.275, 0.277, 0.275, 0.276]
length: 5
<jupyter_text>## Use an item's index to fetch it from a list.* Just like strings.<jupyter_code>print('zeroth item of pressures:', pressures[0])
print('fourth item of pressures:', pressures[4])<jupyter_output>zeroth item of pressures: 0.273
fourth item of pressures: 0.276
<jupyter_text>## Lists' values can be replaced by assigning to them.* Use an index expression on the left of assignment to replace a value.<jupyter_code>pressures[0] = 0.265
print('pressures is now:', pressures)<jupyter_output>pressures is now: [0.265, 0.275, 0.277, 0.275, 0.276]
<jupyter_text>## Appending items to a list lengthens it.* Use `list_name.append` to add items to the end of a list.<jupyter_code>primes = [2, 3, 5]
print('primes is initially:', primes)
primes.append(7)
primes.append(9)
print('primes has become:', primes)<jupyter_output>primes is initially: [2, 3, 5]
primes has become: [2, 3, 5, 7, 9]
<jupyter_text>* `append` is a method of lists.
* Like a function, but tied to a particular object.
* Use `object_name.method_name` to call methods.
* Deliberately resembles the way we refer to things in a library.
* We will meet other methods of lists as we go along.
* Use `help(list)` for a preview.
* `extend` is similar to `append`, but it allows you to combine two lists. For example:<jupyter_code>teen_primes = [11, 13, 17, 19]
middle_aged_primes = [37, 41, 43, 47]
print('primes is currently:', primes)
primes.extend(teen_primes)
print('primes has now become:', primes)
primes.append(middle_aged_primes)
print('primes has finally become:', primes)<jupyter_output>primes is currently: [2, 3, 5, 7, 9]
primes has now become: [2, 3, 5, 7, 9, 11, 13, 17, 19]
primes has finally become: [2, 3, 5, 7, 9, 11, 13, 17, 19, [37, 41, 43, 47]]
<jupyter_text>Note that while `extend` maintains the "flat" structure of the list, appending a list to a list makes the result two-dimensional.## Use `del` to remove items from a list entirely.* `del list_name[index]` removes an item from a list and shortens the list.
* Not a function or a method, but a statement in the language.<jupyter_code>print('primes before removing last item:', primes)
del primes[4]
print('primes after removing last item:', primes)<jupyter_output>primes before removing last item: [2, 3, 5, 7, 9, 11, 13, 17, 19, [37, 41, 43, 47]]
primes after removing last item: [2, 3, 5, 7, 11, 13, 17, 19, [37, 41, 43, 47]]
<jupyter_text>## The empty list contains no values.* Use `[]` on its own to represent a list that doesn't contain any values.
* "The zero of lists."
* Helpful as a starting point for collecting values (which we will see in the next episode).## Lists may contain values of different types.* A single list may contain numbers, strings, and anything else.<jupyter_code>goals = [1, 'Create lists.', 2, 'Extract items from lists.', 3, 'Modify lists.']<jupyter_output><empty_output><jupyter_text>## Character strings can be indexed like lists.* Get single characters from a character string using indexes in square brackets.<jupyter_code>element = 'carbon'
print('zeroth character:', element[0])
print('third character:', element[3])<jupyter_output>zeroth character: c
third character: b
<jupyter_text>## Character strings are immutable.* Cannot change the characters in a string after it has been created.
* *Immutable*: can't be changed after creation.
* In contrast, lists are mutable: they can be modified in place.
* Python considers the string to be a single value with parts, not a collection of values.<jupyter_code>element[0] = 'C'<jupyter_output><empty_output><jupyter_text>* Lists and character strings are both collections.## Indexing beyond the end of the collection is an error.* Python reports an `IndexError` if we attempt to access a value that doesn't exist.
* This is a kind of runtime error.
* Cannot be detected as the code is parsed because the index might be calculated based on data.<jupyter_code>print('99th element of element is:', element[99])<jupyter_output><empty_output><jupyter_text>## Questions#### Q1: Fill in the BlanksFill in the blanks so that the program below produces the output shown.<jupyter_code>values = ____
values.____(1)
values.____(3)
values.____(5)
print('first time:', values)
values = values[____]
print('second time:', values)
first time: [1, 3, 5]
second time: [3, 5]<jupyter_output><empty_output><jupyter_text>#### [Answer](#answer_key)#### Q2: How Large is the Slice?If `low` and `high` are both non-negative integers, how long is the list `values[low:high]`?#### [Answer](#answer_key)#### Q3: From Strings to Lists and BackGiven this:<jupyter_code>print('string to list:', list('tin'))
print('list to string:', ''.join(['g', 'o', 'l', 'd']))
['t', 'i', 'n']
'gold'<jupyter_output><empty_output><jupyter_text>1. Explain in simple terms what `list('some string')` does.
2. What does `'-'.join(['x', 'y'])` generate?
#### [Answer](#answer_key)#### Q4: Working With the EndWhat does the following program print?<jupyter_code>element = 'helium'
print(element[-1])<jupyter_output><empty_output><jupyter_text>1. How does Python interpret a negative index?
2. If a list or string has N elements, what is the most negative index that can safely be used with it, and what location does that index represent?
3. If `values` is a list, what does `del values[-1]` do?
4. How can you display all elements but the last one without changing `values`? (Hint: you will need to combine slicing and negative indexing.)#### [Answer](#answer_key)#### Q5: Stepping Through a ListWhat does the following program print?<jupyter_code>element = 'fluorine'
print(element[::2])
print(element[::-1])<jupyter_output><empty_output><jupyter_text>1. If we write a slice as low:high:stride, what does stride do?
2. What expression would select all of the even-numbered items from a collection?#### [Answer](#answer_key)#### Q6: Slice BoundsWhat does the following program print?<jupyter_code>element = 'lithium'
print(element[0:20])
print(element[-1:3])<jupyter_output><empty_output><jupyter_text>#### Q7: Sort and SortedWhat do these two programs print? In simple terms, explain the difference between `sorted(letters)` and `letters.sort()`.Program A<jupyter_code>letters = list('gold')
result = sorted(letters)
print('letters is', letters, 'and result is', result)<jupyter_output><empty_output><jupyter_text>Program B<jupyter_code>letters = list('gold')
result = letters.sort()
print('letters is', letters, 'and result is', result)<jupyter_output><empty_output><jupyter_text>#### [Answer](#answer_key)#### Q8: Copying or NotWhat do these two programs print? In simple terms, explain the difference between `new = old` and `new = old[:]`.Program A<jupyter_code>old = list('gold')
new = old # simple assignment
new[0] = 'D'
print('new is', new, 'and old is', old)<jupyter_output><empty_output><jupyter_text>Program B<jupyter_code>old = list('gold')
new = old[:] # assigning a slice
new[0] = 'D'
print('new is', new, 'and old is', old)<jupyter_output><empty_output><jupyter_text>#### [Answer](#answer_key)******
******
******
******
******
******
******
******
******
******
******
******
******### Answers #### Q1: Fill in the Blanks<jupyter_code>values = []
values.append(1)
values.append(3)
values.append(5)
print('first time:', values)
values = values[1:]
print('second time:', values)<jupyter_output><empty_output><jupyter_text>#### Q2: How Large is the Slice?The list `values[low:high]` has `high - low` elements. For example,
`values[1:4]` has the 3 elements `values[1]`, `values[2]`, and `values[3]`.
Note that this count only holds if `high` is no greater than the total length of the list `values` (otherwise the slice is simply truncated at the end of the list).#### Q3: From Strings to Lists and Back1. `list('some string')` "splits" a string into a list of its characters.
2. `'-'.join(['x', 'y'])` generates the string `'x-y'`.#### Q4: Working With the EndThe program prints `m`.
1. Python interprets a negative index as starting from the end (as opposed to
starting from the beginning). The last element is `-1`.
2. The most negative index that can safely be used with a list of N elements is `-N`, which refers to the first element.
3. `del values[-1]` removes the last element from the list.
4. `values[:-1]`#### Q5: Stepping Through a ListThe program prints:
furn
eniroulf
1. `stride` is the step size of the slice
2. The slice `1::2` selects all even-numbered items from a collection: it starts
with element `1` (which is the second element, since indexing starts at `0`),
goes on until the end (since no `end` is given), and uses a step size of `2`
(i.e., selects every second element).#### Q6: Slice Bounds<jupyter_code>lithium<jupyter_output><empty_output><jupyter_text>#### Q7: Sort and SortedProgram A<jupyter_code>letters is ['g', 'o', 'l', 'd'] and result is ['d', 'g', 'l', 'o']<jupyter_output><empty_output><jupyter_text>Program B<jupyter_code>letters is ['d', 'g', 'l', 'o'] and result is None<jupyter_output><empty_output><jupyter_text>`sorted(letters)` returns a sorted copy of the list `letters` (the original
list `letters` remains unchanged), while `letters.sort()` sorts the list
`letters` in-place and does not return anything.#### Q8: Copying or NotProgram A<jupyter_code>new is ['D', 'o', 'l', 'd'] and old is ['D', 'o', 'l', 'd']<jupyter_output><empty_output><jupyter_text>Program B<jupyter_code>new is ['D', 'o', 'l', 'd'] and old is ['g', 'o', 'l', 'd']<jupyter_output><empty_output>
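`new = old` makes `new` a second name for the same list object, so changing one changes the other, while `new = old[:]` copies the values into a brand-new list. A small added check (not part of the original lesson) makes the difference visible:<jupyter_code>old = list('gold')
alias = old        # same object as old
copy = old[:]      # a new list holding the same values
print(alias is old, copy is old)   # True False
alias[0] = 'D'
print(old)    # ['D', 'o', 'l', 'd'] -- changed through the alias
print(copy)   # ['g', 'o', 'l', 'd'] -- the copy is unaffected<jupyter_output><empty_output>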
|
non_permissive
|
/_episodes_jupyter/lists.ipynb
|
alyssaberger/py_intro_class
| 25 |
<jupyter_start><jupyter_text># Janta hack EDA File<jupyter_code># Importing required libraries
#_______________________________________________________________________________________________________________
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style # for styling the graphs
#_______________________________________________________________________________________________________________
# style.available (to know the available list of styles)
style.use('ggplot') # chosen style
plt.rc('xtick',labelsize=13) # to globally set the tick size
plt.rc('ytick',labelsize=13) # to globally set the tick size
# To print multiple outputs together
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Change column display number during print
pd.set_option('display.max_columns', 500)
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
# To display float with 2 decimal, avoid scientific printing
pd.options.display.float_format = '{:.2f}'.format
import seaborn as sns
path='./new_train.csv'
path1='./new_test.csv'
def get_data():
train = pd.read_csv(path)
test = pd.read_csv(path1)
print(train.shape, test.shape)
return train, test
train, test = get_data()
data = pd.concat([train, test], axis=0)
# label each row with its origin; pd.concat keeps the train rows first, then the test rows
data['source'] = ["train"] * train.shape[0] + ["test"] * test.shape[0]
data.drop(['Interest_Rate'], axis=1, inplace=True)
print(data.shape)
train
col = 'Gender'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
col = 'Income_Verified'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
col='Inquiries_Last_6Mo'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Home_Owner_Mortgage
col='Home_Owner_Mortgage'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Home_Owner_Rent
col='Home_Owner_Rent'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Home_Owner_Other
col='Home_Owner_Other'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Home_Owner_None
col='Home_Owner_None'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Purpose_Of_Loan_car
col='Purpose_Of_Loan_car'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Purpose_Of_Loan_credit_card
col='Purpose_Of_Loan_credit_card'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Purpose_Of_Loan_debt_consolidation
col='Purpose_Of_Loan_debt_consolidation'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Purpose_Of_Loan_educational
col='Purpose_Of_Loan_educational'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Purpose_Of_Loan_home_improvement
col='Purpose_Of_Loan_home_improvement'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
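# Added sketch (not part of the original EDA): the same three-line pattern is repeated for every
# categorical column, so it can be wrapped in a helper. Calling plt.figure() before each countplot
# also keeps the source-wise and target-wise plots on separate axes instead of overlapping.
def plot_categorical_counts(col):
    plt.figure()
    plt.xticks(rotation=90)
    sns.countplot(data[col], hue=data['source'])
    plt.figure()
    plt.xticks(rotation=90)
    sns.countplot(train[col], hue=train['Interest_Rate'])
    plt.show()
# Example: plot_categorical_counts('Gender')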
#Loan_Amount_Requested
col='Loan_Amount_Requested'
plt.figure()
sns.distplot(train[col], color='b', label='train', hist=False)
sns.distplot(test[col], color='g', label='test', hist=False)
plt.legend(loc='best')
plt.show()
target = 'Interest_Rate'
plt.figure()
sns.distplot(train[col][train[target] == 1], color='b', label='1' , hist=False)
sns.distplot(train[col][train[target] == 2], color='g', label='2' , hist=False)
sns.distplot(train[col][train[target] == 3], color='r', label='3' , hist=False)
plt.legend(loc='best')
plt.show()
#Annual_Income
col='Annual_Income'
plt.figure()
sns.distplot(train[col], color='b', label='train', hist=False)
sns.distplot(test[col], color='g', label='test', hist=False)
plt.legend(loc='best')
plt.show()
target = 'Interest_Rate'
plt.figure()
sns.distplot(train[col][train[target] == 1], color='b', label='1' , hist=False)
sns.distplot(train[col][train[target] == 2], color='g', label='2' , hist=False)
sns.distplot(train[col][train[target] == 3], color='r', label='3' , hist=False)
plt.legend(loc='best')
plt.show()
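# Added sketch (not in the original notebook): the pair of density comparisons above is repeated
# for each numeric column, so a small helper avoids the copy-paste.
def plot_numeric_densities(col, target='Interest_Rate'):
    plt.figure()
    sns.distplot(train[col], color='b', label='train', hist=False)
    sns.distplot(test[col], color='g', label='test', hist=False)
    plt.legend(loc='best')
    plt.show()
    plt.figure()
    for value, colour in zip([1, 2, 3], ['b', 'g', 'r']):
        sns.distplot(train[col][train[target] == value], color=colour, label=str(value), hist=False)
    plt.legend(loc='best')
    plt.show()
# Example: plot_numeric_densities('Debt_To_Income')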
#Purpose of Loan
col='Purpose_Of_Loan_vacation'
plt.xticks(rotation=90)
sns.countplot(data[col], hue=data['source'])
sns.countplot(train[col], hue=train['Interest_Rate'])
#Debt_To_Income
col='Debt_To_Income'
plt.figure()
sns.distplot(train[col], color='b', label='train', hist=False)
sns.distplot(test[col], color='g', label='test', hist=False)
plt.legend(loc='best')
plt.show()
target = 'Interest_Rate'
plt.figure()
sns.distplot(train[col][train[target] == 1], color='b', label='1' , hist=False)
sns.distplot(train[col][train[target] == 2], color='g', label='2' , hist=False)
sns.distplot(train[col][train[target] == 3], color='r', label='3' , hist=False)
plt.legend(loc='best')
plt.show()<jupyter_output><empty_output>
|
no_license
|
/jantahack ML for banking/jantahack_eda.ipynb
|
panchamdesai777/Hackathons
| 1 |
<jupyter_start><jupyter_text># Read the CSV and Perform Basic Data Cleaning<jupyter_code># https://help.lendingclub.com/hc/en-us/articles/215488038-What-do-the-different-Note-statuses-mean-
import pandas as pd
from pathlib import Path
from collections import Counter

columns = [
"loan_amnt", "int_rate", "installment", "home_ownership",
"annual_inc", "verification_status", "issue_d", "loan_status",
"pymnt_plan", "dti", "delinq_2yrs", "inq_last_6mths",
"open_acc", "pub_rec", "revol_bal", "total_acc",
"initial_list_status", "out_prncp", "out_prncp_inv", "total_pymnt",
"total_pymnt_inv", "total_rec_prncp", "total_rec_int", "total_rec_late_fee",
"recoveries", "collection_recovery_fee", "last_pymnt_amnt", "next_pymnt_d",
"collections_12_mths_ex_med", "policy_code", "application_type", "acc_now_delinq",
"tot_coll_amt", "tot_cur_bal", "open_acc_6m", "open_act_il",
"open_il_12m", "open_il_24m", "mths_since_rcnt_il", "total_bal_il",
"il_util", "open_rv_12m", "open_rv_24m", "max_bal_bc",
"all_util", "total_rev_hi_lim", "inq_fi", "total_cu_tl",
"inq_last_12m", "acc_open_past_24mths", "avg_cur_bal", "bc_open_to_buy",
"bc_util", "chargeoff_within_12_mths", "delinq_amnt", "mo_sin_old_il_acct",
"mo_sin_old_rev_tl_op", "mo_sin_rcnt_rev_tl_op", "mo_sin_rcnt_tl", "mort_acc",
"mths_since_recent_bc", "mths_since_recent_inq", "num_accts_ever_120_pd", "num_actv_bc_tl",
"num_actv_rev_tl", "num_bc_sats", "num_bc_tl", "num_il_tl",
"num_op_rev_tl", "num_rev_accts", "num_rev_tl_bal_gt_0",
"num_sats", "num_tl_120dpd_2m", "num_tl_30dpd", "num_tl_90g_dpd_24m",
"num_tl_op_past_12m", "pct_tl_nvr_dlq", "percent_bc_gt_75", "pub_rec_bankruptcies",
"tax_liens", "tot_hi_cred_lim", "total_bal_ex_mort", "total_bc_limit",
"total_il_high_credit_limit", "hardship_flag", "debt_settlement_flag"
]
target = ["loan_status"]
# Load the data
file_path = Path('LoanStats_2019Q1.csv')
df = pd.read_csv(file_path, skiprows=1)[:-2]
df = df.loc[:, columns].copy()
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
# Remove the `Issued` loan status
issued_mask = df['loan_status'] != 'Issued'
df = df.loc[issued_mask]
# convert interest rate to numerical
df['int_rate'] = df['int_rate'].str.replace('%', '')
df['int_rate'] = df['int_rate'].astype('float') / 100
# Convert the target column values to low_risk and high_risk based on their values
x = {'Current': 'low_risk'}
df = df.replace(x)
x = dict.fromkeys(['Late (31-120 days)', 'Late (16-30 days)', 'Default', 'In Grace Period'], 'high_risk')
df = df.replace(x)
df.reset_index(inplace=True, drop=True)
df.head()
df_encoded=pd.get_dummies(df, columns=['home_ownership','verification_status','pymnt_plan','initial_list_status','application_type','hardship_flag','debt_settlement_flag'])
df_encoded=df_encoded.drop(columns=['issue_d','next_pymnt_d'])<jupyter_output><empty_output><jupyter_text># Split the Data into Training and Testing<jupyter_code># Create our features
X = df_encoded.drop(columns='loan_status')
# Create our target
y = df_encoded['loan_status']
X.describe()
# Check the balance of our target values
y.value_counts()
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Split first, then fit the scaler on the training data only, so no information
# from the test set leaks into the scaling
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)<jupyter_output><empty_output><jupyter_text># Ensemble Learners
In this section, you will compare two ensemble algorithms to determine which one performs best. You will train a Balanced Random Forest classifier and an Easy Ensemble AdaBoost classifier. For each algorithm, be sure to complete the following steps:
1. Train the model using the training data.
2. Calculate the balanced accuracy score from sklearn.metrics.
3. Print the confusion matrix from sklearn.metrics.
4. Generate a classification report using `classification_report_imbalanced` from imbalanced-learn.
5. For the Balanced Random Forest classifier only, print the feature importances sorted in descending order (most important to least important) along with each feature's score.
Note: Use a random state of 1 for each algorithm to ensure consistency between tests### Balanced Random Forest Classifier<jupyter_code># Resample the training data with the BalancedRandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, balanced_accuracy_score
from imblearn.metrics import classification_report_imbalanced
brf_model = BalancedRandomForestClassifier(n_estimators=100, random_state=1)
Counter(y)
brf_model.fit(X_train,y_train)
y_pred=brf_model.predict(X_test)
# Calculated the balanced accuracy score
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
cm=confusion_matrix(y_test,y_pred)
cm
cm_df = pd.DataFrame(
    cm, index=["Actual high_risk", "Actual low_risk"], columns=["Predicted high_risk", "Predicted low_risk"])
cm_df
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
# List the features sorted in descending order by feature importance
importances = brf_model.feature_importances_
sorted(zip(brf_model.feature_importances_, X.columns), reverse=True)<jupyter_output><empty_output><jupyter_text>### Easy Ensemble AdaBoost Classifier<jupyter_code># Train the EasyEnsembleClassifier
from imblearn.ensemble import EasyEnsembleClassifier
ec_model=EasyEnsembleClassifier(n_estimators=10, random_state=1)
ec_model.fit(X_train,y_train)
y_predict=ec_model.predict(X_test)
# Calculated the balanced accuracy score
balanced_accuracy_score(y_test, y_predict)
# Display the confusion matrix
confusion_matrix(y_test,y_predict)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_predict))<jupyter_output> pre rec spe f1 geo iba sup
high_risk 0.02 0.69 0.74 0.03 0.72 0.51 101
low_risk 1.00 0.74 0.69 0.85 0.72 0.52 17104
avg / total 0.99 0.74 0.69 0.85 0.72 0.52 17205
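A small added cell (not part of the original challenge) that simply gathers the balanced accuracy of both ensembles, already computed above, into one table for a side-by-side comparison; it only reuses objects defined earlier in the notebook.<jupyter_code>from sklearn.metrics import balanced_accuracy_score

summary = pd.DataFrame({
    "model": ["BalancedRandomForestClassifier", "EasyEnsembleClassifier"],
    "balanced_accuracy": [balanced_accuracy_score(y_test, y_pred),
                          balanced_accuracy_score(y_test, y_predict)]
})
summary<jupyter_output><empty_output>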
|
no_license
|
/Module-17-Challenge-Resources/credit_risk_ensemble.ipynb
|
PazilatNur/Credit_Risk_Analysis
| 4 |
<jupyter_start><jupyter_text># BASIC OPERATIONS WITH STRING DATA TYPE
<jupyter_code># string comparison with == is case sensitive
name1 = "muhammed qasim"
name2 = "MUHAMMED qasim"
print(name1 == name2)
print(name1,name2 , sep="\n")
# inline operation: the change is only temporary, the original object in memory is not modified
# in-memory operation: the value stored at the object's address is actually changed
# type name1. and press Tab to list the available string methods
help(str)
#shift tab....() enter
dir(name1)
name1 = "muhammed qasim"
print(name1.upper()) #inline operation
print(name1)
name1 = "Muhammed Qasim"
name1 = name1.lower() # in-memory operation
print(name1)
#del name1  # Python is an interpreted language
print(name1)
name1="qasim"
print(len(name1))
print(name1.lstrip())
print(name1.strip())
# lstrip removes leading spaces, rstrip removes trailing spaces, strip removes both
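# Added illustration (not in the original lecture): "qasim" has no surrounding whitespace,
# so a padded string shows the difference between lstrip, rstrip and strip more clearly.
padded = "   qasim   "
print(repr(padded.lstrip()))   # 'qasim   '  -> left side trimmed
print(repr(padded.rstrip()))   # '   qasim'  -> right side trimmed
print(repr(padded.strip()))    # 'qasim'     -> both sides trimmed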
a = "we are pakistanis.we love our country." #slicing multiple index
a[a.find("pakistanis"):15] # find returns the index of the first occurrence
# start:end:step -> slicing can be applied to lists, tuples and strings
a[7:15:1] #start:end:step
a = "we are pakistanis.we love our country."
b = a.split()
b
b.count("we")
#%
names = ["ali","hassan"] # %-style string formatting
fsname = ["ad","ddd"]
program = ['piaic']
print("student Name: %s \n fname: %s \n program: %s" % (names,fsname,program))
name = "hamza"
fname = "koko"
program = 'piaic'
# concatenation with +
"student name:" + name + "\n fname:" + fname + "\n program:" + program
name = "hamza"
fname = "koko"
program = 'piaic'
f"name:{name} \n fathername:{fname} \n program:{program}"
"name:{}, \n fname: {}".format(name, fname)<jupyter_output><empty_output>
|
no_license
|
/PIAIC PYTHON LECTURE 2 .ipynb
|
Qurat-ul-Ain-Gul19/PIAIC-QUARTER-1
| 1 |
<jupyter_start><jupyter_text># Plot Orbital Trajectories from Spice Kernels <jupyter_code>import heliopy.data.spice as spicedata
import heliopy.spice as spice
from datetime import datetime,timedelta
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib import animation, rc
from IPython.display import HTML
import astropy.constants as const
import astropy.units as u
from glob import glob
from ntpath import basename
import numpy as np
from urllib.request import urlretrieve
import os
rc('animation',html='html5')
# heliopy, sunpy, cdflib, spiceypy non -standard dependencies<jupyter_output><empty_output><jupyter_text>### Get Available Spice Kernels from Heliopy<jupyter_code>for kernel in spicedata.kernel_dict : # Do Downloads to Local Machine, furnsh to spice:
k = spicedata.get_kernel(kernel)
for kernel_path in k :
try : spice.furnish(kernel_path)
except : print(f"Kernel {basename(kernel_path)} was not downloaded, not furnished")
print(kernel)<jupyter_output>lsk
planet_trajectories
planet_orientations
helio_frames
helios1
helios2
juno
Downloading https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/ahead/ahead_2019_030_01.depm.bsp
Kernel ahead_2019_030_01.depm.bsp was not downloaded, not furnished
stereo_a
ulysses
psp
solo_2020
psp_pred
stereo_a_pred
juno_pred
bepi_pred
<jupyter_text>### Download some more kernels from NAIF<jupyter_code>extra_spice_dict = {
"VOYAGER 1" : 'https://naif.jpl.nasa.gov/pub/naif/VOYAGER/kernels/spk/Voyager_1.a54206u_V0.2_merged.bsp',
"VOYAGER 2" : 'https://naif.jpl.nasa.gov/pub/naif/VOYAGER/kernels/spk/Voyager_2.m05016u.merged.bsp',
"STEREO B 2007a" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2007_021_01.depm.bsp",
"STEREO B 2007b" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2007_053_01.depm.bsp",
"STEREO B 2008a" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2008_037_01.depm.bsp",
"STEREO B 2008b" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2008_078_01.depm.bsp",
"STEREO B 2010" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2010_203_01.depm.bsp",
"STEREO B 2011" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2011_193_01.depm.bsp",
"STEREO B 2012" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2012_265_01.depm.bsp",
"STEREO B 2014a" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2014_002_01.depm.bsp",
"STEREO B 2014b" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2014_271_01.depm.bsp",
"STEREO B 2016" : "https://sohowww.nascom.nasa.gov/solarsoft/stereo/gen/data/spice/depm/behind/behind_2016_256_01.depm.bsp",
}
for entry in extra_spice_dict :
url = extra_spice_dict.get(entry)
local = spice.data_dir+"/spice/"+basename(url)
if not os.path.isfile(local) :
print(f"Downloaded {basename(url)}")
path=urlretrieve(url,local)
spice.furnish(local)
print(entry)
def orbital_animations(body_list,t_start,t_final,t_delta=1.0,
SPICE_Origin='SUN',SPICE_Frame='ECLIPJ2000',
units='au',fname=None,fps=60,ecliptic_proj=False):
'''
Parameters
----------
body_list : tuple containing strings of bodies for which spice data exists, e.g. ['Earth']
t_start : start day for animation, format Tuple(YYYY,MM,DD)
t_final : end day for animation, format Tuple(YYYY,MM,DD)
Keywords
--------
t_delta=1.0 : time cadence of each frame in animation, format float number of days, default : 1.0
SPICE_Origin='SUN' : string containing origin defined in SPICE
SPICE_Frame='ECLIPJ2000' : string containing frame defined in SPICE
units='au' : string corresponding to unit for plotting,alt : 'm','R_earth','R_sun'
fname=None : output filename, defaults to orbit.mp4
'''
# Generate Heliopy Spice Body Objects
bodies = [spice.Trajectory(body) for body in body_list]
# Generate Time Series to Plot Orbits Over
starttime = datetime(*t_start)
endtime = datetime(*t_final)
times = []
while starttime < endtime:
times.append(starttime)
starttime += timedelta(days=t_delta)
unit_dict = {'m' : u.m, 'R_earth' : u.R_earth, 'R_sun' : u.R_sun, 'au' : u.au}
# Generate Positions for each body over the specified times
reference = SPICE_Origin
frame = SPICE_Frame
spice_error_inds = []
for body,index in zip(bodies,range(len(bodies))) :
try : # If no or partial spice data for a given body in the timeframe, remove from plot
body.generate_positions(times, reference, frame)
body.change_units(unit_dict.get(units))
except :
print(f"Some/All SPICE data missing for {body.target} in time range {t_start} to {t_final}")
print(f"Omitting {body.target}")
spice_error_inds.append(index-len(spice_error_inds))
if len(spice_error_inds) > 0 :
for ind in spice_error_inds : bodies.pop(ind)
print(f"Remaining bodies in list : {[body_.target for body_ in bodies]}")
maxes = []
for body in bodies :
maxes.append(np.max(np.abs(body.x.value)))
maxes.append(np.max(np.abs(body.y.value)))
maxes.append(np.max(np.abs(body.z.value)))
maximum = np.max(maxes)
# Setup Figure
fig,ax = plt.subplots(figsize=(15,15),subplot_kw={'projection' :'3d'})
ax.set_xlim([-maximum,maximum])
ax.set_ylim([-maximum,maximum])
ax.set_zlim([-maximum,maximum])
ax.set_xlabel(f"X-ECLIPJ2000 [{units}]",fontsize=17)
ax.set_ylabel(f"Y-ECLIPJ2000 [{units}]",fontsize=17)
ax.set_zlabel(f"Z-ECLIPJ2000 [{units}]",fontsize=17)
full_trajs,recent_trajs = [],[]
for body in bodies :
full_trajs.append(ax.plot([],[],[],lw=0.5)[0])
recent_trajs.append(ax.plot([],[],[],lw=4,label=body.target,color=full_trajs[-1].get_c())[0])
current_locs = [ax.scatter([],[],[],color="black")]
texts = [ax.text(-maximum,-maximum,maximum,"",fontsize=20)]
legend = plt.legend(fontsize=17)
if ecliptic_proj : ax.view_init(90,-90)
# Initialize Figure
def init():
xi,yi,zi = [],[],[]
for full_traj,recent_traj,body in zip(full_trajs,recent_trajs,bodies) :
full_traj.set_data([],[])
full_traj.set_3d_properties([])
recent_traj.set_data([],[])
recent_traj.set_3d_properties([])
xi.append(body.x.value[0])
yi.append(body.y.value[0])
zi.append(body.z.value[0])
current_locs[0]._offsets3d = np.array([xi,yi,zi])
texts[0].set_text(f"{times[0]}")
return full_trajs+recent_trajs+current_locs+texts
    # Animation update: the thick 'recent' trail highlights the last 20 frames of each trajectory
def animate(i):
xi,yi,zi=[],[],[]
for full_traj,recent_traj,body in zip(full_trajs,recent_trajs,bodies) :
full_traj.set_data(body.x.value[:i],body.y.value[:i])
full_traj.set_3d_properties(body.z.value[:i])
recent_traj.set_data(body.x.value[i-20:i],body.y.value[i-20:i])
recent_traj.set_3d_properties(body.z.value[i-20:i])
xi.append(body.x.value[i])
yi.append(body.y.value[i])
zi.append(body.z.value[i])
current_locs[0]._offsets3d = np.array([xi,yi,zi])
texts[0].set_text(f"{times[i]}")
return recent_trajs+current_locs+texts
anim = animation.FuncAnimation(fig,animate,init_func=init,frames=len(times)-1,blit=True)
if fname is not None : anim.save(fname,fps=fps)
else : anim.save('orbits.mp4',fps=fps)<jupyter_output><empty_output><jupyter_text>## Ulysses 1991 Jupiter Gravity Assist<jupyter_code>body_list = ['Ulysses',
'Earth',
'Jupiter_barycenter',
'Mars_barycenter',
'Venus_barycenter',
]
start = (1990, 10, 7)
end = (1999, 10, 7)
orbital_animations(body_list,start,end,fname='ulysses.mp4',t_delta=5.0)<jupyter_output><empty_output><jupyter_text>## Helios 1 and 2 1976-1979<jupyter_code>body_list = ['HELIOS 1',
'HELIOS 2',
'Earth',
'Mercury_barycenter',
'Venus_barycenter',
]
start=(1976, 1, 16)
end=(1979, 1, 1)
orbital_animations(body_list,start,end,fname='helios.mp4')<jupyter_output><empty_output><jupyter_text>## Parker Solar Probe<jupyter_code>body_list = ['SPP',
'Earth',
'Mercury_barycenter',
'Venus_barycenter',
]
start=(2018, 8, 17)
end=(2022, 4, 12)
orbital_animations(body_list,start,end,fname='psp.mp4',fps=30)<jupyter_output><empty_output><jupyter_text>## Juno
### Part 2 : Polar Orbit Since July 2016<jupyter_code>body_list = ['JUNO',
'Jupiter_barycenter',
]
start=(2016, 7, 6)
end=(2019, 1, 1)
orbital_animations(body_list,start,end,fname='juno.mp4',fps=30,SPICE_Origin='Jupiter_barycenter',
t_delta=0.2)<jupyter_output><empty_output><jupyter_text>### STEREO A and B<jupyter_code>body_list = ['STEREO AHEAD',
'STEREO BEHIND',
'EARTH'
]
start=(2006, 10, 27)
end=(2014, 1, 1)
orbital_animations(body_list,start,end,fname='stereo.mp4',
fps=30,
t_delta=1.0
)<jupyter_output><empty_output><jupyter_text>### Solar Orbiter<jupyter_code>body_list = ['SOLO',
'VENUS',
'SPP',
'EARTH',
]
start=(2020, 3, 1)
end=(2024, 4, 1)
orbital_animations(body_list,start,end,fname='solar-orbiter.mp4',
fps=30,
t_delta=1.0
)<jupyter_output><empty_output><jupyter_text>### Voyagers### Voyager 1 and 2<jupyter_code>body_list = ['VOYAGER 1',
'VOYAGER 2',
'EARTH',
'JUPITER BARYCENTER',
'SATURN BARYCENTER',
'URANUS BARYCENTER',
'NEPTUNE BARYCENTER',
'PLUTO BARYCENTER'
]
start=(1977, 10, 1)
end=(2000, 1, 1)
orbital_animations(body_list,start,end,fname='vgr1&2.mp4',
fps=60,
t_delta=5.0
)<jupyter_output><empty_output><jupyter_text>### PSP Perihelion 2 <jupyter_code>body_list = ['SPP',
'Earth',
'STEREO AHEAD',
]
spice.spiceypy.furnsh('/media/samuel_badman/ExtraDrive1/spice_kernels/pystereo.tm')
start=(2019, 3, 1)
end=(2019, 4, 30)
orbital_animations(body_list,start,end,fname='psp_E2.mp4',fps=30,SPICE_Frame='HEE',
ecliptic_proj=True,t_delta = 0.25)<jupyter_output><empty_output><jupyter_text>### Wind-STEREO Constellation<jupyter_code>SPICE_Origin = 'Sun'
SPICE_Frame = 'HEE'#'ECLIPJ2000'
t_start=(2006, 10, 27)
t_final=(2014, 1, 1)
t_delta=1
units = 'au'
body_list = ['STEREO AHEAD','STEREO BEHIND', 'Earth']
# Generate Heliopy Spice Body Objects
bodies = [spice.Trajectory(body) for body in body_list]
# Generate Time Series to Plot Orbits Over
starttime = datetime(*t_start)
endtime = datetime(*t_final)
times = []
while starttime < endtime:
times.append(starttime)
starttime += timedelta(days=t_delta)
unit_dict = {'m' : u.m, 'R_earth' : u.R_earth, 'R_sun' : u.R_sun, 'au' : u.au}
# Generate Positions for each body over the specified times
reference = SPICE_Origin
frame = SPICE_Frame
spice_error_inds = []
for body,index in zip(bodies,range(len(bodies))) :
try : # If no or partial spice data for a given body in the timeframe, remove from plot
body.generate_positions(times, reference, frame)
body.change_units(unit_dict.get(units))
except :
print(f"Some/All SPICE data missing for {body.target} in time range {t_start} to {t_final}")
print(f"Omitting {body.target}")
spice_error_inds.append(index-len(spice_error_inds))
if len(spice_error_inds) > 0 :
for ind in spice_error_inds : bodies.pop(ind)
print(f"Remaining bodies in list : {[body_.target for body_ in bodies]}")
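# Added note: the factor below is the Hill-sphere approximation of the Sun-Earth L1 point,
# r_L1 ~ a * (1 - (M_earth / (3*(M_earth + M_sun)))**(1/3)); Earth's position is scaled by this
# ratio to approximate the Wind spacecraft, which orbits L1.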
L_mult = 1- ((const.M_earth.value/(const.M_earth.value+const.M_sun.value))/3)**(1/3) # L1 point ratio
Ax,Bx,Wx = [body.x.value for body in bodies]
Ay,By,Wy = [body.y.value for body in bodies]
Wx *= L_mult
Wy *= L_mult
# Setup Figure
fig,ax = plt.subplots(figsize=(10,10))
ax.set_xlim([-1.2,1.2])
ax.set_ylim([-1.2,1.2])
ax.set_xlabel(f"X-{SPICE_Frame} [{units}]",fontsize=17)
ax.set_ylabel(f"Y-{SPICE_Frame} [{units}]",fontsize=17)
sol = ax.scatter(0,0,marker='o',color="yellow",s=100,edgecolors='black')
sol_txt = plt.text(0,0,"Sun",fontsize=17)
AB = ax.plot([Ax[0],Bx[0]],[Ay[0],By[0]],color="black")[0]
AW = ax.plot([Ax[0],Wx[0]],[Ay[0],Wy[0]],color="black")[0]
BW = ax.plot([Bx[0],Wx[0]],[By[0],Wy[0]],color="black")[0]
#A_orb = ax.plot(Ax,Ay,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
#B_orb = ax.plot(Bx,By,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
#W_orb = ax.plot(Wx,Wy,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
angle=np.arange(0,2*np.pi,0.1)
ONE_AU = ax.plot(np.cos(angle),np.sin(angle),color="black",linewidth=0.8,linestyle="--",label="1 AU")
A = ax.scatter(Ax[0],Ay[0],color="blue",s=80,label="STEREO A",edgecolors='black')
B = ax.scatter(Bx[0],By[0],color="red",s=80,label="STEREO B",edgecolors='black')
W = ax.scatter(Wx[0],Wy[0],color="green",s=80,label="Wind",edgecolors='black')
texts = [ax.text(-1.1,1.1,f"{times[0]}",fontsize=20)]
legend = plt.legend(fontsize=12,loc=1)
def plot_constellation(ii) :
AB.set_data([Ax[ii],Bx[ii]],[Ay[ii],By[ii]])
AW.set_data([Ax[ii],Wx[ii]],[Ay[ii],Wy[ii]])
BW.set_data([Bx[ii],Wx[ii]],[By[ii],Wy[ii]])
A.set_offsets([Ax[ii],Ay[ii]])
B.set_offsets([Bx[ii],By[ii]])
W.set_offsets([Wx[ii],Wy[ii]])
texts[0].set_text(f"{times[ii]}")
return [AB,AW,BW,A,B,W]
date_time = datetime(2011,11,23)
ii = np.where(np.array(times) >= date_time)[0][0]
AB,AW,BW,A,B,W=plot_constellation(ii)
fname="AWB_Constellation_HEE.mp4"
fps = 24
anim = animation.FuncAnimation(fig,plot_constellation,frames=len(times)-1,blit=True)
anim.save(fname,fps=fps)<jupyter_output><empty_output><jupyter_text>### Wind-STEREO-PSP-SO Constellation<jupyter_code>SPICE_Origin = 'Sun'
SPICE_Frame = 'ECLIPJ2000'
t_start=(2020, 3, 1)
t_final=(2024, 3, 1)
t_delta=1
units = 'au'
body_list = ['STEREO AHEAD', 'Earth','SPP','SOLO']
# Generate Heliopy Spice Body Objects
bodies = [spice.Trajectory(body) for body in body_list]
# Generate Time Series to Plot Orbits Over
starttime = datetime(*t_start)
endtime = datetime(*t_final)
times = []
while starttime < endtime:
times.append(starttime)
starttime += timedelta(days=t_delta)
unit_dict = {'m' : u.m, 'R_earth' : u.R_earth, 'R_sun' : u.R_sun, 'au' : u.au}
# Generate Positions for each body over the specified times
reference = SPICE_Origin
frame = SPICE_Frame
spice_error_inds = []
for body,index in zip(bodies,range(len(bodies))) :
try : # If no or partial spice data for a given body in the timeframe, remove from plot
body.generate_positions(times, reference, frame)
body.change_units(unit_dict.get(units))
except :
print(f"Some/All SPICE data missing for {body.target} in time range {t_start} to {t_final}")
print(f"Omitting {body.target}")
spice_error_inds.append(index-len(spice_error_inds))
if len(spice_error_inds) > 0 :
for ind in spice_error_inds : bodies.pop(ind)
print(f"Remaining bodies in list : {[body_.target for body_ in bodies]}")
L_mult = 1- ((const.M_earth.value/(const.M_earth.value+const.M_sun.value))/3)**(1/3) # L1 point ratio
Ax,Wx,Px,Sx = [body.x.value for body in bodies]
Ay,Wy,Py,Sy = [body.y.value for body in bodies]
Az,Wz,Pz,Sz = [body.z.value for body in bodies]
Wx *= L_mult
Wy *= L_mult
# Setup Figure
fig,ax = plt.subplots(figsize=(10,10),subplot_kw={'projection' :'3d'})
ax.set_xlim([-1.2,1.2])
ax.set_ylim([-1.2,1.2])
ax.set_zlim([-0.2,0.2])
ax.set_xlabel(f"X-{SPICE_Frame} [{units}]",fontsize=17)
ax.set_ylabel(f"Y-{SPICE_Frame} [{units}]",fontsize=17)
ax.set_zlabel(f"Z-{SPICE_Frame} [{units}]",fontsize=17)
sol = ax.scatter(0,0,0,marker='o',color="yellow",s=100)
sol_txt = ax.text(0,0,0,"Sun",fontsize=17)
AW = ax.plot([Ax[0],Wx[0]],[Ay[0],Wy[0]],[Az[0],Wz[0]],color="black")[0]
AP = ax.plot([Ax[0],Px[0]],[Ay[0],Py[0]],[Az[0],Pz[0]],color="black")[0]
AS = ax.plot([Ax[0],Sx[0]],[Ay[0],Sy[0]],[Az[0],Sz[0]],color="black")[0]
WP = ax.plot([Wx[0],Px[0]],[Wy[0],Py[0]],[Wz[0],Pz[0]],color="black")[0]
WS = ax.plot([Wx[0],Sx[0]],[Wy[0],Sy[0]],[Wz[0],Sz[0]],color="black")[0]
PS = ax.plot([Px[0],Sx[0]],[Py[0],Sy[0]],[Pz[0],Sz[0]],color="black")[0]
#A_orb = ax.plot(Ax,Ay,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
#B_orb = ax.plot(Bx,By,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
#W_orb = ax.plot(Wx,Wy,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
angle=np.arange(0,2*np.pi,0.1)
ONE_AU = ax.plot(np.cos(angle),np.sin(angle),color="black",linewidth=0.8,linestyle="--",label="1 AU")
A = ax.scatter(Ax[0],Ay[0],color="blue",s=80,label="STEREO A")
P = ax.scatter(Px[0],Py[0],color="red",s=80,label="PSP")
W = ax.scatter(Wx[0],Wy[0],color="green",s=80,label="Wind")
S = ax.scatter(Sx[0],Sy[0],color="black",s=80,label="SO")
S_height = ax.plot([Sx[0],Sx[0]],[Sy[0],Sy[0]],[0.,Sz[0]],color="black",linestyle="--")[0]
texts = [ax.text(-1.1,-1.1,0.2,f"{times[0]}",fontsize=20)]
legend = plt.legend(fontsize=12,loc=1)
def plot_constellation(ii) :
AW.set_data([Ax[ii],Wx[ii]],[Ay[ii],Wy[ii]])
AW.set_3d_properties([Az[ii],Wz[ii]])
AP.set_data([Ax[ii],Px[ii]],[Ay[ii],Py[ii]])
AP.set_3d_properties([Az[ii],Pz[ii]])
AS.set_data([Ax[ii],Sx[ii]],[Ay[ii],Sy[ii]])
AS.set_3d_properties([Az[ii],Sz[ii]])
WP.set_data([Wx[ii],Px[ii]],[Wy[ii],Py[ii]])
WP.set_3d_properties([Wz[ii],Pz[ii]])
WS.set_data([Wx[ii],Sx[ii]],[Wy[ii],Sy[ii]])
WS.set_3d_properties([Wz[ii],Sz[ii]])
PS.set_data([Px[ii],Sx[ii]],[Py[ii],Sy[ii]])
PS.set_3d_properties([Pz[ii],Sz[ii]])
S_height.set_data([Sx[ii],Sx[ii]],[Sy[ii],Sy[ii]])
S_height.set_3d_properties([0,Sz[ii]])
A._offsets3d=[[Ax[ii]],[Ay[ii]],[Az[ii]]]
P._offsets3d=[[Px[ii]],[Py[ii]],[Pz[ii]]]
W._offsets3d=[[Wx[ii]],[Wy[ii]],[Wz[ii]]]
S._offsets3d=[[Sx[ii]],[Sy[ii]],[Sz[ii]]]
texts[0].set_text(f"{times[ii]}")
return [AW,AP,AS,WP,WS,PS,A,P,W,S]
date_time = datetime(2020,3,30)
ii = np.where(np.array(times) >= date_time)[0][0]
AW,AP,AS,WP,WS,PS,A,P,W,S=plot_constellation(ii)
fname="AWPS_Constellation.mp4"
fps = 24
anim = animation.FuncAnimation(fig,plot_constellation,frames=len(times)-1,blit=True)
anim.save(fname,fps=fps)<jupyter_output><empty_output><jupyter_text>### PSP Encounters### E1<jupyter_code>SPICE_Origin = 'Sun'
SPICE_Frame = 'HEE'#'ECLIPJ2000'
t_start=(2018, 10, 1)
t_final=(2019, 11, 30)
t_delta=1
units = 'au'
body_list = ['STEREO AHEAD','SPP', 'Earth']
# Generate Heliopy Spice Body Objects
bodies = [spice.Trajectory(body) for body in body_list]
# Generate Time Series to Plot Orbits Over
starttime = datetime(*t_start)
endtime = datetime(*t_final)
times = []
while starttime < endtime:
times.append(starttime)
starttime += timedelta(days=t_delta)
unit_dict = {'m' : u.m, 'R_earth' : u.R_earth, 'R_sun' : u.R_sun, 'au' : u.au}
# Generate Positions for each body over the specified times
reference = SPICE_Origin
frame = SPICE_Frame
spice_error_inds = []
for body,index in zip(bodies,range(len(bodies))) :
try : # If no or partial spice data for a given body in the timeframe, remove from plot
body.generate_positions(times, reference, frame)
body.change_units(unit_dict.get(units))
except :
print(f"Some/All SPICE data missing for {body.target} in time range {t_start} to {t_final}")
print(f"Omitting {body.target}")
spice_error_inds.append(index-len(spice_error_inds))
if len(spice_error_inds) > 0 :
for ind in spice_error_inds : bodies.pop(ind)
print(f"Remaining bodies in list : {[body_.target for body_ in bodies]}")
L_mult = 1- ((const.M_earth.value/(const.M_earth.value+const.M_sun.value))/3)**(1/3) # L1 point ratio
Ax,Bx,Wx = [body.x.value for body in bodies]
Ay,By,Wy = [body.y.value for body in bodies]
Wx *= L_mult
Wy *= L_mult
# Setup Figure
fig,ax = plt.subplots(figsize=(10,10))
ax.set_xlim([-1.2,1.2])
ax.set_ylim([-1.2,1.2])
ax.set_xlabel(f"X-{SPICE_Frame} [{units}]",fontsize=17)
ax.set_ylabel(f"Y-{SPICE_Frame} [{units}]",fontsize=17)
sol = ax.scatter(0,0,marker='o',color="yellow",s=100,edgecolors='black')
sol_txt = plt.text(0,0,"Sun",fontsize=17)
AB = ax.plot([Ax[0],Bx[0]],[Ay[0],By[0]],color="black")[0]
AW = ax.plot([Ax[0],Wx[0]],[Ay[0],Wy[0]],color="black")[0]
BW = ax.plot([Bx[0],Wx[0]],[By[0],Wy[0]],color="black")[0]
#A_orb = ax.plot(Ax,Ay,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
#B_orb = ax.plot(Bx,By,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
#W_orb = ax.plot(Wx,Wy,linestyle = ":",color="black",alpha=0.5,linewidth=0.5)
angle=np.arange(0,2*np.pi,0.1)
ONE_AU = ax.plot(np.cos(angle),np.sin(angle),color="black",linewidth=0.8,linestyle="--",label="1 AU")
A = ax.scatter(Ax[0],Ay[0],color="blue",s=80,label="STEREO A",edgecolors='black')
B = ax.scatter(Bx[0],By[0],color="red",s=80,label="PSP",edgecolors='black')
W = ax.scatter(Wx[0],Wy[0],color="green",s=80,label="Wind",edgecolors='black')
texts = [ax.text(-1.1,1.1,f"{times[0]}",fontsize=20)]
legend = plt.legend(fontsize=12,loc=1)
def plot_constellation(ii) :
AB.set_data([Ax[ii],Bx[ii]],[Ay[ii],By[ii]])
AW.set_data([Ax[ii],Wx[ii]],[Ay[ii],Wy[ii]])
BW.set_data([Bx[ii],Wx[ii]],[By[ii],Wy[ii]])
A.set_offsets([Ax[ii],Ay[ii]])
B.set_offsets([Bx[ii],By[ii]])
W.set_offsets([Wx[ii],Wy[ii]])
texts[0].set_text(f"{times[ii]}")
return [AB,AW,BW,A,B,W]
date_time = datetime(2019,11,3)
ii = np.where(np.array(times) >= date_time)[0][0]
AB,AW,BW,A,B,W=plot_constellation(ii)
fname="AWP_Constellation_HEE.mp4"
fps = 24
anim = animation.FuncAnimation(fig,plot_constellation,frames=len(times)-1,blit=True)
anim.save(fname,fps=fps)
SPICE_Origin = 'Venus'
SPICE_Frame = 'HEE'#'ECLIPJ2000'
t_start=(2018, 10, 1)
t_final=(2024, 11, 30)
t_delta=1./24
units = 'au'
body_list = ['Venus','SPP', 'Earth']
# Generate Heliopy Spice Body Objects
bodies = [spice.Trajectory(body) for body in body_list]
# Generate Time Series to Plot Orbits Over
starttime = datetime(*t_start)
endtime = datetime(*t_final)
times = []
while starttime < endtime:
times.append(starttime)
starttime += timedelta(days=t_delta)
unit_dict = {'m' : u.m, 'R_earth' : u.R_earth, 'R_sun' : u.R_sun, 'au' : u.au}
# Generate Positions for each body over the specified times
reference = SPICE_Origin
frame = SPICE_Frame
spice_error_inds = []
for body,index in zip(bodies,range(len(bodies))) :
try : # If no or partial spice data for a given body in the timeframe, remove from plot
body.generate_positions(times, reference, frame)
body.change_units(unit_dict.get(units))
except :
print(f"Some/All SPICE data missing for {body.target} in time range {t_start} to {t_final}")
print(f"Omitting {body.target}")
spice_error_inds.append(index-len(spice_error_inds))
if len(spice_error_inds) > 0 :
for ind in spice_error_inds : bodies.pop(ind)
print(f"Remaining bodies in list : {[body_.target for body_ in bodies]}")
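# Added note: 6000 km is used as a rough stand-in for Venus's radius (the mean radius is about 6052 km),
# so the plotted quantity approximates the spacecraft's altitude above the Venusian surface.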
plt.plot(times,bodies[1].r.to('km').value-6000)
plt.ylabel("Height Above Venus Surface / km")
plt.xlabel("Time/ Year")
#plt.yscale('log')
np.min(bodies[1].r.to('km').value-6000)<jupyter_output><empty_output>
|
non_permissive
|
/Visualizations/Orbital_Trajectories.ipynb
|
samuel-badman/Home
| 14 |
<jupyter_start><jupyter_text># Machine Learning : Feature Space
Using Feature Space with Breast Cancer dataset
What is feature space? Feature space is a numeric representation of raw data.
"A feature vector is an n-dimensional vector of numerical features that represents some object - the vector space associated with these vectors is often called the feature space."### Title: Wisconsin Breast Cancer Database (January 8, 1991)
This database is used to try to predict whether a tumour is malignant or benign; it consists of measurements of the tumour together with its diagnosed nature.
Diagnosis has 2 fields B=Benign, M=Malignant
4. Relevant information
Features are computed from a digitized image of a fine needle
aspirate (FNA) of a breast mass. They describe
characteristics of the cell nuclei present in the image.
A few of the images can be found at
http://www.cs.wisc.edu/~street/images/
Separating plane described above was obtained using
Multisurface Method-Tree (MSM-T) [K. P. Bennett, "Decision Tree
Construction Via Linear Programming." Proceedings of the 4th
Midwest Artificial Intelligence and Cognitive Science Society,
pp. 97-101, 1992], a classification method which uses linear
programming to construct a decision tree. Relevant features
were selected using an exhaustive search in the space of 1-4
features and 1-3 separating planes.
The actual linear program used to obtain the separating plane
in the 3-dimensional space is that described in:
[K. P. Bennett and O. L. Mangasarian: "Robust Linear
Programming Discrimination of Two Linearly Inseparable Sets",
Optimization Methods and Software 1, 1992, 23-34].
This database is also available through the UW CS ftp server:
ftp ftp.cs.wisc.edu
cd math-prog/cpo-dataset/machine-learn/WDBC/
5. Number of instances: 569
6. Number of attributes: 32 (ID, diagnosis, 30 real-valued input features)
7. Attribute information
1) ID number
2) Diagnosis (M = malignant, B = benign)
3-32)
Ten real-valued features are computed for each cell nucleus:
a) radius (mean of distances from center to points on the perimeter)
b) texture (standard deviation of gray-scale values)
c) perimeter
d) area
e) smoothness (local variation in radius lengths)
f) compactness (perimeter^2 / area - 1.0)
g) concavity (severity of concave portions of the contour)
h) concave points (number of concave portions of the contour)
i) symmetry
j) fractal dimension ("coastline approximation" - 1)
Several of the papers listed above contain detailed descriptions of
how these features are computed.
The mean, standard error, and "worst" or largest (mean of the three
largest values) of these features were computed for each image,
resulting in 30 features. For instance, field 3 is Mean Radius, field
13 is Radius SE, field 23 is Worst Radius.
All feature values are recoded with four significant digits.
8. Missing attribute values: none
9. Class distribution: 357 benign, 212 malignant
<jupyter_code># import preliminaries
import numpy as np
import pandas as pd
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data'
df = pd.read_csv(url, header=None)
df.head(10)
#transfer into numpy
X = np.array(df.loc[:, 2:].values)
y = np.array(df.loc[:, 1].values) # class label is contained here
y[y == 'M'] = 1 # Malignant
y[y == 'B'] = 0 # Benign
y = y.astype(int)  # typecast (astype returns a new array, so assign it back)
#Class Distribution of the data (mean, sd and variance)
np.mean(y), np.std(y), np.var(y)<jupyter_output><empty_output><jupyter_text>### Plot 2 Features and scatter plot
1)ID number
2) Diagnosis (M = malignant, B = benign) 3-32)
Ten real-valued features are computed for each cell nucleus:
a) radius (mean of distances from center to points on the perimeter)
b) texture (standard deviation of gray-scale values)
c) perimeter
d) area
e) smoothness (local variation in radius lengths)
f) compactness (perimeter^2 / area - 1.0)
g) concavity (severity of concave portions of the contour)
h) concave points (number of concave portions of the contour)
i) symmetry
j) fractal dimension ("coastline approximation" - 1)
Several of the papers listed above contain detailed descriptions of how these features are computed.
The mean, standard error, and "worst" or largest (mean of the three largest values) of these
features were computed for each image, resulting in 30 features. For instance, field 3 is Mean
Radius, field 13 is Radius SE, field 23 is Worst Radius.<jupyter_code>#get some features of the tumour, indexing starts with 0
feature1 = np.array(df.loc[:, 2].values) #mean radius
feature2 = np.array(df.loc[:, 3].values) #mean texture
feature3 = np.array(df.loc[:, 5].values) #mean area
feature1
from matplotlib import pyplot as plt
plt.figure(figsize=(14,8))
plt.scatter(feature1, feature2, c=y) # plot the feature space with color coded classes
#Plot three features in 3D
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(14,12))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(feature2, feature1, feature3, c=y)
ax.set_xlabel('feature2')
ax.set_ylabel('feature1')
ax.set_zlabel('feature3')
plt.show()<jupyter_output><empty_output><jupyter_text>## Loading the pima indians diabetes dataset<jupyter_code>%matplotlib inline
<jupyter_output><empty_output><jupyter_text>The diagnostic, binary-valued variable investigated is whether the patient shows signs of diabetes according to World Health Organization criteria (i.e., if the 2 hour post-load plasma glucose was at least 200 mg/dl at any survey examination or if found during routine medical care). The population lives near Phoenix, Arizona, USA.
In particular, all patients here are females at least 21 years old of Pima Indian heritage.
For Each Attribute: (all numeric-valued)
1. Number of times pregnant
2. Plasma glucose concentration a 2 hours in an oral glucose tolerance test
3. Diastolic blood pressure (mm Hg)
4. Triceps skin fold thickness (mm)
5. 2-Hour serum insulin (mu U/ml)
6. Body mass index (weight in kg/(height in m)^2)
7. Diabetes pedigree function
8. Age (years)
9. Class variable (0 or 1)
Class Distribution: (class value 1 is interpreted as "tested positive for diabetes") <jupyter_code>#suppress warning
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# import required stuff
import numpy as np
import pandas as pd
columns = ['n_preg', 'glucose', 'diastolic_bp', 'thickness',
'insulin', 'bmi', 'pedigree', 'age', 'class']
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data', header=None, names=columns)
<jupyter_output><empty_output><jupyter_text>#### Exploratory Data Analysis<jupyter_code>df.head(5)
# some simple descriptive statistics
df.describe()<jupyter_output><empty_output><jupyter_text>### IMPUTATION<jupyter_code># value 0 corresponds to missing value and needs to be imputed
cols = ['n_preg', 'glucose', 'diastolic_bp', 'thickness', 'insulin', 'bmi', 'pedigree', 'age']
for col in cols:
print (col, df[df[col]==0].shape)
# replace zero value with mean of its corresponding class
for col in cols:
mean0 = df.loc[df[col]!=0, [col, 'class']].groupby('class').mean().iloc[0,0]
mean1 = df.loc[df[col]!=0, [col, 'class']].groupby('class').mean().iloc[1,0]
df.loc[(df[col]==0) & (df['class']==0), col] = mean0
df.loc[(df[col]==0) & (df['class']==1), col] = mean1
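# Added sanity check (not in the original notebook): confirm that no zeros remain in the imputed columns.
# Two caveats worth noting: zero is a legitimate value for 'n_preg', and imputing with class-conditional
# means uses the label, which leaks target information into the features.
print((df[cols] == 0).sum())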
# read the imputed data
df.head(10)<jupyter_output><empty_output><jupyter_text>### Exploring relations<jupyter_code># we want to see the correlation between pairs of variables
# a basis for feature selection
import matplotlib.pyplot as plt
import seaborn as sns
corr = df.corr()
ax = sns.heatmap(corr, annot = True, cmap="YlGnBu")
plt.setp(ax.axes.get_xticklabels(), rotation=45)
plt.title('Correlation Matrix for Pima Diabetes Data')<jupyter_output><empty_output><jupyter_text>### Exploring Feature Space <jupyter_code>import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
feature1 = df['glucose'].values
feature2 = df['insulin'].values
feature3 = df['bmi'].values
df['class']=df['class'].astype('str')
c = df['class'].values
df['class']=df['class'].astype('int')
c[c=='0'] = 'b' #negative diagnosis diabetes
c[c=='1'] = 'r' #positive diagnosis diabetes
fig = plt.figure(figsize=(18,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(feature1, feature2, feature3, c=c)
ax.set_xlabel('glucose')
ax.set_ylabel('insulin')
ax.set_zlabel('bmi')
plt.show()<jupyter_output><empty_output>
|
no_license
|
/Feature_Space_Breast_Cancer.ipynb
|
HilaryMAlabi/projects
| 8 |
<jupyter_start><jupyter_text>read an image<jupyter_code>img=cv2.imread('car2.jpg')
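# Note: `model` and `classLabel` are assumed to be created in earlier cells of this notebook
# (an OpenCV DNN detection model and its list of class names), along with the cv2 and
# matplotlib.pyplot (plt) imports; none of them are redefined here.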
plt.imshow(img) # OpenCV loads images in BGR order, so the colours look swapped in matplotlib
plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))
ClassIndex,confidece,bbox=model.detect(img,confThreshold=0.5)
print(ClassIndex)
font_scale=2
font=cv2.FONT_HERSHEY_PLAIN
for ClassInd,conf,boxes in zip(ClassIndex.flatten(),confidece.flatten(),bbox):
cv2.rectangle(img,boxes,(255,0,0),1)
cv2.putText(img,classLabel[ClassInd-1],(boxes[0]+10,boxes[1]+40),font,fontScale=font_scale,color=(0,255,0),thickness=2)
#cv2.putText(img,classLabel[ClassInd-1],(boxes[1]+40,font))
plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))<jupyter_output><empty_output><jupyter_text># Video<jupyter_code>cap=cv2.VideoCapture('WhatsApp Video 2021-06-12 at 09.55.26.mp4')
# check if the video opened correctly
if not cap.isOpened():
cap=cv2.VideoCapture(0)
if not cap.isOpened():
raise IOError("Can not open video")
font_scale=2
font=cv2.FONT_HERSHEY_PLAIN
while True:
ret,frame=cap.read()
ClassIndex,confidece,bbox=model.detect(frame,confThreshold=0.55)
print(ClassIndex)
if(len(ClassIndex)!=0):
for ClassInd,conf,boxes in zip(ClassIndex.flatten(),confidece.flatten(),bbox):
if(ClassInd<=80):
                cv2.rectangle(frame,boxes,(255,0,0),2)  # draw on the current video frame, not the still image
cv2.putText(frame,classLabel[ClassInd-1],(boxes[0]+10,boxes[1]+40),font,fontScale=font_scale,color=(0,255,0),thickness=2)
cv2.imshow('Object detection',frame)
if cv2.waitKey(2) & 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()<jupyter_output>[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[ 1]
[ 1]
[ 1]
[62]]
[[ 1]
[ 1]
[ 1]
[62]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
[[1]
[1]]
()
()
()
()
()
()
()
()
()
()
()
()
()
()
()
()
[[28]]
()
[[72]
[85]]
[[72]]
[[72]]
[[72]]
[[ 1]
[72]]
[[1]]
[[1]]
[[1]]
()
()
()
()
()
()
[[1]]
()
[[1]]
[[1]]
[[1]]
()
()
[[1]]
()
()
()
()
()
()
()
()
()
()
()
()
()
[[1]]
[[1[...]
|
no_license
|
/.ipynb_checkpoints/Object_Detection_Code-checkpoint.ipynb
|
Kalashri99/Object_detection_Project
| 2 |
<jupyter_start><jupyter_text>Homework objective
Become familiar with logical operations
Key points
The five main groups of logical functions and the operations that correspond to them. Problem:
english_score = np.array([55,89,76,65,48,70])
math_score = np.array([60,85,60,68,55,60])
chinese_score = np.array([65,90,82,72,66,77])
The three arrays above hold the English, math and Chinese scores of six students; the first element corresponds to the first student (for example, the first student scored 55 in English, 60 in math and 65 in Chinese). Use this data to answer the questions below.
1. How many students scored higher in English than in math?
2. Is Chinese the highest score for every student in the class?
<jupyter_code>import numpy as np
eng=np.array([55,89,76,65,48,70])
math=np.array([60,85,60,68,55,60])
chi=np.array([65,90,82,72,66,77])
#1. How many students scored higher in English than in math?
print(np.sum(eng>math))
#2. Is Chinese the highest score for every student in the class?
data=np.logical_and(chi>eng,chi>math)
print(data)<jupyter_output>[ True True True True True True]
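A small follow-up cell (not part of the original homework): `np.all` collapses the per-student booleans above into a single yes/no answer for question 2.<jupyter_code>print(np.all(data))  # True means Chinese is the highest score for every student<jupyter_output><empty_output>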
|
no_license
|
/Homework/.ipynb_checkpoints/Day_004_HW-checkpoint.ipynb
|
mar3415/ML100Days
| 1 |
<jupyter_start><jupyter_text>## Code Binary Search<jupyter_code>n=int(input())
li1=[int(x) for x in input().split()]
search=int(input())
start=0
end=n-1
while start<=end:
mid=(start+end)//2
if li1[mid]==search:
print(mid)
break
elif li1[mid]>search:
end=mid-1
else:
start=mid+1
else:
print(-1)<jupyter_output>5
1 2 5 6 9
5
2
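A brief added aside (not part of the original exercise set): the standard library's `bisect` module already implements binary search, so it can be used to cross-check the hand-written version on the same sorted list.<jupyter_code>from bisect import bisect_left

li1 = [1, 2, 5, 6, 9]
search = 5
pos = bisect_left(li1, search)  # leftmost position where `search` could be inserted
print(pos if pos < len(li1) and li1[pos] == search else -1)<jupyter_output><empty_output>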
<jupyter_text>## Code Bubble Sort<jupyter_code>n=int(input())
arr = [int(x) for x in input().split()]
def bubbleSort(arr):
n = len(arr)
for i in range(n-1):
for j in range(0, n-i-1):
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
bubbleSort(arr)
for i in range(len(arr)):
print (arr[i],end=" ")<jupyter_output>5
1 8 3 4 6
1 3 4 6 8 <jupyter_text>## Code Insertion Sort<jupyter_code>n=int(input())
arr=[int(x) for x in input().split()]
def insertion(arr):
a=len(arr)
for i in range(1,n):
j=i-1
temp=arr[i]
while (j>=0 and arr[j]>temp):
arr[j+1]=arr[j]
j=j-1
arr[j+1]=temp
insertion(arr)
for i in range(n):
print(arr[i],end=" ")<jupyter_output>5
1 3 6 9 0
0 1 3 6 9 <jupyter_text>## Code Merge Two Sorted Arrays<jupyter_code>n=int(input())
arr1=[int(x) for x in input().split()]
m=int(input())
arr2=[int(x) for x in input().split()]
list1=[]
i=0
j=0
while(i<n and j<m):
if arr1[i]<=arr2[j]:
list1.append(arr1[i])
i=i+1
else:
list1.append(arr2[j])
j=j+1
while(i<len(arr1)):
list1.append(arr1[i])
i=i+1
while(j<len(arr2)):
list1.append(arr2[j])
j=j+1
for i in range(len(list1)):
print(list1[i],end=" ")<jupyter_output>5
1 5 6 8 9
4
1 3 6 9
1 1 3 5 6 6 8 9 9 <jupyter_text>## Push Zeros to end<jupyter_code>n=int(input())
arr=[int(x) for x in input().split()]
list1=[]
list2=[]
for i in range(n):
if arr[i]==0:
list2.append(arr[i])
else:
list1.append(arr[i])
a=len(list1)+len(list2)
list3=[]
for i in range(len(list1)):
list3.append(list1[i])
for i in range(len(list2)):
list3.append(list2[i])
for i in range(a):
print(list3[i],end=" ")<jupyter_output>5
1 2 0 0 4
1 2 4 0 0 <jupyter_text>## Rotate array<jupyter_code>def Rotate(arr, d):
n=len(arr)
list1=[]
list2=[]
list3=[]
for i in range(d,n):
list1.append(arr[i])
for i in range(0,d):
list2.append(arr[i])
for i in range(len(list1)):
list3.append(list1[i])
for i in range(len(list2)):
list3.append(list2[i])
for i in range(n):
print(list3[i],end=" ")
n=int(input())
arr=list(int(i) for i in input().strip().split(' '))
d=int(input())
Rotate(arr, d)<jupyter_output>5
1 4 6 9 7
2
6 9 7 1 4 <jupyter_text>## Second Largest in array<jupyter_code>n=int(input())
arr=[int(x) for x in input().split()]
def print2largest(arr,arr_size):
first = second = -2147483648
for i in range(arr_size):
# first and second
if (arr[i] > first):
second = first
first = arr[i]
elif (arr[i] > second and arr[i] != first):
second = arr[i]
print(second)
print2largest(arr, n) <jupyter_output>5
1 2 55 6 9
9
<jupyter_text>## Check Array Rotation<jupyter_code># Given an integer array, which is sorted (in increasing order) and
# has been rotated by some number k in clockwise direction. Find and return the k.
n=int(input())
arr=[int(x) for x in input().split()]
c = -1  # stays -1 when the array is not rotated, so 0 is printed
for i in range(n-1):
    if arr[i]>arr[i+1]:
        c=i
        break
print(c+1)<jupyter_output>5
5 6 1 2 3
2
<jupyter_text>## Sort 0 1 2<jupyter_code>n=int(input())
arr = [int(x) for x in input().split()]
def sort012( a, arr_size):
lo = 0
hi = arr_size - 1
mid = 0
while mid <= hi:
if a[mid] == 0:
a[lo], a[mid] = a[mid], a[lo]
lo = lo + 1
mid = mid + 1
elif a[mid] == 1:
mid = mid + 1
else:
a[mid], a[hi] = a[hi], a[mid]
hi = hi - 1
return a
sort012(arr,n)
for i in range(n):
print(arr[i],end=" ")<jupyter_output>5
1 0 2 0 2
0 0 1 2 2 <jupyter_text>## Sum of two arrays<jupyter_code># Two random integer arrays are given A1 and A2, in which numbers from 0 to 9 are present at
# every index (i.e. single digit integer is present at every index of both given arrays).
# You need to find sum of both the input arrays (like we add two integers) and
# put the result in another array i.e. output array (output arrays should also contain only single digits at every index).
# The size A1 & A2 can be different.
# Note : Output array size should be 1 more than the size of bigger array and place 0 at the
# 0th index if there is no carry. No need to print the elements of output array.
n=int(input())
arr1=[int(x) for x in input().split()]
m=int(input())
arr2=[int(x) for x in input().split()]
i=n-1
j=m-1
list1=[]
carry=0
while (i>=0 and j>=0):
s = arr1[i]+arr2[j]+carry
sum=s%10
carry=s//10
i=i-1
j=j-1
list1.append(sum)
while(i>=0):
    s = arr1[i]+carry
    sum=s%10
    carry=s//10
    list1.append(sum)
    i=i-1                      # move to the next digit; without this the loop never ends
while(j>=0):
    s = arr2[j]+carry
    sum=s%10
    carry=s//10
    list1.append(sum)
    j=j-1                      # move to the next digit; without this the loop never ends
# the output has one extra position, which holds the final carry (0 if there is none)
if n>=m:
    if len(list1)!=n+1:
        list1.append(carry)
else:
    if len(list1)!=m+1:
        list1.append(carry)
for i in range(len(list1)-1,-1,-1):
print(list1[i],end=" ")<jupyter_output>5
1 0 2 3 6
5
1 2 3 6 9
0 2 2 6 0 5
no_license | /Searching & Sorting.ipynb | simran2104/Intro-to-Python-Coding-Ninjas | 10
<jupyter_start><jupyter_text># Experiment 4
### Master's Thesis: Domain Adaptation in Wireless Capsule Endoscopy Diagnosis
#### Author: Èlia FICAPAL VILA
#### Supervisor: Dr. Santi SEGUÍ* Model: ResNet with Triplet Loss
* Trained with: PillCam SB2 images and, optionally, PillCam SB3 images
* Testing on: PillCam SB3 imagesThe second approach consists of a ResNet stacked with a dense layer. The ResNet is used to extract the main features and create a rich vector representation of a given size for each image, the embedding. More specifically, the 50-layer ResNet pretrained on ImageNet is considered. The triplet loss is introduced in order to keep the embeddings of images belonging to the same class closer together in the Euclidean space than those from different classes.
Once the embedding is computed, it is fed to the dense layer, which has as many units as there are classes. It outputs a score for each class which, in turn, can be converted into probabilities by applying a softmax function. In order to measure the performance of the classification model and then optimise the parameters, the cross-entropy loss is used.
Moreover, L2-regularisation is used to prevent overfitting.
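The triplet loss itself lives in the project's model code (model.network); purely as an illustration, a batch-all triplet loss with a hinge margin on squared Euclidean distances could be sketched as follows (the function name and tensor shapes here are assumptions, not the project's API):

    import tensorflow as tf

    def batch_all_triplet_loss(embeddings, labels, margin=0.2):
        """Average hinge loss over all valid (anchor, positive, negative) triplets in a batch."""
        # Pairwise squared Euclidean distances between embeddings, shape (B, B).
        dot = tf.matmul(embeddings, embeddings, transpose_b=True)
        sq_norms = tf.linalg.diag_part(dot)
        dist = tf.maximum(tf.expand_dims(sq_norms, 1) - 2.0 * dot + tf.expand_dims(sq_norms, 0), 0.0)

        # loss[a, p, n] = d(a, p) - d(a, n) + margin
        loss = tf.expand_dims(dist, 2) - tf.expand_dims(dist, 1) + margin

        # Keep only valid triplets: label(a) == label(p), label(a) != label(n), a != p.
        lbl = tf.expand_dims(labels, 0)
        same = tf.equal(lbl, tf.transpose(lbl))
        not_diag = tf.logical_not(tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool))
        valid = tf.logical_and(tf.expand_dims(tf.logical_and(same, not_diag), 2),
                               tf.expand_dims(tf.logical_not(same), 1))

        loss = tf.maximum(loss * tf.cast(valid, tf.float32), 0.0)
        num_active = tf.reduce_sum(tf.cast(loss > 1e-16, tf.float32))
        return tf.reduce_sum(loss) / (num_active + 1e-16)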
The following image shows the general pipeline.

The model is trained using mostly images obtained from PillCam SB2, but some PillCam SB3 images will also be added. First of all, the model trained with 2000 SB2 images per class will be considered. Then, 5, 10 and 25 SB3 images per class will be added for training. Every batch will include 50 SB2 images and 14 SB3 images. Data augmentation such as rotations and flips will be applied to the SB3 images (a rough sketch follows below). All these models will be evaluated using 25 SB3 images per class.
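For illustration only, the kind of flip/rotation augmentation described above could be written with tf.image roughly like this (a sketch, not the project's reader code; the function name is made up):

    import tensorflow as tf

    def augment_sb3(image, k):
        """Random flips plus a 90-degree rotation selected by k in {0, 1, 2, 3} (illustrative only)."""
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        return tf.image.rot90(image, k=k)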
In this notebook it is shown how to train and evaluate the model. Also, some results ready to be analysed are plotted in the end.<jupyter_code>#Libraries
import tensorflow as tf
import numpy as np
import random
import os
import pickle
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline<jupyter_output><empty_output><jupyter_text>## Selecting the parameters
First, we give values to the parameters. The majority of parameters will stay the same for all the experiments (such as the image shape, the optimiser, the learning rate, etc.). However, there are other values that should be modified.
In this case, we will:
* use 2000 SB2 images per class for training $\Longrightarrow$ params['train-csize-SB2']=2000
* use 0 SB2 images per class for testing $\Longrightarrow$ params['eval-csize-SB2']=0
* use 0,5,10 or 25 SB3 images per class for training $\Longrightarrow$ params['train-csize-SB3']=0,5,10 or 25
* use 25 SB3 images per class for testing $\Longrightarrow$ params['eval-csize-SB3']=25
* test on SB3 images $\Longrightarrow$ params['eval']='SB3'
* include the triplet loss $\Longrightarrow$ params['include-triplet-loss']=True
Since we want to use the model from Experiment 2, we would have to specify the directory containing the checkpoints in params['warm-dir'] and also change params['warm-dir-short'] so that it can be included in the name of the directory.<jupyter_code>params = {
#Parameters for the reader
'train-csize-SB2' : 2000,
'eval-csize-SB2' : 0,
'train-csize-SB3' : 0,
'eval-csize-SB3' : 25,
'eval' : 'SB3', #can be SB2 or SB3
# Parser parameters
'image-shape' : (256,256,3),
# Optimizer parameters
'optimizer': 'SGD', #Adagrad, Adam, SGD, MomentumOptimizer
'learning-rate': 1e-4,
# Embedding parameters
'embedding-size': 2048,
# Model
'resnet-size' : 50,
'num-classes' : 7,
'train-only-last-layer' : True,
'include-classifier': True,
# Losses
'cross-entropy': True,
'include-triplet-loss': True,
'triplet-strategy': "batch-all", # batch-all, batch-hard, mixed
    'triplet-margin-strategy' : 'hinge-margin', # hinge-margin, soft-margin
'triplet-distance': 'euclid', # euclid, cosine
'margin' : 0.2,
'include-l2-loss': True,
'weight-decay': 2e-4,
    # Summaries
'with-scalars': True,
'with-histograms': False,
'with-images': False,
# Run Model
'shuffle' : False,
'batch-size': 64,
'delete-existing': False,
'warm-dir': None,
'warm-dir-short': 'TL', ##TL, noTL or None
'prefetch': 64,
'train-epochs': 1,
'eval-epochs': 1,
#Others
'gpu' : '/gpu:1',
'save-summary-steps' : 50,
# Eval Model
'fetch-tensors': ['confusion_matrix','embeddings','predictions','labels','probabilities']
}<jupyter_output><empty_output><jupyter_text>The directory where everything in the model will be saved is defined as follows.<jupyter_code>if params['include-triplet-loss']:
params['model-dir'] = 'experiments/SB2{}-{}_SB3{}-{}_BS{}_emb{}_LR{}_{}_{}_marg{}_WD{}'.format(params['train-csize-SB2'],params['eval-csize-SB2'],
params['train-csize-SB3'],params['eval-csize-SB3'],
params['batch-size'],params['embedding-size'],
params['learning-rate'], params['optimizer'],
params['triplet-strategy'],
params['margin'],params['warm-dir-short'])
else:
params['model-dir'] = 'experiments/SB2{}-{}_SB3{}-{}_BS{}_emb{}_LR{}_{}_WD{}'.format(params['train-csize-SB2'],params['eval-csize-SB2'],
params['train-csize-SB3'],params['eval-csize-SB3'],
params['batch-size'],params['embedding-size'],
params['learning-rate'], params['optimizer'],
params['warm-dir-short'])
print(params['model-dir'])<jupyter_output>experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL
<jupyter_text>## The reader
Using the selected parameters, the reader is used to read data and to define generators for the training and test datasets.<jupyter_code>from model.reader import SB2_SB3_reader
reader = SB2_SB3_reader(train_images_SB2 = params['train-csize-SB2'],
test_images_SB2=params['eval-csize-SB2'],
train_images_SB3 = params['train-csize-SB3'],
eval_on = params['eval'],
batch_size=params['batch-size'],
shape = params['image-shape'])
reader.define_train_test_SB2(42) #split images from intestines into train and test
params['train-num-samples'] = len(reader.train_key_SB2) + len(reader.SB3_train) #number of training images
params['num-samples'] = len(reader.test_key_SB2) + len(reader.SB3_test) #number of testing images
print("Training samples:",params['train-num-samples'], "Testing samples:",params['num-samples'])<jupyter_output><empty_output><jupyter_text>## The model
The neural network is defined using both the selected parameters and the reader that has been just defined.<jupyter_code>from model.network import Network
net = Network(params, reader)<jupyter_output><empty_output><jupyter_text>## Training the model
The model is trained with the following command, running the number of epochs defined in params['train-epochs']. If a model with the same directory name already exists, it can recover that model and continue training from its weights.
It also evaluates the model every certain number of epochs, as defined in params['eval-epochs'].<jupyter_code>net.run_model(wait = True, save_results = False)
#For creating this notebook, only one epoch has been performed.<jupyter_output>experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL
Target model directory: experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL
INFO:tensorflow:Using config: {'_model_dir': 'experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 1000000000.0, '_session_config': gpu_options {
allow_growth: true
}
allow_soft_placement: true
log_device_placement: true
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f33e688ffd0>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chi[...]<jupyter_text>## Evaluating the model
The following command is used in order to evaluate the model on the test dataset and obtain the results specified in params['fetch-tensors'].<jupyter_code>net.eval_model(save_results = True) <jupyter_output>experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL
Found candidate model at: experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL
Target model directory: experiments/SB22000-0_SB30-25_BS64_emb2048_LR0.0001_SGD_batch-all_marg0.2_WDTL
<jupyter_text>Uncomment the following command to save the results using pickle.<jupyter_code>#pickle.dump(net.results, open( "res_experiments/res_{}-{}_{}-{}_TL.p".format(params['train-csize-SB2'],params['eval-csize-SB2'],
# params['train-csize-SB3'],params['eval-csize-SB3']),
# "wb" ) )<jupyter_output><empty_output><jupyter_text>## Analysing the results
The following cells will be used to analyse the results obtained from evaluating our model on the test dataset. First of all, the results that we previously saved from our trained model are loaded using pickle from the "res_experiments" folder. Then a Python class is built, and it will be used to plot the results. Then, we will show:### 1) Normalised confusion matrix
The normalised confusion matrix $M = (m_{i,j})_{1 \leq i,j \leq 7}$ that will be plotted has elements defined as $$m_{i,j} = 100 \, \frac{P_{i,j}}{R_i},$$ where $P_{i,j}$ is the number of images of actual class $i$ that the model classified as class $j$, whereas $R_i$ is the total number of images belonging to class $i$.
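For reference, a row-normalised matrix of this kind can be computed directly from arrays of true and predicted labels; the snippet below is only an illustration with made-up labels, not the notebook's model.results code:

    import numpy as np
    from sklearn.metrics import confusion_matrix

    y_true = np.array([0, 0, 1, 1, 2, 2])            # hypothetical ground-truth classes
    y_pred = np.array([0, 1, 1, 1, 2, 0])            # hypothetical predictions
    cm = confusion_matrix(y_true, y_pred)
    row_totals = cm.sum(axis=1, keepdims=True)
    cm_norm = 100 * cm / np.maximum(row_totals, 1)   # guard against classes with no images
    print(cm_norm.round(2))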
The classes correspond to:<jupyter_code>ij = [1,2,3,4,5,6,7]
classes = ['Bubbles','Clear Blob','Dilated','Turbid','Undefined','Wall','Wrinkle']
df = pd.DataFrame(data={'i,j':ij,'Class':classes},index=None).set_index('i,j')
df<jupyter_output><empty_output><jupyter_text>### 2) Plot using different colours
Once the embeddings of the images in the test dataset are obtained by the model, their dimensionality can be reduced to two-dimensional vectors using Uniform Manifold Approximation and Projection (UMAP). However, it is important to remark that these vectors are just a projection, so far from all of the information is preserved. In fact, if the embedding size is 2048, this means that 2046 components are lost.
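A minimal sketch of that projection step, assuming the umap-learn package and a placeholder array in place of the real embeddings returned by the evaluation:

    import numpy as np
    import umap

    embeddings = np.random.rand(175, 2048)            # placeholder for the test-set embeddings
    reducer = umap.UMAP(n_components=2, random_state=42)
    projection = reducer.fit_transform(embeddings)    # (N, 2) points used in the scatter plots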
This chart is a scatter plot using a different colour per class and a different marker depending on whether the classification is right or wrong. The dot indicates that the image has been correctly classified, and the cross otherwise.### 3) Plot using images
Again, the UMAP projections of the embeddings are used. In this case, the markers are the actual images. The previous plot can be used to see which class each image belongs to.
This chart helps to visually identify which features the model uses to match an image with a label.### 1st experiment: adding 0 SB3 images for training<jupyter_code>name='res_{}-{}_{}-{}_TL'.format(params['train-csize-SB2'],params['eval-csize-SB2'],
params['train-csize-SB3'],params['eval-csize-SB3'])
net_results = pickle.load(open( "res_experiments/{}.p".format(name), "rb" ) )
from model.results import get_results
res = get_results(net_results)<jupyter_output><empty_output><jupyter_text>#### 1) Normalised confusion matrix<jupyter_code>print(100*res.confusion_matrix_norm.round(4))<jupyter_output>[[28. 0. 4. 68. 0. 0. 0.]
[ 8. 12. 48. 32. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0.]
[12. 0. 0. 88. 0. 0. 0.]
[ 4. 0. 60. 28. 8. 0. 0.]
[ 8. 0. 4. 48. 4. 32. 4.]
[ 0. 0. 8. 4. 0. 0. 88.]]
<jupyter_text>#### 2) Plot using different colours<jupyter_code>res.plot_colours(save=False,legend=True,filename="colours_{}".format(name))<jupyter_output><empty_output><jupyter_text>#### 3) Plot using images<jupyter_code>res.plot_images(save=False,
eval_on=params['eval'],train_size=params['train-csize-SB2'],test_size=params['eval-csize-SB2'],
filename="images_{}".format(name))<jupyter_output><empty_output><jupyter_text>### 2nd experiment: adding 5 SB3 images for training<jupyter_code>params['train-csize-SB3'] = 5
name='res_{}-{}_{}-{}_TL'.format(params['train-csize-SB2'],params['eval-csize-SB2'],
params['train-csize-SB3'],params['eval-csize-SB3'])
net_results = pickle.load(open( "res_experiments/{}.p".format(name), "rb" ) )
res = get_results(net_results)<jupyter_output><empty_output><jupyter_text>#### 1) Normalised confusion matrix<jupyter_code>print(100*res.confusion_matrix_norm.round(4))<jupyter_output>[[ 88. 0. 0. 8. 0. 4. 0.]
[ 0. 60. 8. 4. 12. 4. 12.]
[ 0. 0. 0. 0. 0. 0. 0.]
[ 4. 0. 0. 96. 0. 0. 0.]
[ 0. 4. 8. 4. 80. 4. 0.]
[ 0. 0. 0. 0. 0. 100. 0.]
[ 0. 0. 0. 4. 0. 4. 92.]]
<jupyter_text>#### 2) Plot using different colours<jupyter_code>res.plot_colours(save=False,legend=True,filename="colours_{}".format(name))<jupyter_output><empty_output><jupyter_text>#### 3) Plot using images<jupyter_code>res.plot_images(save=False,
eval_on=params['eval'],train_size=params['train-csize-SB2'],test_size=params['eval-csize-SB2'],
filename="images_{}".format(name))<jupyter_output><empty_output><jupyter_text>### 3rd experiment: adding 10 SB3 images for training<jupyter_code>params['train-csize-SB3'] = 10
name='res_{}-{}_{}-{}_TL'.format(params['train-csize-SB2'],params['eval-csize-SB2'],
params['train-csize-SB3'],params['eval-csize-SB3'])
net_results = pickle.load(open( "res_experiments/{}.p".format(name), "rb" ) )
res = get_results(net_results)<jupyter_output><empty_output><jupyter_text>#### 1) Normalised confusion matrix<jupyter_code>print(100*res.confusion_matrix_norm.round(4))<jupyter_output>[[ 92. 0. 0. 4. 0. 4. 0.]
[ 0. 68. 4. 0. 16. 0. 12.]
[ 0. 0. 0. 0. 0. 0. 0.]
[ 4. 0. 0. 96. 0. 0. 0.]
[ 4. 16. 0. 0. 72. 4. 4.]
[ 0. 0. 0. 0. 0. 100. 0.]
[ 0. 0. 0. 4. 0. 4. 92.]]
<jupyter_text>#### 2) Plot using different colours<jupyter_code>res.plot_colours(save=False,legend=True,filename="colours_{}".format(name))<jupyter_output><empty_output><jupyter_text>#### 3) Plot using images<jupyter_code>res.plot_images(save=False,
eval_on=params['eval'],train_size=params['train-csize-SB2'],test_size=params['eval-csize-SB2'],
filename="images_{}".format(name))<jupyter_output><empty_output><jupyter_text>### 4th experiment: adding 25 SB3 images for training<jupyter_code>params['train-csize-SB3'] = 25
name='res_{}-{}_{}-{}_TL'.format(params['train-csize-SB2'],params['eval-csize-SB2'],
params['train-csize-SB3'],params['eval-csize-SB3'])
net_results = pickle.load(open( "res_experiments/{}.p".format(name), "rb" ) )
res = get_results(net_results)<jupyter_output><empty_output><jupyter_text>#### 1) Normalised confusion matrix<jupyter_code>print(100*res.confusion_matrix_norm.round(4))<jupyter_output>[[ 84. 0. 0. 12. 0. 4. 0.]
[ 0. 80. 0. 0. 12. 0. 8.]
[ 0. 0. 0. 0. 0. 0. 0.]
[ 4. 0. 0. 96. 0. 0. 0.]
[ 0. 4. 0. 8. 84. 0. 4.]
[ 0. 0. 0. 0. 0. 100. 0.]
[ 0. 0. 0. 4. 4. 0. 92.]]
<jupyter_text>#### 2) Plot using different colours<jupyter_code>res.plot_colours(save=False,legend=True,filename="colours_{}".format(name))<jupyter_output><empty_output><jupyter_text>#### 3) Plot using images<jupyter_code>res.plot_images(save=False,
eval_on=params['eval'],train_size=params['train-csize-SB2'],test_size=params['eval-csize-SB2'],
filename="images_{}".format(name))<jupyter_output><empty_output>
no_license | /4_TL_onSB3_experiment.ipynb | eliaficapalvila/Domain-Adaptation-in-Wireless-Capsule-Endoscopy-Diagnosis | 25
<jupyter_start><jupyter_text>### In this block we create a temporary directory and copy every class folder from simpsons_dataset into it
In doing so, we
- first balance the classes so that the number of images in each class folder is comparable to the number of images in the largest folder, homer_simpson
- then triple the number of images in each class folder by copying<jupyter_code>if not os.path.exists('temp'):
os.makedirs('temp')
f = list(os.walk(TRAIN_DIR))
rng = len(f[0][1])
f[0][1][1], f[2]
fmap = {}
for i in range(rng):
fmap[f[0][1][i]] = len(f[i+1][2])
maximum = max(fmap.values())
for key in fmap:
# r = maximum//fmap[key]
# fmap[key] = 1 if r < 15 else 10
fmap[key] = 3*(maximum//fmap[key])
# fmap[key] = maximum//fmap[key]+1
for class_name in fmap:
source_dir = os.path.join(TRAIN_DIR, class_name)
os.makedirs(os.path.join('temp', class_name))
for idx in range(fmap[class_name]):
for file_name in os.listdir(source_dir):
dest_dir = os.path.join('temp', class_name)
shutil.copy(os.path.join(source_dir, file_name), os.path.join(dest_dir, str(idx)+file_name))
fmap
# point TRAIN_DIR to the newly created directory
TRAIN_DIR = Path('temp')
train_val_files = sorted(list(TRAIN_DIR.rglob('*.jpg')))
test_files = sorted(list(TEST_DIR.rglob('*.jpg')))
from sklearn.model_selection import train_test_split
train_val_labels = [path.parent.name for path in train_val_files]
train_files, val_files = train_test_split(train_val_files, test_size=0.2, \
stratify=train_val_labels)
val_idxs = len(val_files)
len(train_files), len(val_files)
val_dataset = SimpsonsDataset(val_files, mode='val')
train_dataset = SimpsonsDataset(train_files, mode='train')<jupyter_output><empty_output><jupyter_text>Let's take a look at our characters inside the dataset.<jupyter_code>fig, ax = plt.subplots(nrows=3, ncols=3,figsize=(8, 8), \
sharey=True, sharex=True)
for fig_x in ax.flatten():
random_characters = int(np.random.uniform(0,1000))
im_train, label = train_dataset[random_characters]
img_label = " ".join(map(lambda x: x.capitalize(),\
train_dataset.label_encoder.inverse_transform([label])[0].split('_')))
imshow(im_train.data.cpu(), \
title=img_label,plt_ax=fig_x)
# use the pretrained CNN DenseNet161 for image classification
# replace its original classifier head so that it works with 42 classes
n_classes = len(np.unique(train_val_labels))
model_rn = models.densenet161(pretrained=1)
model_rn.classifier = nn.Linear(in_features=2208, out_features=n_classes, bias=True) # densenet161
model_rn = model_rn.to(DEVICE)
def mean_f1_score(y, y_pred):
    '''compute the mean F1 metric'''
epsylon = 1e-11
cnf_matrix = confusion_matrix(y, y_pred)
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float)+epsylon
FN = FN.astype(float)+epsylon
TP = TP.astype(float)+epsylon
TN = TN.astype(float)+epsylon
precision = TP/(TP+FP)
recall = TP/(TP+FN)
f1_score = 2*precision*recall/(precision+recall)
return f1_score.mean()
def fit_epoch(model, train_loader, criterion, optimizer):
    '''compute accuracy and loss on the training set'''
running_loss = 0.0
running_corrects = 0
processed_data = 0
for inputs, labels in train_loader:
inputs = inputs.to(DEVICE)
labels = labels.to(DEVICE)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
preds = torch.argmax(outputs, 1)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
processed_data += inputs.size(0)
train_loss = running_loss / processed_data
train_acc = running_corrects.cpu().numpy() / processed_data
return train_loss, train_acc
def eval_epoch(model, val_loader, criterion):
    '''compute accuracy and loss on the validation set'''
model.eval()
running_loss = 0.0
running_corrects = 0
processed_size = 0
for inputs, labels in val_loader:
inputs = inputs.to(DEVICE)
labels = labels.to(DEVICE)
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.argmax(outputs, 1)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
processed_size += inputs.size(0)
val_loss = running_loss / processed_size
val_acc = running_corrects.double() / processed_size
return val_loss, val_acc
def train(train_files, val_files, model, epochs, batch_size):
    '''model training loop'''
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    # the best model is chosen by the highest val_acc/val_loss ratio across epochs
best_model = copy.deepcopy(model.state_dict())
best_score, best_epoch = 0.0, 0
history = []
log_template = "\nEpoch {ep:03d} train_loss: {t_loss:0.4f} \
val_loss {v_loss:0.4f} train_acc {t_acc:0.4f} val_acc {v_acc:0.4f}"
with tqdm(desc="epoch", total=epochs) as pbar_outer:
opt = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
for epoch in range(epochs):
train_loss, train_acc = fit_epoch(model, train_loader, criterion, opt)
val_loss, val_acc = eval_epoch(model, val_loader, criterion)
history.append((train_loss, train_acc, val_loss, val_acc))
if val_acc/val_loss > best_score:
best_score, best_epoch = val_acc/val_loss, epoch
best_model = copy.deepcopy(model.state_dict())
pbar_outer.update(1)
tqdm.write(log_template.format(ep=epoch+1, t_loss=train_loss,\
v_loss=val_loss, t_acc=train_acc, v_acc=val_acc))
print('best_epoch:', best_epoch)
    # the best model will be used for predictions
model.load_state_dict(best_model)
return history
def predict(model, test_loader):
with torch.no_grad():
logits = []
for inputs in test_loader:
inputs = inputs.to(DEVICE)
model.eval()
outputs = model(inputs).cpu()
logits.append(outputs)
probs = nn.functional.softmax(torch.cat(logits), dim=-1).numpy()
    return probs<jupyter_output><empty_output><jupyter_text>## Let's start training the network.<jupyter_code>if val_dataset is None:
val_dataset = SimpsonsDataset(val_files, mode='val')
if train_dataset is None:
train_dataset = SimpsonsDataset(train_files, mode='train')
history = train(train_dataset, val_dataset, model=model_rn, epochs=epoch, batch_size=batch)<jupyter_output>epoch:  33%|███▎      | 1/3 [2:32:09<5:04:19, 9129.67s/it]<jupyter_text>Let's plot the learning curves<jupyter_code>loss, acc, val_loss, val_acc = zip(*history)
plt.figure(figsize=(14, 7))
plt.plot(loss, label="train_loss")
plt.plot(val_loss, label="val_loss")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
plt.figure(figsize=(14, 7))
plt.plot(acc, label="train_accuracy")
plt.plot(val_acc, label="val_accuracy")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
idxs = random.sample(list(range(val_idxs)), 4000)
imgs = [val_dataset[id][0].unsqueeze(0) for id in idxs]
probs_ims = predict(model_rn, imgs)
label_encoder = pickle.load(open("label_encoder.pkl", 'rb'))
[label_encoder.classes_]
y_pred = np.argmax(probs_ims, -1)
actual_labels = [val_dataset[id][1] for id in idxs]
preds_class = [label_encoder.classes_[i] for i in y_pred]
# at a quick glance the predictions look good
print(*actual_labels[::20])
print(*y_pred[::20])<jupyter_output>41 28 11 24 36 19 12 40 5 13 3 24 13 3 7 20 21 21 26 4 21 15 19 15 22 13 5 9 11 8 34 1 13 13 27 33 17 18 14 2 12 36 29 8 34 40 26 14 36 30 34 37 0 35 11 28 9 33 18 19 12 39 30 5 7 6 19 20 35 2 8 38 15 26 8 10 11 13 33 22 38 38 14 40 0 37 29 39 18 15 4 17 15 41 11 39 3 14 15 39 13 11 31 12 5 35 31 8 36 13 21 21 17 23 35 10 33 7 33 12 41 3 39 11 38 19 31 1 41 38 7 23 39 39 30 38 36 40 32 5 23 23 27 37 7 15 0 22 31 0 5 18 22 6 12 14 4 8 26 18 12 34 14 4 36 39 12 1 16 18 23 14 15 36 10 20 15 23 30 29 18 19 14 39 12 25 11 8 3 9 40 7 29 18 0 9 23 23 24 8
41 28 11 24 36 19 12 40 5 13 3 24 13 3 7 20 21 21 26 0 21 15 19 15 22 13 5 9 11 8 34 1 13 13 27 33 17 18 14 2 12 36 29 8 34 40 26 14 36 30 34 37 0 35 11 28 9 33 18 19 12 39 30 5 7 6 19 20 35 2 8 38 15 26 8 10 11 13 33 22 38 38 14 40 0 37 29 39 18 15 4 17 15 41 11 39 3 14 15 39 13 11 31 12 5 35 31 8 36 13 21 21 17 23 35 10 33 7 33 12 41 3 39 11 38 19 31 1 41 38 7 23 39 39 30 38 36 40 32 5 23 23 27 37 7 15 0 22 31 0 5 18 22 6 12 14 4 8 26 18 1[...]<jupyter_text>Метрика — **f1-score**<jupyter_code>from sklearn.metrics import f1_score
f1_score(actual_labels, y_pred, average='micro')
# mean F1 metric (score='macro')
mean_f1_score(actual_labels, y_pred)
# make predictions for the test set with the trained model, over all 42 classes
test_dataset = SimpsonsDataset(test_files, mode="test")
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch)
probs = predict(model_rn, test_loader)
# get the predicted class for each image in the test set
preds = label_encoder.inverse_transform(np.argmax(probs, axis=1))
test_filenames = [path.name for path in test_dataset.files]
len(preds), type(preds), preds.shape
# create the Kaggle submission file
my_submit = pd.DataFrame({'Id': test_filenames, 'Expected': preds})
my_submit.head()
# and save it
filename = 'colab_densenet161_Adam_b32_e3_balanced3'
my_submit.to_csv(filename + '.csv', index=False)
# save the model
# model parameters
torch.save(model_rn.state_dict(), filename + '.params')
# the full model
# torch.save(model_rn,
# 'colab_dencenet161_Adam_b16_e5_balanced3.model')
# the_model = TheModelClass(*args, **kwargs)
# the_model.load_state_dict(torch.load(path))
# the_model = torch.load(path)
# remove the temporary directory
!rm -rf temp<jupyter_output><empty_output>
no_license | /second_place_solution_colab_densenet161_Adam_b32_e3_balanced3_98989.ipynb | plaban1981/Dockship | 5
<jupyter_start><jupyter_text># Functions and VisualizationsWelcome to lab 4! This week, we'll learn about functions and the table method `apply` from [Section 8.1](https://www.inferentialthinking.com/chapters/08/1/applying-a-function-to-a-column.html). We'll also learn about visualization from [Chapter 7](https://www.inferentialthinking.com/chapters/07/visualization.html).
First, set up the tests and imports by running the cell below.<jupyter_code>import numpy as np
from datascience import *
# These lines set up graphing capabilities.
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import warnings
warnings.simplefilter('ignore', FutureWarning)
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from client.api.notebook import Notebook
ok = Notebook('lab04.ok')<jupyter_output>=====================================================================
Assignment: Functions and Visualizations
OK, version v1.13.11
=====================================================================
<jupyter_text>## 1. Functions and CEO Incomes
Let's start with a real data analysis task. We'll look at the 2015 compensation of CEOs at the 100 largest companies in California. The data were compiled for a Los Angeles Times analysis [here](http://spreadsheets.latimes.com/california-ceo-compensation/), and ultimately came from [filings](https://www.sec.gov/answers/proxyhtf.htm) mandated by the SEC from all publicly-traded companies. Two companies have two CEOs, so there are 102 CEOs in the dataset.
We've copied the data in raw form from the LA Times page into a file called `raw_compensation.csv`. (The page notes that all dollar amounts are in millions of dollars.)<jupyter_code>raw_compensation = Table.read_table('raw_compensation.csv')
raw_compensation<jupyter_output><empty_output><jupyter_text>**Question 1.1.** We want to compute the average of the CEOs' pay. Try running the cell below.<jupyter_code>np.average(raw_compensation.column("Total Pay"))<jupyter_output><empty_output><jupyter_text>You should see an error. Let's examine why this error occured by looking at the values in the "Total Pay" column. Use the `type` function and set `total_pay_type` to the type of the first value in the "Total Pay" column.<jupyter_code>total_pay_type = type(raw_compensation.column("Total Pay").item(0))
total_pay_type
_ = ok.grade('q1_1')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 3
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>**Question 1.2.** You should have found that the values in "Total Pay" column are strings (text). It doesn't make sense to take the average of the text values, so we need to convert them to numbers if we want to do this. Extract the first value in the "Total Pay" column. It's Mark Hurd's pay in 2015, in *millions* of dollars. Call it `mark_hurd_pay_string`.<jupyter_code>mark_hurd_pay_string = raw_compensation.column("Total Pay").item(0)
mark_hurd_pay_string
_ = ok.grade('q1_2')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>**Question 1.3.** Convert `mark_hurd_pay_string` to a number of *dollars*. The string method `strip` will be useful for removing the dollar sign; it removes a specified character from the start or end of a string. For example, the value of `"100%".strip("%")` is the string `"100"`. You'll also need the function `float`, which converts a string that looks like a number to an actual number. Last, remember that the answer should be in dollars, not millions of dollars.<jupyter_code>mark_hurd_pay = float(raw_compensation.column("Total Pay").item(0).strip("$"))*1000000
mark_hurd_pay
_ = ok.grade('q1_3')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 3
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>To compute the average pay, we need to do this for every CEO. But that looks like it would involve copying this code 102 times.
This is where functions come in. First, we'll define a new function, giving a name to the expression that converts "total pay" strings to numeric values. Later in this lab we'll see the payoff: we can call that function on every pay string in the dataset at once.
**Question 1.4.** Copy the expression you used to compute `mark_hurd_pay` as the `return` expression of the function below, but replace the specific `mark_hurd_pay_string` with the generic `pay_string` name specified in the first line of the `def` statement.
*Hint*: When dealing with functions, you should generally not be referencing any variable outside of the function. Usually, you want to be working with the arguments that are passed into it, such as `pay_string` for this function. <jupyter_code>def convert_pay_string_to_number(pay_string):
"""Converts a pay string like '$100' (in millions) to a number of dollars."""
return float(pay_string.strip("$"))*1000000
_ = ok.grade('q1_4')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 2
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>Running that cell doesn't convert any particular pay string. Instead, it creates a function called `convert_pay_string_to_number` that can convert any string with the right format to a number representing millions of dollars.
We can call our function just like we call the built-in functions we've seen. It takes one argument, a string, and it returns a number.<jupyter_code>convert_pay_string_to_number('$42')
convert_pay_string_to_number(mark_hurd_pay_string)
# We can also compute Safra Catz's pay in the same way:
convert_pay_string_to_number(raw_compensation.where("Name", are.containing("Safra")).column("Total Pay").item(0))<jupyter_output><empty_output><jupyter_text>So, what have we gained by defining the `convert_pay_string_to_number` function?
Well, without it, we'd have to copy that `10**6 * float(pay_string.strip("$"))` stuff each time we wanted to convert a pay string. Now we just call a function whose name says exactly what it's doing.
Soon, we'll see how to apply this function to every pay string in a single expression. First, let's take a brief detour and introduce `interact`.### Using `interact`
We've included a nifty function called `interact` that allows you to
call a function with different arguments.
To use it, call `interact` with the function you want to interact with as the
first argument, then specify a default value for each argument of the original
function like so:<jupyter_code>_ = interact(convert_pay_string_to_number, pay_string='$42')<jupyter_output><empty_output><jupyter_text>You can now change the value in the textbox to automatically call
`convert_pay_string_to_number` with the argument you enter in the `pay_string`
textbox. For example, entering in `'$49'` in the textbox will display the result of
running `convert_pay_string_to_number('$49')`. Neat!
Note that we'll never ask you to write the `interact` function calls yourself as
part of a question. However, we'll include it here and there where it's helpful
and you'll probably find it useful to use yourself.
Now, let's continue on and write more functions.## 2. Defining functions
Let's write a very simple function that converts a proportion to a percentage by multiplying it by 100. For example, the value of `to_percentage(.5)` should be the number 50. (No percent sign.)
A function definition has a few parts.
##### `def`
It always starts with `def` (short for **def**ine):
def
##### Name
Next comes the name of the function. Let's call our function `to_percentage`.
def to_percentage
##### Signature
Next comes something called the *signature* of the function. This tells Python how many arguments your function should have, and what names you'll use to refer to those arguments in the function's code. `to_percentage` should take one argument, and we'll call that argument `proportion` since it should be a proportion.
def to_percentage(proportion)
We put a colon after the signature to tell Python it's over.
def to_percentage(proportion):
##### Documentation
Functions can do complicated things, so you should write an explanation of what your function does. For small functions, this is less important, but it's a good habit to learn from the start. Conventionally, Python functions are documented by writing a triple-quoted string:
def to_percentage(proportion):
"""Converts a proportion to a percentage."""
##### Body
Now we start writing code that runs when the function is called. This is called the *body* of the function. We can write anything we could write anywhere else. First let's give a name to the number we multiply a proportion by to get a percentage.
def to_percentage(proportion):
"""Converts a proportion to a percentage."""
factor = 100
##### `return`
The special instruction `return` in a function's body tells Python to make the value of the function call equal to whatever comes right after `return`. We want the value of `to_percentage(.5)` to be the proportion .5 times the factor 100, so we write:
def to_percentage(proportion):
"""Converts a proportion to a percentage."""
factor = 100
return proportion * factor
Note that `return` inside a function gives the function a value, while `print`, which we have used before, is a function which has no `return` value and just prints a certain value out to the console. The two are very different. **Question 2.1.** Define `to_percentage` in the cell below. Call your function to convert the proportion .2 to a percentage. Name that percentage `twenty_percent`.<jupyter_code>def to_percentage(number):
""" Calculates the percentage of a certain number """
perc = "{:.40%}".format(number)
return float(perc.strip("%"))
twenty_percent = to_percentage(0.2)
twenty_percent
_ = ok.grade('q2_1')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>Like the built-in functions, you can use named values as arguments to your function.
**Question 2.2.** Use `to_percentage` again to convert the proportion named `a_proportion` (defined below) to a percentage called `a_percentage`.
*Note:* You don't need to define `to_percentage` again! Just like other named things, functions stick around after you define them.<jupyter_code>a_proportion = 2**(.5) / 2
a_percentage = to_percentage(a_proportion)
a_percentage
_ = ok.grade('q2_2')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>Here's something important about functions: the names assigned within a function body are only accessible within the function body. Once the function has returned, those names are gone. So even though you defined `factor = 100` inside `to_percentage` above and then called `to_percentage`, you cannot refer to `factor` anywhere except inside the body of `to_percentage`:<jupyter_code># You should see an error when you run this. (If you don't, you might
# have defined factor somewhere above.)
factor<jupyter_output><empty_output><jupyter_text>As we've seen with the built-in functions, functions can also take strings (or arrays, or tables) as arguments, and they can return those things, too.
**Question 2.3.** Define a function called `disemvowel`. It should take a single string as its argument. (You can call that argument whatever you want.) It should return a copy of that string, but with all the characters that are vowels removed. (In English, the vowels are the characters "a", "e", "i", "o", and "u".)
*Hint:* To remove all the "a"s from a string, you can use `that_string.replace("a", "")`. The `.replace` method for strings returns another string, so you can call `replace` multiple times, one after the other. <jupyter_code>def disemvowel(a_string):
letters = [] # make an empty list to hold the non-vowels
for char in a_string: # for each character in the word
if char.lower() not in 'aeiou': # if the letter is not a vowel
letters.append(char) # add it to the list of non-vowels
return ''.join(letters) # join the list of non-vowels together into a string
# An example call to your function. (It's often helpful to run
# an example call from time to time while you're writing a function,
# to see how it currently works.)
disemvowel("Can you read this without vowels?")
# Alternatively, you can use interact to call your function
_ = interact(disemvowel, a_string='Hello world')
_ = ok.grade('q2_3')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>##### Calls on calls on calls
Just as you write a series of lines to build up a complex computation, it's useful to define a series of small functions that build on each other. Since you can write any code inside a function's body, you can call other functions you've written.
If a function is like a recipe, defining a function in terms of other functions is like having a recipe for cake telling you to follow another recipe to make the frosting, and another to make the sprinkles. This makes the cake recipe shorter and clearer, and it avoids having a bunch of duplicated frosting recipes. It's a foundation of productive programming.
For example, suppose you want to count the number of characters *that aren't vowels* in a piece of text. One way to do that is to remove all the vowels and count the size of the remaining string.
**Question 2.4.** Write a function called `num_non_vowels`. It should take a string as its argument and return a number. The number should be the number of characters in the argument string that aren't vowels.
*Hint:* The function `len` takes a string as its argument and returns the number of characters in it.<jupyter_code>def num_non_vowels(a_string):
"""The number of characters in a string, minus the vowels."""
return len(disemvowel(a_string))
# Try calling your function yourself to make sure the output is what
# you expect. You can also use the interact function if you'd like.
_ = ok.grade('q2_4')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>Functions can also encapsulate code that *does things* rather than just computing values. For example, if you call `print` inside a function, and then call that function, something will get printed.
The `movies_by_year` dataset in the textbook has information about movie sales in recent years. Suppose you'd like to display the year with the 5th-highest total gross movie sales, printed in a human-readable way. You might do this:<jupyter_code>movies_by_year = Table.read_table("movies_by_year.csv")
rank = 5
fifth_from_top_movie_year = movies_by_year.sort("Total Gross", descending=True).column("Year").item(rank-1)
print("Year number", rank, "for total gross movie sales was:", fifth_from_top_movie_year)<jupyter_output>Year number 5 for total gross movie sales was: 2010
<jupyter_text>After writing this, you realize you also wanted to print out the 2nd and 3rd-highest years. Instead of copying your code, you decide to put it in a function. Since the rank varies, you make that an argument to your function.
**Question 2.5.** Write a function called `print_kth_top_movie_year`. It should take a single argument, the rank of the year (like 2, 3, or 5 in the above examples). It should print out a message like the one above. It shouldn't have a `return` statement.<jupyter_code>def print_kth_top_movie_year(k):
# Our solution used 2 lines.
fifth_from_top_movie_year = movies_by_year.sort("Total Gross", descending=True).column("Year").item(k-1)
print("Year number", k, "for total gross movie sales was:", fifth_from_top_movie_year)
# Example calls to your function:
print_kth_top_movie_year(2)
print_kth_top_movie_year(3)
# interact also allows you to pass in an array for a function argument. It will
# then present a dropdown menu of options.
_ = interact(print_kth_top_movie_year, k=np.arange(1, 10))
_ = ok.grade('q2_5')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>## 3. `apply`ing functions
Defining a function is a lot like giving a name to a value with `=`. In fact, a function is a value just like the number 1 or the text "the"!
For example, we can make a new name for the built-in function `max` if we want:<jupyter_code>our_name_for_max = max
our_name_for_max(2, 6)<jupyter_output><empty_output><jupyter_text>The old name for `max` is still around:<jupyter_code>max(2, 6)<jupyter_output><empty_output><jupyter_text>Try just writing `max` or `our_name_for_max` (or the name of any other function) in a cell, and run that cell. Python will print out a (very brief) description of the function.<jupyter_code>max<jupyter_output><empty_output><jupyter_text>Why is this useful? Since functions are just values, it's possible to pass them as arguments to other functions. Here's a simple but not-so-practical example: we can make an array of functions.<jupyter_code>make_array(max, np.average, are.equal_to)<jupyter_output><empty_output><jupyter_text>**Question 3.1.** Make an array containing any 3 other functions you've seen. Call it `some_functions`.<jupyter_code>some_functions = make_array(max, np.average, are.equal_to)
some_functions
_ = ok.grade('q3_1')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 4
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>Working with functions as values can lead to some funny-looking code. For example, see if you can figure out why this works:<jupyter_code>make_array(max, np.average, are.equal_to).item(0)(4, -2, 7)<jupyter_output><empty_output><jupyter_text>Here's a simpler example that's actually useful: the table method `apply`.
`apply` calls a function many times, once on *each* element in a column of a table. It produces an array of the results. Here we use `apply` to convert every CEO's pay to a number, using the function you defined:<jupyter_code>raw_compensation.apply(convert_pay_string_to_number, "Total Pay")<jupyter_output><empty_output><jupyter_text>Here's an illustration of what that did:
Note that we didn't write something like `convert_pay_string_to_number()` or `convert_pay_string_to_number("Total Pay")`. The job of `apply` is to call the function we give it, so instead of calling `convert_pay_string_to_number` ourselves, we just write its name as an argument to `apply`.
**Question 3.2.** Using `apply`, make a table that's a copy of `raw_compensation` with one more column called "Total Pay (\$)". It should be the result of applying `convert_pay_string_to_number` to the "Total Pay" column, as we did above, and creating a new table which is the old one, but with the "Total Pay" column redone. Call the new table `compensation`.<jupyter_code>compensation = raw_compensation.with_column(
"Total Pay ($)", raw_compensation.apply(convert_pay_string_to_number, "Total Pay")
)
compensation
_ = ok.grade('q3_2')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 2
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>Now that we have the pay in numbers, we can compute things about them.
**Question 3.3.**Compute the average total pay of the CEOs in the dataset.<jupyter_code>average_total_pay = np.average(compensation.column("Total Pay ($)"))
average_total_pay
_ = ok.grade('q3_3')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>**Question 3.4.** Companies pay executives in a variety of ways: directly in cash; by granting stock or other "equity" in the company; or with ancillary benefits (like private jets). Compute the proportion of each CEO's pay that was cash. (Your answer should be an array of numbers, one for each CEO in the dataset.)<jupyter_code>cash_proportion = compensation.apply(convert_pay_string_to_number, "Cash Pay") / compensation.column("Total Pay ($)")
cash_proportion
_ = ok.grade('q3_4')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
> Suite 1 > Case 1
>>> len(cash_proportion) == 102
TypeError: object of type 'float' has no len()
# Error: expected
# True
# but got
# Traceback (most recent call last):
# ...
# TypeError: object of type 'float' has no len()
Run only this test case with "python3 ok -q q3_4 --suite 1 --case 1"
---------------------------------------------------------------------
Test summary
Passed: 0
Failed: 1
[k..........] 0.0% passed
<jupyter_text>Check out the "% Change" column in `compensation`. It shows the percentage increase in the CEO's pay from the previous year. For CEOs with no previous year on record, it instead says "(No previous year)". The values in this column are *strings*, not numbers, so like the "Total Pay" column, it's not usable without a bit of extra work.
Given your current pay and the percentage increase from the previous year, you can compute your previous year's pay. For example, if your pay is \$100 this year, and that's an increase of 50% from the previous year, then your previous year's pay was $\frac{\$100}{1 + \frac{50}{100}}$, or around \$66.66.
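A quick sanity check of that formula in plain Python, using just the numbers from the example above:

    current_pay = 100
    percent_increase = 50
    previous_pay = current_pay / (1 + percent_increase / 100)
    previous_pay   # about 66.66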
**Question 3.5.** Create a new table called `with_previous_compensation`. It should be a copy of `compensation`, but with the "(No previous year)" CEOs filtered out, and with an extra column called "2014 Total Pay ($)". That column should have each CEO's pay in 2014.
*Hint:* This question takes several steps, but each one is still something you've seen before. Take it one step at a time, using as many lines as you need. You can print out your results after each step to make sure you're on the right track.
*Hint 2:* You'll need to define a function. You can do that just above your other code.<jupyter_code># For reference, our solution involved more than just this one line of code
having_previous = compensation.where("% Change", are.not_equal_to("(No previous year)"))
pct_change = having_previous.apply(lambda s: float(s.strip("%")), "% Change")
with_previous_compensation = having_previous.with_column(
    "2014 Total Pay ($)", having_previous.column("Total Pay ($)") / (1 + pct_change / 100))
with_previous_compensation
_ = ok.grade('q3_5')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 3
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>**Question 3.6.** What was the average pay of these CEOs in 2014?<jupyter_code>average_pay_2014 = np.average(with_previous_compensation.column("2014 Total Pay ($)"))
average_pay_2014
_ = ok.grade('q3_6')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>## 4. Histograms
Earlier, we computed the average pay among the CEOs in our 102-CEO dataset. The average doesn't tell us everything about the amounts CEOs are paid, though. Maybe just a few CEOs make the bulk of the money, even among these 102.
We can use a *histogram* to display more information about a set of numbers. The table method `hist` takes a single argument, the name of a column of numbers. It produces a histogram of the numbers in that column.
**Question 4.1.** Make a histogram of the pay of the CEOs in `compensation`.<jupyter_code>compensation.hist("Total Pay ($)")<jupyter_output>/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.
warnings.warn("The 'normed' kwarg is deprecated, and has been "
<jupyter_text>**Question 4.2.** Looking at the histogram, how many CEOs made more than \$30 million? (Answer the question by filling in your answer manually. You'll have to do a bit of arithmetic; feel free to use Python as a calculator.)<jupyter_code>num_ceos_more_than_30_million = 5
<jupyter_output><empty_output><jupyter_text>**Question 4.3.** Answer the same question with code. *Hint:* Use the table method `where` and the property `num_rows`.<jupyter_code>num_ceos_more_than_30_million_2 = compensation.where("Total Pay ($)", are.above(30000000)).num_rows
num_ceos_more_than_30_million_2
_ = ok.grade('q4_3')<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
<jupyter_text>## 5. SubmissionGreat job! :D You're finished with lab 4! Be sure to...
- **run all the tests and verify that they all pass** (the next cell has a shortcut for that),
- **Review the notebook one last time, we will be grading the final state of your notebook after the deadline**,
- **Save and Checkpoint** from the `File` menu,<jupyter_code># For your convenience, you can run this cell to run all the tests at once!
import os
_ = [ok.grade(q[:-3]) for q in os.listdir("tests") if q.startswith('q')]<jupyter_output>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 3
Failed: 0
[ooooooooook] 100.0% passed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 3
Failed: 0
[ooooooooook] 100.0% passed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------------------------------------------------------------
Test summary
Passed: 1
Failed: 0
[ooooooooook] 100.0% passed
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running tests
---------------[...]
no_license | /LabExams/lab04.ipynb | jhueppauff/DataScienceFoundations | 32
<jupyter_start><jupyter_text># Data Visualization## 1. Select a dataset
The [Pokemon](https://www.kaggle.com/abcsds/pokemon) dataset seems most interesting to me among the most voted datasets on Kaggle.<jupyter_code>from google.colab import files
uploaded = files.upload()
!mkdir -p ~/.kaggle/ && mv kaggle.json ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json
!kaggle datasets list -h
!kaggle datasets list --file-type csv --sort-by votes
!kaggle datasets download abcsds/pokemon
!unzip pokemon.zip
!ls
import pandas as pd
pokemon = pd.read_csv('Pokemon.csv')
pokemon.head()
pokemon.describe(include='all')<jupyter_output><empty_output><jupyter_text>## 2. Pose a question
What characteristics are associated with being Legendary?## 3. Answer the question<jupyter_code>import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (9,6)<jupyter_output><empty_output><jupyter_text>#### Pokemon Type
According to the plots below, these three types of Pokemon are most likely to be Legendary:
- Flying
- Dragon
- Psychic<jupyter_code>sns.countplot(pokemon['Type 1'],hue=pokemon['Legendary'], palette='coolwarm')
plt.xticks(rotation=45)
plt.title('Pokemon Counts by Legendary')
plt.xlabel('Pokemon Type 1')
plt.ylabel('Count')
sns.countplot(pokemon['Type 2'],hue=pokemon['Legendary'], palette='coolwarm')
plt.xticks(rotation=45)
plt.title('Pokemon Counts by Legendary')
plt.xlabel('Pokemon Type 2')
plt.ylabel('Count')
plt.legend(loc=1, title='Legendary')<jupyter_output>/usr/local/lib/python3.6/dist-packages/seaborn/categorical.py:1468: FutureWarning: remove_na is deprecated and is a private function. Do not use.
stat_data = remove_na(group_data[hue_mask])
<jupyter_text>### Pokemon Generation
Generation 3 has the highest probability (11.25%) of being Legendary.<jupyter_code>pd.crosstab(pokemon.Generation, pokemon.Legendary, margins=True)
pokemon.groupby('Generation')['Legendary'].mean()<jupyter_output><empty_output><jupyter_text>### Pokemon Total Strength
Overall, Legendary Pokemons have higher Total Strength.<jupyter_code>plt.hist(pokemon[pokemon.Legendary==True]['Total'], color='r', label='True')
plt.hist(pokemon[pokemon.Legendary==False]['Total'], color='b', label='False', alpha=0.7, bins=20)
plt.legend(title='Legendary', loc=1)
plt.xlabel('Pokemon Total Strength')
plt.ylabel('Pokemon Counts')<jupyter_output><empty_output><jupyter_text>### Pokemon Separate Strength Points
The separate strength points all seem to contribute to being Legendary to some extent, but it's hard to tell which one(s) are the most important one(s).<jupyter_code>pokemon.columns
pokemon_power = pokemon[['Total', 'HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed']]
sum(pokemon_power.drop('Total', axis=1).sum(axis=1) == pokemon_power.Total) # sanity check
pokemon_power = pokemon_power.drop('Total', axis=1)
pokemon_power['Legendary'] = pokemon.Legendary
pokemon_power.head()
sns.pairplot(pokemon_power, hue='Legendary',palette='coolwarm')<jupyter_output><empty_output><jupyter_text>#### Dimensionality Reduction
Apply PCA to reduce the six strength features to 2 dimensions.
The first principal component being greater than 2 indicates higher probability of being Legendary.<jupyter_code>from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
x = pokemon[['HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed']]
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
pc = pca.fit_transform(x)
pca.explained_variance_ratio_
pcDF = pd.DataFrame(data=pc, columns=['pc1', 'pc2'])
pcDF['Legendary'] = pokemon['Legendary']
sns.lmplot(x='pc1', y='pc2', hue='Legendary', data=pcDF, fit_reg=False)<jupyter_output><empty_output>
no_license | /Data_Visualization.ipynb | pardeep-kesnani1234/Data-Lit | 7
<jupyter_start><jupyter_text>Coursera Capstone
Opening a Japanese Restaurant in Toronto
The following notebook shows the code I used to determine the best location for a new Japanese restaurant in Toronto.
A few assumptions before we begin:
- The restaurant should be located in a neighbourhood that already has a high density of similar restaurants, since this indicates likely demand. This kind of competition is also said to help the entire market thrive, so we will use that business rule as a basis for the hypothesis as well.
- We will look at neighbourhood populations to ensure there is also a large enough population that would presumably visit these restaurants. We therefore assume that the people who travel to these restaurants live in the same neighbourhood, or in an adjacent one whose population does not vary widely.
Data sources are listed as we go along.
All required packages and tools are imported at the beginning, so everything used later is already available.
<jupyter_code># IMPORT ALL PACKAGES AND REQUIRED TOOLS
import pandas as pd
import requests
import numpy as np
import folium
import matplotlib.cm as cm
import matplotlib.colors as colors
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from geopy.geocoders import Nominatim
from pandas.io.json import json_normalize
from bs4 import BeautifulSoup
from pprint import pprint
!pip install opencage
from opencage.geocoder import OpenCageGeocode
# SCRAPE DATA AND GENERATE DATAFRAME (SAME DATA AS WK3 ASSIGNMENT; STILL IN TORONTO)
source = requests.get("https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M").text
soup = BeautifulSoup(source, 'lxml')
table = soup.find("table")
table_rows = table.tbody.find_all("tr")
res = []
for tr in table_rows:
td = tr.find_all("td")
row = [tr.text for tr in td]
# Ignore cells with borough 'Not assigned'.
if row != [] and row[1] != "Not assigned\n":
# If a cell contains a borough but is a "Not assigned" neighborhood, then the neighborhood will be the same as the borough.
if "Not assigned" in row[2]:
row[2] = row[1]
res.append(row)
df = pd.DataFrame(res, columns = ["PostalCode", "Borough", "Neighborhood"])
df.head()
# REMOVE '\n' APPENDED TO EACH LINE
df["PostalCode"] = df["PostalCode"].str.replace("\n","")
df["Borough"] = df["Borough"].str.replace("\n","")
df["Neighborhood"] = df["Neighborhood"].str.replace("\n","")
df.head()
df = df.groupby(["PostalCode", "Borough"])["Neighborhood"].apply(", ".join).reset_index()
df.head()
# USING THE GEOSPATIAL COORDINATES FILE
df_coords = pd.read_csv("./Geospatial_Coords.csv")
# MERGE DF AND COORDS INTO ONE DATAFRAME, THEN CLEAN DUPLICATE POSTCODE COLUMN
df2 = pd.merge(df, df_coords, how='left', left_on = 'PostalCode', right_on = 'Postal Code')
df2.drop("Postal Code", axis=1, inplace=True)
df2.head()<jupyter_output><empty_output><jupyter_text>From here, we need population data to add to the neighbourhood data. This will allow us to make a hypothesis and eventually show what the best place for the new restaurant is.
Statistics Canada (StatsCan) is the country's resource for census and geographic information, among other things, and will be our source for this project.<jupyter_code># LATEST DATA FROM STATSCAN
# SOURCE: 'Statistics Canada. 2017. Population and dwelling counts, for Canada and forward sortation areas© as reported by the respondents, 2016 Census (table). Population and Dwelling Count Highlight Tables. 2016 Census.''
# SOURCE URL: 'https://www12.statcan.gc.ca/census-recensement/2016/dp-pd/hlt-fst/pd-pl/Table.cfm?Lang=Eng&T=1201&S=22&O=A'
df_pop = pd.read_csv("./StatsCan_Toronto.csv",encoding = 'unicode_escape')
# CLEAN TABLE TO REMOVE UNNECESSARY COLUMNS/DATA
df_pop = df_pop.rename(columns={'Geographic code':'PostalCode', 'Geographic name':'Geoname', 'Province or territory':'Province', 'Incompletely enumerated Indian reserves and Indian settlements, 2016':'Incomplete', 'Population, 2016':'Population2016', 'Total private dwellings, 2016':'PrivateDwellings', 'Private dwellings occupied by usual residents, 2016':'OccupiedPrivateDwellings'})
df_pop = df_pop.drop(columns=['Geoname', 'Province', 'Incomplete', 'PrivateDwellings', 'OccupiedPrivateDwellings'])
df_pop = df_pop.iloc[1:]
df_pop.head()
# MERGE TORONTO DATA WITH POSTALCODE DATA AND SORT
df3 = pd.merge(df_pop, df2, on="PostalCode", how='right')
df3 = df3.sort_values(by=['Population2016'], ascending=False)
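# OPTIONAL SANITY CHECK (added, not part of the original analysis): since this is a right merge,
# some postal codes may lack StatsCan population figures.
print("Postal codes without population data:", df3['Population2016'].isna().sum())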
df3.head()<jupyter_output><empty_output><jupyter_text>The above dataframe shows us neighbourhoods in Toronto, sorted by population.<jupyter_code># INPUT FOURSQUARE CREDENTIALS
CLIENT_ID = '41MJB42PKA03HVBU14G3V5CVGHJEIW4JXMLUXSRJZNDDJUJM'
CLIENT_SECRET = 'KCE3T0U0LWXV5O2SDDJNIEZUIOBFFGLC1OEUJPHGT4QU2VWG'
VERSION = '20210101'
# SET LIMITS TO PREVENT OVERUSE OF FOURSQUARE FREE ACCOUNT
limit = 200
# SET SEARCH RADIUS TO 5000m. ASSUME PEOPLE WILL TRAVEL UP TO 5km TO VISIT A RESTAURANT.
radius = 5000
# DEFINE FUNCTION TO RETRIEVE VENUES
def getNearbyVenues(names, latitudes, longitudes, radius=5000):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
limit)
results = requests.get(url).json()["response"]['groups'][0]['items']
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighborhood',
'Neighborhood Latitude',
'Neighborhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Category']
return(nearby_venues)
# GET NEIGHBOURHOOD LIST
Toronto_Venues = getNearbyVenues(names=df3['Neighborhood'],
latitudes=df3['Latitude'],
longitudes=df3['Longitude'])
# SHOW UNIQUE VENUE CATEGORIES
print('Unique Venue Categories:')
list(Toronto_Venues['Venue Category'].unique())
# ISOLATE ONLY THOSE CATEGORIES WITH JAPANESE THEMES (SUSHI, RAMEN, ETC.)
Japanese_restaurants = ['Ramen Restaurant', 'Japanese Restaurant', 'Sushi Restaurant', 'Noodle House', 'Sake Bar']
Japanese_rest_pd = pd.DataFrame(Japanese_restaurants)
Japanese_rest_pd
# RENAME COLUMN TO FIVE TYPES OF JAPANESE RESTAURANT
Japanese_rest_pd = Japanese_rest_pd.rename(columns={0:'Venue Category'})
# MERGE DATAFRAMES TO SEE ONLY JAPANESE RESTAURANT VARIANTS IN NEIGHBOURHOODS
Toronto_Japanese_rest = pd.merge(Toronto_Venues, Japanese_rest_pd, on='Venue Category', how='right')
Toronto_Japanese_rest.head()
# USING ONE HOT ENCODING
newonehot = pd.get_dummies(Toronto_Japanese_rest[['Venue Category']], prefix="", prefix_sep="")
# ADD NEIGHBOURHOOD BACK IN AND MOVE TO FIRST COLUMN
newonehot['Neighborhood'] = Toronto_Japanese_rest['Neighborhood']
fixed_columns = [newonehot.columns[-1]] + list(newonehot.columns[:-1])
newonehot = newonehot[fixed_columns]
newonehot.head()
# ANALYSIS OF RESTAURANT TYPES (PERCENTAGES) IN EACH NEIGHBORHOOD
grouped = newonehot.groupby('Neighborhood').mean().reset_index()
grouped.shape
grouped.head()
# CLUSTER MODELLING
# USE SILHOUETTE TO FIND BEST CLUSTER GROUPS
groupedclusters = grouped.drop('Neighborhood', axis=1)
kclusters = np.arange(2,10)
results = {}
for size in kclusters:
model = KMeans(n_clusters = size).fit(groupedclusters)
predictions = model.predict(groupedclusters)
results[size] = silhouette_score(groupedclusters, predictions)
best_size = max(results, key=results.get)
best_size
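# OPTIONAL (added): inspect all silhouette scores, not just the best k
# ('results' maps each candidate cluster count to its silhouette score).
for size, score in sorted(results.items()):
    print("k = {}: silhouette = {:.3f}".format(size, score))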
# RUN K MEANS AND SEGMENT DATA
kclusters = best_size
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(groupedclusters)
# CHECK LABELS
kmeans.labels_[0:10]
# CREATE FUNCTION TO RETURN MOST COMMON
def most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
num_top_venues = 5
indicators = ['st', 'nd', 'rd']
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
venues_sorted = pd.DataFrame(columns=columns)
venues_sorted['Neighborhood'] = grouped['Neighborhood']
for ind in np.arange(grouped.shape[0]):
venues_sorted.iloc[ind, 1:] = most_common_venues(grouped.iloc[ind, :], num_top_venues)
venues_sorted.head()
# MERGE DATAFRAMES TO INCLUDE ALL DATA FROM NEIGHBORHOOD AND RESTAURANT TYPE DFs
Toronto_complete = pd.merge(df3, venues_sorted, on='Neighborhood', how='left')
Toronto_complete.head()
# MERGE TORONTO DATA WITH COORDINATE DATA AND GET CLUSTER LABELS
labels = pd.merge(Toronto_complete, grouped, on='Neighborhood', how='right')
labels.shape
labels.head()
# ADD CLUSTERED LABELS
tablewithlabels = labels
tablewithlabels['Cluster Labels'] = kmeans.labels_
# MERGE TO ADD LAT LONG TO EACH NEIGHBORHOOD
tablewithlabels = pd.merge(labels, venues_sorted, on='Neighborhood', how='left')
tablewithlabels.head()
# FIND VALUES FOR EACH OF THE CLUSTERS
cluster0 = tablewithlabels.loc[tablewithlabels['Cluster Labels'] == 0, tablewithlabels.columns[[3, 4] + list(range(5, tablewithlabels.shape[1]))]]
cluster0.shape
cluster1 = tablewithlabels.loc[tablewithlabels['Cluster Labels'] == 1, tablewithlabels.columns[[3, 4] + list(range(5, tablewithlabels.shape[1]))]]
cluster1.shape
cluster2 = tablewithlabels.loc[tablewithlabels['Cluster Labels'] == 2, tablewithlabels.columns[[3, 4] + list(range(5, tablewithlabels.shape[1]))]]
cluster2.shape
cluster3 = tablewithlabels.loc[tablewithlabels['Cluster Labels'] == 3, tablewithlabels.columns[[3, 4] + list(range(5, tablewithlabels.shape[1]))]]
cluster3.shape
cluster4 = tablewithlabels.loc[tablewithlabels['Cluster Labels'] == 4, tablewithlabels.columns[[3, 4] + list(range(5, tablewithlabels.shape[1]))]]
cluster4.shape
cluster5 = tablewithlabels.loc[tablewithlabels['Cluster Labels'] == 5, tablewithlabels.columns[[3, 4] + list(range(5, tablewithlabels.shape[1]))]]
cluster5.shape
# CLUSTER 1 AND 3 HAVE THE SAME DENSITY, SO THEY ARE BOTH OPTIMAL LOCATIONS WITH THESE VARIABLES
# FIND GEOGRAPHIC CENTRE OF EACH CLUSTER
cluster1coords = cluster1[['Latitude', 'Longitude']]
cluster1coords = list(cluster1coords.values)
lat = []
long = []
for l in cluster1coords:
lat.append(l[0])
long.append(l[1])
Blatitude = sum(lat)/len(lat)
Blongitude = sum(long)/len(long)
print(Blatitude)
print(Blongitude)
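# Equivalent one-liner (added for comparison): pandas can average the coordinates directly.
print(cluster1[['Latitude', 'Longitude']].mean())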
cluster3coords = cluster3[['Latitude', 'Longitude']]
cluster3coords = list(cluster3coords.values)
lat = []
long = []
for l in cluster3coords:
lat.append(l[0])
long.append(l[1])
blatitude = sum(lat)/len(lat)
blongitude = sum(long)/len(long)
print(blatitude)
print(blongitude)<jupyter_output>43.695958680000004
-79.39204819333334
<jupyter_text>Since the actual ideal coordinates for Cluster 1 fall on a residential street, whereas those for Cluster 3 fall on a parkette alongside shops and other stores, the (if only marginally) more ideal location would be the ideal coordinates of Cluster 3, as it would be near other restaurants, a grocery store, etc.
43.695958680000004, -79.39204819333334<jupyter_code># INSTALL OPENCAGE TO CONVERT COORDINATES TO ADDRESS
key = 'b7110d6d829b48a9b718a09748d1628f'
geocoder = OpenCageGeocode(key)
results = geocoder.reverse_geocode(43.695958680000004, -79.39204819333334)
pprint(results)
# FIND BEST LOCATION
popstr = df3[df3['PostalCode'].str.contains('M4S')]
def str_join(*args):
return ''.join(map(str, args))
popstr = str_join('Best Location: ', popstr['Neighborhood'].values, ' in ', popstr['Borough'].values)
print(popstr)
# USING GEOPY GEOCODERS
address = 'Toronto, ON'
geolocator = Nominatim(user_agent="http")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print("Toronto's Geographical Coordinates: {}, {}".format(latitude, longitude))
# USE FOLIUM TO SHOW BEST LOCATION
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=12)
# COLOURS
x = np.arange(kclusters)
ys = [i+x+(i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# ADD MARKERS
markers_colors = []
for lat, lon, poi, cluster in zip(tablewithlabels['Latitude'], tablewithlabels['Longitude'], tablewithlabels['Neighborhood'], tablewithlabels['Cluster Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
folium.CircleMarker([blatitude, blongitude],
radius=50,
popup='Toronto',
color='red',
).add_to(map_clusters)
map_clusters.save('map_clusters.html')
map_clusters<jupyter_output><empty_output>
|
no_license
|
/W5Assignment.ipynb
|
chriskirkos/Coursera_Capstone
| 4 |
<jupyter_start><jupyter_text># Final notebook: Demonstration of the visual input model
The general principles of the visual input model are described in the report, but here is the algorithmic principle that makes everything work (see the SpikingLGN.py file for the code). We ask Python to reload SpikingLGN whenever it is modified, and we load the libraries:<jupyter_code>%load_ext autoreload
%autoreload 2
import numpy as np
import Spiking_LGN as SpiLGN
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>We import the MotionCloud that we will use and display it<jupyter_code>#first we import the video
video = np.load('./MotionClouds_64x64x128_24FPS.npy')
video.shape
plt.imshow(video[:,:,0])<jupyter_output><empty_output><jupyter_text>If needed, I wrote complete documentation for all of the model's parameters, which can be consulted with the help() command<jupyter_code>help(SpiLGN.input_to_currents) #documentation for every function<jupyter_output>Help on function input_to_currents in module Spiking_LGN:
input_to_currents(video, FPS, total_time, distrib_size, safeguard_offset, random_shift, grid_res, N_theta, B_theta, sf_0, B_sf, on_thresh, off_thresh, filter_size, filter_res, sampling_rate, n_jobs, backend, mt_verbose, off_gain=1, gabors_params=None, verbose=True, get_coordinates=False)
Main method, transforms a numpy array input into currents that can be used
by a spiking neuron simulator (optimized for Nest with PyNN)
Args :
> Input arguments <
video : the numpy array input, a video of shape H x W x frames
the format is for plt.imshow convenience, careful about the H x W order
FPS : video's frame per second
total_time : simulation total time, should you want it shorter than video's length
> Gabor parameters <
gabors_params : if set to None, the default parameter dictionnary is used
> Generating gabors centers and filters coord[...]<jupyter_text>We run the model, using all the processors available on the machine<jupyter_code>output_currents = SpiLGN.input_to_currents(video=video, FPS=24, total_time=1000,
distrib_size=10, safeguard_offset=20, random_shift=1, grid_res=3,
N_theta=12, B_theta=15, sf_0=.05, B_sf=.5,
on_thresh=0, off_thresh=0,
filter_size=2., filter_res=.1,
sampling_rate = 1,
off_gain = 1,
n_jobs=-1, backend='loky', mt_verbose=10)<jupyter_output>Video shape (64, 64, 128)
Frames per second: 24
Frame duration at 24 FPS: 41.67 ms
Video length inferred from fps: 6 s
FPS conversion sanity check passed !
Stimuli shape (64, 64, 1000)
Generating filters coordinates with gabors ..
<jupyter_text>We check that the output format is indeed N_gabor_orientations x N_positions x (ON fields + OFF fields)<jupyter_code>print('Output shape', np.asarray(output_currents).shape)
print('=(Thetas, Gabors, ON/OFF)')<jupyter_output>Output shape (12, 16, 2)
=(Thetas, Gabors, ON/OFF)
<jupyter_text>To check at a glance that the filters worked properly, we can visualize them as parallel time series (and it is rather pretty)<jupyter_code>#We show the second Theta (here pi/4) and third gabor (i.e. simple cell)
theta_display = 1
gabor_display = 3
on_st_levels = output_currents[theta_display][gabor_display][0]
off_st_levels = output_currents[theta_display][gabor_display][1]
import matplotlib.pyplot as plt
chans = len(on_st_levels) + len(off_st_levels) #channels
fig = plt.figure(figsize = (15,8))
ax = fig.add_subplot(111)
ax.set_facecolor('white')
on_colors=plt.cm.autumn(np.linspace(0,.7,len(on_st_levels)))
off_colors=plt.cm.winter(np.linspace(0,.7,len(off_st_levels)))
on_arr = np.asarray(on_st_levels)
off_arr = np.asarray(off_st_levels)
divby = 25
for channel in range(chans):
if channel < len(off_st_levels) :
plt.fill_between(np.linspace(0, len(on_st_levels[0]), len(on_st_levels[0])),
off_arr[channel]+channel/divby, channel/divby,
facecolor='white',
zorder = chans-channel)
plt.plot(off_arr[channel]+channel/divby, zorder = chans-channel,
color = off_colors[channel])
else :
plt.fill_between(np.linspace(0, len(off_st_levels[0]), len(off_st_levels[0])),
on_arr[channel-len(off_st_levels)]+channel/divby, channel/divby,
facecolor='white',
zorder = chans-channel)
plt.plot(on_arr[channel-len(off_st_levels)]+channel/divby, zorder = chans-channel,
color = on_colors[channel-len(off_st_levels)])
ax.set_yticklabels(['','','OFF fields','','' 'ON fields'])
ax.set_xlabel('Time (ms)')
#plt.savefig('./figs/2018_11_08_Script_output_white.pdf', dpi = 200, bbox_inches = 'tight')
plt.show() <jupyter_output><empty_output><jupyter_text>And we save the input model's currents again so that we do not have to recompute them every time<jupyter_code>#Now we save the currents, in a numpy compressed array to save space (2.5x less size)
#tradeoff : saving twice as slow
import datetime
now = datetime.datetime.now()
strtime = now.strftime("%Y-%m-%d_%H_%M")
np.savez_compressed('./output/%s'%strtime, output_currents)
#the loading method is a bit different as the npz can dump multiple arrays
load_npz = np.load('./output/%s.npz'%strtime)
load_arr = load_npz['arr_0.npy'] #won't contain more than one array normally<jupyter_output><empty_output><jupyter_text>Each sub-function can also be visualized by calling it as SpiLGN.function_name. For example, we can visualize the coordinates of the centroids, which are the points around which the receptive fields are distributed<jupyter_code>video = np.load('./MotionClouds_64x64x128_24FPS.npy')
video.shape
coor = SpiLGN.generate_centers_coordinates(
distrib_size=10, safeguard_offset = 12, random_shift = 1, video=video)
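# quick check (added): coor is used below as (x_coordinates, y_coordinates), so the number of
# generated centroids can be read off directly
print(len(coor[0]), 'centroids generated')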
fig = plt.figure(figsize = (8,6))
#la taille des points n'est pas une science exacte
plt.scatter(coor[0], coor[1], s = 82**2, edgecolor = 'black')
plt.imshow(video[:,:,0], cmap = plt.cm.binary)
plt.title('Centroids distribution\nSome filters are still likely outside the boundaries')<jupyter_output><empty_output>
|
non_permissive
|
/FINAL_Retina_LGN_generation.ipynb
|
laurentperrinet/InternshipM2
| 8 |
<jupyter_start><jupyter_text>This is a simple example of how you can use a Jupyter notebook to train your model :) <jupyter_code>import torch
import torch.nn as nn
from task2 import Trainer, compute_loss_and_accuracy, create_plots
import dataloaders
#reload(dataloaders)
def compute_output_pooling(w1, h1, d1, f, s):
w2 = (w1 - f) / s + 1
h2 = (h1 - f) / s + 1
d2 = d1
return w2, h2, d2
def compute_output_convolution(w1, h1, d1, fw, fh, pw, ph, sw, sh, num_filter):
w2 = (w1 - fw + 2*pw) / sw + 1
h2 = (h1 - fh + 2*ph) / sh + 1
d2 = num_filter
return w2, h2, d2
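# Quick illustration (added): a 3x3 convolution with padding 1 and stride 1 preserves the spatial size,
# e.g. compute_output_convolution(32, 32, 3, 3, 3, 1, 1, 1, 1, 16) -> (32.0, 32.0, 16),
# and a 2x2 pooling with stride 2 then halves it: compute_output_pooling(32, 32, 16, 2, 2) -> (16.0, 16.0, 16).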
num_filters = 16
w1, h1, d1 = 32, 32, 3
fw, fh, pw, ph, sw, sh = 3,3,1,1,1,1
f, s = 2, 2
for i in range(5):
w1, h1, d1 = compute_output_convolution(w1, h1, d1, fw, fh, pw, ph, sw, sh, num_filters)
num_filters *= 2
w1, h1, d1 = compute_output_pooling(w1, h1, d1, f, s)
print(w1, h1, d1)
class ExampleModel(nn.Module):
def __init__(self,
image_channels,
num_classes):
"""
Is called when model is initialized.
Args:
image_channels. Number of color channels in image (3)
num_classes: Number of classes we want to predict (10)
"""
super().__init__()
fw, fh, pw, ph, sw, sh = 3,3,1,1,1,1
f, s = 2, 2
num_filters = 64 # Set number of filters in first conv layer
self.num_classes = num_classes
# Define the convolutional layers
self.feature_extractor = nn.Sequential(
nn.Conv2d(
in_channels=image_channels,
out_channels=num_filters,
kernel_size=fw,
stride=sw,
padding=pw
),
nn.BatchNorm2d(num_filters),
nn.ReLU(),
nn.Conv2d(
in_channels=num_filters,
out_channels=2*num_filters,
kernel_size=fw,
stride=sw,
padding=pw
),
nn.BatchNorm2d(2*num_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.1),
nn.Conv2d(
in_channels=2*num_filters,
out_channels=2*2*num_filters,
kernel_size=fw,
stride=sw,
padding=pw
),
nn.BatchNorm2d(2*2*num_filters),
nn.ReLU(),
nn.Conv2d(
in_channels=2*2*num_filters,
out_channels=2*2*2*num_filters,
kernel_size=fw,
stride=sw,
padding=pw
),
nn.BatchNorm2d(2*2*2*num_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.1),
nn.Conv2d(
in_channels=2*2*2*num_filters,
out_channels=2*2*2*2*num_filters,
kernel_size=fw,
stride=sw,
padding=pw
),
nn.BatchNorm2d(2*2*2*2*num_filters),
nn.ReLU(),
nn.Conv2d(
in_channels=2*2*2*2*num_filters,
out_channels=2*2*2*2*2*num_filters,
kernel_size=fw,
stride=sw,
padding=pw
),
nn.BatchNorm2d(2*2*2*2*2*num_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.1),
)
# The output of feature_extractor will be [batch_size, 2048, 4, 4], i.e. 32768 features per image
w1, h1, d1 = 32, 32, 3
for i in range(6):
w1, h1, d1 = compute_output_convolution(w1, h1, d1, fw, fh, pw, ph, sw, sh, num_filters)
num_filters *= 2
w1, h1, d1 = compute_output_pooling(w1, h1, d1, f, s)
self.num_output_features = 32768 #int(w1 * h1 * d1)
# Initialize our last fully connected layer
# Inputs all extracted features from the convolutional layers
# Outputs num_classes predictions, 1 for each class.
# There is no need for softmax activation function, as this is
# included with nn.CrossEntropyLoss
self.classifier = nn.Sequential(
nn.Linear(self.num_output_features, 64),
nn.Softplus(),
nn.Linear(64, num_classes),
)
def forward(self, x):
"""
Performs a forward pass through the model
Args:
x: Input image, shape: [batch_size, 3, 32, 32]
"""
batch_size = x.shape[0]
out = self.feature_extractor(x)
out = out.view(batch_size, -1)
out = self.classifier(out)
expected_shape = (batch_size, self.num_classes)
assert out.shape == (batch_size, self.num_classes),\
f"Expected output of forward pass to be: {expected_shape}, but got: {out.shape}"
return out
epochs = 10
batch_size = 64
learning_rate = 5e-2
early_stop_count = 4
dataloader = dataloaders.load_cifar10(batch_size)
model = ExampleModel(image_channels=3, num_classes=10)
trainer = Trainer(
batch_size,
learning_rate,
early_stop_count,
epochs,
model,
dataloader
)
trainer.train()
create_plots(trainer, "task3_bestmodel")<jupyter_output><empty_output>
|
no_license
|
/assignment3/task3_bestmodel.ipynb
|
anviba/ComputerVisionBlatt3.1
| 1 |
<jupyter_start><jupyter_text># Working with Lists - Lab## Introduction
Now that we have a sense of how to read from a list and alter a list in Python, let's put this knowledge to use. ## Objectives
You will be able to:
* Understand and use Lists## InstructionsIn the previous lesson, we had a list of top travel cities.<jupyter_code>top_travel_cities = ['Solta', 'Greenville', 'Buenos Aires', 'Los Cabos', 'Walla Walla Valley', 'Marakesh', 'Albuquerque', 'Archipelago Sea', 'Iguazu Falls', 'Salina Island', 'Toronto', 'Pyeongchang']<jupyter_output><empty_output><jupyter_text>> Remember to press shift+enter to run each gray block of code (including the one above). Otherwise, the variables will not be defined.In this lab we will work with a list of associated countries corresponding to each of the top travel cities.<jupyter_code>countries = ['Croatia',
'USA',
'Argentina',
'Mexico',
'USA',
'Morocco',
'New Mexico',
'Finland',
'Argentina',
'Italy',
'Canada',
'South Korea']<jupyter_output><empty_output><jupyter_text>> Run the code in the cell above by pressing shift + enter.The list of countries associated with each city has been assigned to the variable `countries`. Now we will work with reading and manipulating this list.### Accessing elements from listsFirst, set the variable `italy` to be equal to the third to last element from `countries`.
>**Note:** If you see an **error** stating that `countries` is undefined, it means you must press shift+enter in the second gray box where `countries` variable is assigned.<jupyter_code>italy = countries[-3] # 'Italy'
italy<jupyter_output><empty_output><jupyter_text>> We assign the variable `italy` equal to `None`, but you should change the word `None` to code that uses the `countries` list to assign `italy` to `'Italy'`. We wrote the variable `italy` a second time, so that you can see what it equals when you run the code block. Currently, nothing is displayed below as it equals `None`, but when it's correct it will match the string which is commented out, `'Italy'`.<jupyter_code>italy # 'Italy'<jupyter_output><empty_output><jupyter_text>Now access the fourth element and set it equal to the variable `mexico`.<jupyter_code>mexico = countries[3]
mexico<jupyter_output><empty_output><jupyter_text>Notice that the second through fifth elements are all in a row and all in the Western Hemisphere. Assign that subset of elements to a variable called `kindof_neighbors`.<jupyter_code>kindof_neighbors = countries[1:5]
kindof_neighbors<jupyter_output><empty_output><jupyter_text>### Changing ElementsOk, now let's add a couple of countries onto this list. At the end of the list, add the country 'Malta'.<jupyter_code>countries.append("Malta") # add code here<jupyter_output><empty_output><jupyter_text>Then add the country 'Thailand'.<jupyter_code>countries.append("Thailand") # add code here<jupyter_output><empty_output><jupyter_text>Now your list of countries should look like the following.<jupyter_code>countries
# ['Croatia', 'USA', 'Argentina', 'Mexico', 'USA', 'Morocco', 'New Mexico', 'Finland',
# 'Argentina', 'Italy', 'Canada', 'South Korea', 'Malta', 'Thailand']<jupyter_output><empty_output><jupyter_text>You may have noticed that "New Mexico" is included in our list of countries. That doesn't seem right. Let's change 'New Mexico' to 'USA'.<jupyter_code>for idx, item in enumerate(countries):
if item == "New Mexico":
countries[idx] = "USA"
# add code here
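# an equivalent one-liner (note added): countries[countries.index("New Mexico")] = "USA"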
countries
# ['Croatia', 'USA', 'Argentina', 'Mexico', 'USA', 'Morocco', 'USA', 'Finland',
# 'Argentina', 'Italy', 'Canada', 'South Korea', 'Malta', 'Thailand']<jupyter_output><empty_output><jupyter_text>Finally, let's remove Thailand from the list. No good reason, we're acting on whimsy.<jupyter_code>countries.pop() # 'Thailand'
print(countries)<jupyter_output>['Croatia', 'USA', 'Argentina', 'Mexico', 'USA', 'Morocco', 'USA', 'Finland', 'Argentina', 'Italy', 'Canada', 'South Korea', 'Malta']
<jupyter_text>### Exploring Lists with MethodsOk, now we notice that some countries are mentioned more than once. Let's see how many repeat countries are on this list. First, use the `set` and `list` functions to return a unique list of countries. Set this list equal to the variable `unique_countries`.<jupyter_code>unique_countries = set(countries)
unique_countries # ['Canada', 'Italy', 'USA', 'Mexico', 'Finland',
#'Malta', 'Morocco', 'Croatia', 'Argentina', 'South Korea']<jupyter_output><empty_output><jupyter_text>Now the number of repeat countries should be the number of countries minus the number of unique countries. So use the `len` function on both `unique_countries` and `countries` to calculate this and assign the result to the variable `num_of_repeats`.<jupyter_code>num_of_repeats = len(countries)-len(unique_countries)
num_of_repeats # 3<jupyter_output><empty_output>
|
non_permissive
|
/index.ipynb
|
sgoldstein0/dsc-0-01-16-working-with-lists-lab-online-ds-sp-000
| 13 |
<jupyter_start><jupyter_text># Arcseconds and parsec<jupyter_code>%pylab nbagg
from astropy import units
from astropy import constants<jupyter_output><empty_output><jupyter_text>By default the sin function takes the angle in units of radians where a complete turn of $2\pi \, \mathrm{rad}$ corresponds to $360^{\circ}$. For example, $90^{\circ}$ corresponds to $\frac{1}{2}\pi \, \mathrm{rad}$:<jupyter_code>sin(0.5*pi)<jupyter_output><empty_output><jupyter_text>Other measures of angle are arcminutes and arcseconds; 60 arcminutes make one degree.<jupyter_code>a=90*60*units.arcmin
sin(a)
a=90*3600*units.arcsec
sin(a)<jupyter_output><empty_output><jupyter_text>One arcsec is very small, and the sin of one arcsec is very small:<jupyter_code>sin(1*units.arcsec)<jupyter_output><empty_output><jupyter_text>sin(45 deg):<jupyter_code>sin(pi/4.)
1/sqrt(2)<jupyter_output><empty_output><jupyter_text>Convert units conveniently with the unit package, for example one arcmin in rad:<jupyter_code>units.arcmin.to('rad')<jupyter_output><empty_output><jupyter_text>In order to make a plot we create an array containing numbers with units:<jupyter_code>a = linspace(0,90*60*units.arcmin,100)<jupyter_output><empty_output><jupyter_text>The 10$^\mathrm{th}$ element of the angle array $a$:<jupyter_code>a[10]<jupyter_output><empty_output><jupyter_text>Note how the unit of angle is changed when plotting the second line:<jupyter_code>close(1);figure(1)
plot(a,sin(a),label="$ \sin (\\alpha)$")
plot(a,a.to('rad'),'--',label="$ \\alpha \mathrm{[rad]}$")
xlabel("$\\alpha [\mathrm{'}]$")
legend()<jupyter_output><empty_output><jupyter_text>The key point is that for small $\alpha$ (certainly a couple hundred arcmin) $\sin(\alpha) \approx \alpha$ is a very good approximation. ## Astronomical unit
<jupyter_code>constants.au<jupyter_output><empty_output><jupyter_text>### Parsec
A parsec is the distance $D$ from which one astronomical unit $AU$ is seen at an angle of 1 arcsec. Because for such a small angle $\sin(\alpha) \approx \alpha$, we have,
for $\alpha = 1'' = 4.848\times10^{-6}\,\mathrm{rad}$,
$$
\alpha \approx \sin \alpha = \frac{AU}{D}
$$
where we just need to remember that because $\alpha$ as an argument of $\sin$ must be in $\mathrm{rad}$, $\alpha$ on the far left of this equation must also be in radians.<jupyter_code>alpha = 0.11*units.arcsec
r = alpha.to('rad').value * 8.5*units.kpc
r.to('m')
# one arcsec in rad:
units.arcsec.to('rad')
# one rad in arcsec:
1./units.arcsec.to('rad')
# Then one parsec is
D = constants.au / units.arcsec.to('rad')
D
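# sanity check (added): D should come out to one parsec
D.to('pc')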
# parsec in AU:
D.to('AU')<jupyter_output><empty_output><jupyter_text>### Lightyear<jupyter_code>units.lightyear
# parsec in lightyear:
D.to('lightyear')
constants.kpc.to('lightyear')
constants.au.to('lightyear')
(constants.c*units.year).to('au')
(1*units.lightyear).to('au')
constants.pc.to('au')<jupyter_output><empty_output>
|
permissive
|
/notebooks/Arcsec_and_parsec.ipynb
|
UVic-CompPhys/ASTR150
| 12 |
<jupyter_start><jupyter_text>The main questions we want to ask are documented in the [analysis plan](https://github.com/TSSlade/tusome-d4dm/blob/master/analysis_plan.md), which is an evolving document.## Teacher InstrumentHere we begin exploring the data we obtained from interviewing the teachers.<jupyter_code>tchr_ct = tchrs.shape[0]<jupyter_output><empty_output><jupyter_text>Our dataset contains interviews with {{tchr_ct}} teachers.### Teachers visited previously by CSOs
The underlying assumption of most of the interview protocol is that the teacher has had a coaching interaction with a CSO. The first issue we should then address is the proportion of teachers who have received a visit from a CSO.<jupyter_code>never = 100 * ((tchr_ct - tchrs.vis_before.sum())/tchr_ct)<jupyter_output><empty_output><jupyter_text>We see that {{np.round(never, decimals=2)}}% of teachers interviewed had never been previously visited by CSOs.### Number of coaching visits in the last academic term
We have confirmed that the overwhelming majority of our teachers have been visited. We can have greater confidence in the responses they give us over the course of the interview if they have had a visit in the recent past. We therefore asked the teachers to tell us how many times they had been visited by their CSO in the preceding academic term (Term 2 of the Kenyan academic year, running from roughly May-July 2018).<jupyter_code>tchrs.vis_before_freq = tchrs.vis_before_freq.replace({55: ">4x"})
viscount_df = pd.DataFrame(tchrs.vis_before_freq.value_counts(sort=False)).rename_axis("prevterm_vis").reset_index()
viscount_df["pct"] = np.round(100 * (viscount_df.vis_before_freq / tchrs.vis_before.sum()), decimals=2)
more_than_monthly = viscount_df[viscount_df.prevterm_vis.isin([4, ">4x"])].pct.sum()
alt.Chart(viscount_df, title="# of times CSO visited in preceding term").mark_bar().encode(
alt.Y("prevterm_vis:O", title="CSO visits last term"),
alt.X("vis_before_freq:Q", title="# Teachers"),
tooltip="pct")<jupyter_output><empty_output><jupyter_text>We see that roughly {{viscount_df[viscount_df.prevterm_vis==0].pct[0]}}% of the respondents, while they'd been visited by a CSO in the past, had not been visited in the preceding term. However, {{viscount_df[viscount_df.prevterm_vis.isin([1, 2, 3])].pct.sum()}}% of the respondents were visited between once per term and once per month. {{more_than_monthly}}% of the teachers were visited by their CSOs more frequently than monthly.### CSOs' activities during last coaching visit
We are interested in knowing what CSOs are focusing on when they pay a visit to a school. Are they observing a lesson? Are they giving feedback to the teacher? Do they assess pupils' fluency rates? Do they take advantage of their presence at the school to meet with the head teacher (HT)? What kinds of things are they doing _besides_ these activities?<jupyter_code>visact_df = pd.DataFrame.from_dict({"activities": ["Assessed pupils",
"Talked to HT",
"Provided feedback on lesson",
"Had general talk",
"Other"],
"tchrs_reporting": [tchrs[tchrs.vis_before != 0].vis_act_kids.sum(),
tchrs[tchrs.vis_before != 0].vis_act_ht.sum(),
tchrs[tchrs.vis_before != 0].vis_act_fdbk.sum(),
tchrs[tchrs.vis_before != 0].vis_act_gen.sum(),
tchrs[tchrs.vis_before != 0].vis_act_other.sum()]})
visact_df["pct"] = np.round(visact_df.tchrs_reporting.apply(lambda x: 100 * (x/(tchr_ct - never))), decimals=2)
visact_df
alt.Chart(visact_df, title="CSO activities during previous visit").mark_bar().encode(
alt.Y("activities:O",
title="Activities named",
sort = alt.EncodingSortField(field="tchrs_reporting", op="values", order="ascending")),
alt.X("pct:Q",
title="% of teachers responding"),
tooltip = "tchrs_reporting")<jupyter_output><empty_output><jupyter_text>Nearly {{int(np.ceil(visact_df.pct.max()))}}% of teachers report that when the CSO last visited, s/he provided feedback on a lesson. Nearly the same quantity said that the CSO assessed pupils. Neither of these is surprising, as those activities are key features of a "reimbursable" or "valid" lesson observation. If anything, it is interesting that these numbers are not higher, given that we have excluded from our denominator those teachers who said they had never received a visit from the CSO.
Of note is the relatively low proportion of teachers reporting the CSO had spoken with the HT. While Tusome encourages CSOs to speak with HTs as part of the standard protocol for visiting a school, it is not explicitly considered as a factor for reimbursement of transportation costs for visiting that school.
That said, it is also possible that teachers may simply not be aware of activities taking place outside of their classroom. They and their classrooms would have been the objects of the lesson observation and fluency assessment; they may not have as much visibility into what happened before or after the CSO entered their classroom.
A little over {{int(np.floor(visact_df[visact_df.activities=="Other"].pct))}}% of teachers reported the CSO conducted an activity that was not listed in the questionnaire. Below we have sampled 20 of the things that they reported which were not captured in the questionnaire.<jupyter_code>print(tchrs[tchrs.vis_act_other_det.notna() & (tchrs.vis_act_other_det != "")].vis_act_other_det.sample(20))<jupyter_output>160 Talks to other staff memebers.
467 He always notifys me through the HT that he will come to support us on a given day.
438 look for GPS,asks my details including lesson
491 shortage of [...]<jupyter_text>### CSOs using tablets or pen & paper during observation
The _Tangerine:Tutor_ app was developed with the intent and belief that CSOs would use it _while observing_ the lesson. However, Tusome staff report that not all CSOs find the tablet interface comfortable, and not all use it with ease. So we asked teachers to report whether CSOs use the tablets during the lesson observation, and also whether they use pen and paper.Roughly {{int(np.round(100 * (tchrs.cso_usetab_yn.sum()/tchr_ct), decimals=2))}}% of teachers reported that the CSOs use tablets during lesson observation; roughly {{int(np.round(100 * (tchrs.cso_usepcl_yn.sum()/tchr_ct), decimals=2))}}% of teachers reported the CSOs use pen and paper during the lesson observation.<jupyter_code>tabs_n_pencils = pd.crosstab(tchrs.cso_usetab_yn, tchrs.cso_usepcl_yn)
# tabs_n_pencils.rename(columns=["No pen", "Uses pen"])
tabs_n_pencils = tabs_n_pencils.rename_axis("Uses tablet").rename_axis("Uses pen and paper", axis="columns")
tabs_n_pencils = tabs_n_pencils.rename({0: "No", 1: "Yes"}, axis="columns").rename({0: "No", 1: "Yes"}, axis="index")
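# Optional (added): the same table as proportions of all respondents, using pandas' normalize option.
pd.crosstab(tchrs.cso_usetab_yn, tchrs.cso_usepcl_yn, normalize="all").round(3)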
tabs_n_pencils<jupyter_output><empty_output><jupyter_text>We see that the overwhelming majority of CSOs are using both tablets _and_ pen-and-paper systems when observing the teachers' lesson. There have historically been some instruments/data that CSOs were tasked by TSC to complete that were not rendered in _Tangerine_ format on the tablets; as of midway through Term 3 of the 2018 academic year, those instruments (mostly for the TSC's TPAD \[Teacher Professional Appraisal and Development\] project) are now in _Tangerine_. While the use of pen and paper does not appear to have come at the expense of using the tablets - indeed, it appears to be complementary, as nearly all CSOs are using both - Tusome should nonetheless follow up on these reports of CSOs' usage of pen and paper to understand the roots of the practice.### CSOs' usage of the tablets to assess pupils' performance
Tusome's coaching protocol requires CSOs to randomly select three children from the classroom at the end of the lesson to assess their reading fluency. The prompt the children are to read from is a laminated sheet of paper with a short passage printed on it; the CSOs are instructed to use the tablet to record the children's responses. The tablet is then able to calculate fluency rates and store those as data associated with that observation.
Approximately {{int(np.round(100 * (tchrs.cso_usetab_pup_yn.sum()/tchr_ct), decimals=2))}}% of the teachers reported that CSOs use the tablets to assess children's reading fluency.### Teachers' experience of feedback, and CSOs' use of tablets
Tusome asked teachers whether the CSO gave feedback on the lesson last time s/he paid a visit, whether s/he used the tablet to do so, and whether the teacher was able to recall specific feedback the CSO provided.<jupyter_code>feedback = {"CSO gave feedback": tchrs.cso_gave_fdbk_yn.sum(),
"CSO used a tablet": tchrs.cso_usetab_fdbk_yn.sum(),
"Tchr remembers feedback": tchrs.cso_fdbk_remember.sum()}
fdbk_df = pd.DataFrame.from_dict(feedback, orient="index", columns=["ct"])
fdbk_df["pct"] = 100 * np.round(fdbk_df["ct"] / tchrs.shape[0], decimals=3)
fdbk_df = fdbk_df.rename_axis("event").reset_index()
fdbk_df
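# Optional visualization (added sketch, mirroring the bar charts used elsewhere in this notebook):
alt.Chart(fdbk_df, title="Teachers' experience of CSO feedback").mark_bar().encode(
    alt.Y("event:O", title="Event"),
    alt.X("pct:Q", title="% of teachers"),
    tooltip="ct")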
tchrs[tchrs.cso_fdbk_det.notna() & (tchrs.cso_fdbk_det != "")].cso_fdbk_det.sample(20)<jupyter_output><empty_output><jupyter_text>## CSO InstrumentHere we begin exploring the data we obtained from interviewing the CSOs.### Length of experience using tablet-based Tangerine
_When did you first receive a tablet from Tusome or PRIMR?_
We want to convert the year/month CSOs provided into a number so we can work with it easily. We'll make the simplifying assumption that the tablet was received on the first day of the month they provided, and that this interview was conducted on the first day of the month. Neither of those are true, but the marginal days are unlikely to make a practical difference in a CSO's facility with the tool.<jupyter_code>now = dt.datetime(2018, 10, 1)
csos["tab_usage"] = pd.to_timedelta(now - pd.to_datetime({"year": csos.recd_tab_yr, "month": csos.recd_tab_month, "day": 1})).dt.days<jupyter_output><empty_output><jupyter_text>We'll also want to convert the duration to months, since that will be easier to digest, and filter out any missing values.<jupyter_code>tab_usage = pd.DataFrame(csos[csos.tab_usage.notna()].tab_usage / 30)
# tab_usage
alt.Chart(tab_usage, title="Distribution of CSO tablet usage, months").mark_bar().encode(
alt.X("tab_usage:Q",
bin=alt.BinParams(step=1), title="# Months has had a tablet"),
alt.Y("count()", title="# of CSOs"))<jupyter_output><empty_output><jupyter_text>We see that the majority of our CSOs have had their tablets for roughly {{int(tab_usage.tab_usage.mode()[0])}} months ({{np.round(tab_usage.tab_usage.mode()[0]/12, decimals=1)}} years). This is in keeping with the beginning of the Tusome program, and is as expected. CSOs who have had their tablets for longer are likely veterans of the PRIMR program (Tusome's predecessor); those who have had tablets for fewer months may have assumed their roles more recently. (CSO turnover due to retirement, promotion, maternity leave, etc. is an issue which Tusome is constantly needing to manage.)### Proportion of CSOs reporting use of each application
_Which applications do you use frequently when supporting teachers? ...After recording unprompted responses, read the list of options and record responses._
+ Tangerine Tutor
+ Papaya
+ Tusome Books (in Adobe Acrobat)
+ Tusome Videos (in MX Player)
The CSOs' tablets come equipped with several tools meant to support their efforts as instructional coaches. These include the [_Tangerine:Tutor_](http://www.tangerinecentral.org/tutor/) application, the [_Papaya_](http://schoolsnetkenya.com/tusome-early-literacy-programme-sounds-application/) application, PDF versions of Tusome's instructional materials (pupil's books and teacher's guides), and videos that model effective instructional delivery.
We are interested in which of those tools CSOs use, and how frequently. We will provide an analysis of their unprompted free-response answers to the question above. For the moment, we note the tools they said they use when we explicitly prompted them with the list of tools available.<jupyter_code>app_users = {"tutor": csos.freqapps_tt_promp.sum(),
"papaya": csos.freqapps_papaya_promp.sum(),
"books": csos.freqapps_bks_promp.sum(),
"videos": csos.freqapps_vids_promp.sum()}
app_usage = pd.DataFrame.from_dict(app_users, orient="index", columns=["ct"])
app_usage["pct"] = 100 * np.round(app_usage["ct"] / csos.shape[0], decimals=3)
app_usage = app_usage.rename_axis("app").reset_index()
alt.Chart(app_usage, title="Proportion of CSOs reporting tool usage").mark_bar().encode(
alt.Y("app:O",
title="Application",
sort = alt.EncodingSortField(field="pct", op="values", order="ascending"),
),
alt.X("pct:Q", title="% of CSOs reporting usage"),
tooltip="pct")<jupyter_output><empty_output><jupyter_text>The overwhelming majority of the CSOs report using all of the applications. That said, the videos and books are used less frequently.### Proportion of CSOs who refer to Tangerine to provide post-observation feedback
_Do you refer to Tangerine when giving teachers feedback after observing a lesson?_
The _Tangerine:Tutor_ application analyzes the pattern of the CSO's responses to the observation items and surfaces actionable feedback that could be share with the teacher to improve her or his instruction. While the auto-generated feedback is intended as a tool to help CSOs give more effective guidance to teachers, it can be quite extensive and is not prioritized. We were interested to know whether the CSOs make reference to the application's auto-generated feedback when they hold their post-observation debriefing session with the teacher.<jupyter_code>100 * np.round(csos.ref_tang_fdbk.sum() / csos.shape[0], decimals=3)<jupyter_output><empty_output><jupyter_text>Again, the overwhelming majority of CSOs report using the auto-generated feedback when holding their debrief session with the teacher. We will separately provide an analysis of their open-ended answers regarding _what_ they refer to and what they find most useful.### Proportion of CSOs who use Tangerine to plan their work
_Do you refer to Tangerine to help you plan your work when you are **not** either observing a teacher or giving the teacher feedback?_
Version 3 of the _Tangerine:Tutor_ application features a screen which allows a CSO to see schools they have visited and which they have not. We were interested to know whether the CSOs are leveraging this feature—or others, such as the pupils' fluency rates, which are reported in the feedback—to make decisions about which schools to visit in the future.<jupyter_code>100 * np.round(csos.refer_tang_nonobs.sum() / csos.shape[0], decimals=3)<jupyter_output><empty_output><jupyter_text>Fewer than two-thirds of CSOs refer to Tangerine to plan their work. A sampling of their reasons is provided surfaced below:<jupyter_code>pd.set_option('display.max_colwidth', -1)
csos[csos.tang_nonobs_nowhynot.notnull() & (csos.tang_nonobs_nowhynot != "")].tang_nonobs_nowhynot.sample(10)<jupyter_output><empty_output><jupyter_text>This result surfaces a few issues Tusome might consider addressing next time CSOs receive a refresher training on Tangerine's use.
1. A lack of awareness of how the information provided by the tablet (schools visited and not visited) could be operationalized
1. A fear that they would be accused of falsifying observation data, rooted specifically in a misunderstanding of the application's GPS-capture functionality
1. A mental compartmentalization of Tangerine as being a Tusome-specific tool, not for use in broader contexts
Other reasons given suggest that the need which Tangerine might fill is already being addressed separately.
1. The CSO keeps a personal record (outside of the tablet) in which they track school visitation
1. The CSO has prepared a work schedule, and uses that as their guide until the month's activities have concluded.### Frequency with which users refer to the application
\[If the CSO refers to the Tangerine for planning purposes\], _how often do you reference the data?_
<jupyter_code>tchk_freq = csos[csos.freq_refer_tang_plan.notna()].freq_refer_tang_plan.sort_values().value_counts(sort=False).to_frame(name="ct")
tchk_freq = tchk_freq.rename_axis("frequency").reset_index()
tchk_freq["frequency"] = tchk_freq["frequency"].replace({
1: "Daily",
2: "Weekly",
3: "Monthly",
4: "Termly"})
tchk_freq["sort_order"] = tchk_freq.index<jupyter_output><empty_output><jupyter_text>It appears that among the CSOs who check the application, it is most common to check it at least weekly.<jupyter_code>alt.Chart(tchk_freq, title="Tangerine app checking behavior").mark_bar().encode(
alt.X("ct:Q"),
alt.Y("frequency:O", sort = alt.EncodingSortField(field="sort_order:Q", op="values", order="ascending")),
color = "frequency")<jupyter_output><empty_output><jupyter_text>### CSO reference to the Tangerine Dashboard
_In the last term, how often did you look at the Tangerine Dashboard?_
The data that is generated by the CSOs' lesson observations is uploaded to the cloud and reported on the [Tangerine Dashboard](http://tools.tusome.tangerinecentral.org/_csv/report/group-national_tablet_program/00b0a09a-2a9f-baca-2acb-c6264d4247cb,c835fc38-de99-d064-59d3-e772ccefcf7d/2018/1/ep8yqMKT.html#tutor). This Dashboard is reviewed by senior management within the Ministry of Education: the Principal Secretary, his Directors, and their deputies. It is also sent to the Directors of MOE and TSC at the County level. As these latter personnel oversee the CSOs, the Dashboard may have an effect ...<jupyter_code>ip=get_ipython()
locate_dir = ip.ipython_dir
profile_dir = ip.config.ProfileDir.location
print("IPython location: %s" % locate_dir)
print("IPython profile location: %s" % profile_dir)
import os
extra_paths = ip.config.NotebookApp['extra_static_paths']
# keep static_paths as a list so the loop below iterates over directories, not characters
static_paths = [os.path.join(profile_dir, 'static')]
if type(extra_paths) is list:
    static_paths = extra_paths + static_paths
print("The custom.js file will be searched in this list of directories: %s" % static_paths)
import os
import re
for static_path in static_paths:
custom_js = os.path.join(static_path,'custom', 'custom.js')
if os.path.isfile(custom_js) is True:
print("custom.js found in %s" % custom_js)
break
test = 91.1
int(test)
tab_usage.tab_usage.mode()<jupyter_output><empty_output>
|
no_license
|
/prev_eda.ipynb
|
TSSlade/tusome-d4dm
| 16 |
<jupyter_start><jupyter_text>## Application 1<jupyter_code>import DSGRN
import Berry_2019_figures_results as Berry
from min_interval_posets import posets, poset_distance
from copy import deepcopy
from IPython import display
import matplotlib.pyplot as plt
from importlib import reload
from matplotlib import rc
rc('text', usetex=True)
fontsize=20
rc('axes', labelsize=fontsize) # fontsize of the x and y labels
rc('xtick', labelsize=fontsize) # fontsize of the tick labels
rc('ytick', labelsize=fontsize) # fontsize of the tick labels
rc('legend', fontsize=16) # legend fontsize
%matplotlib inline
wavepool = DSGRN.Network("good_wavepool.txt")
swapped = DSGRN.Network("bad_wavepool.txt")
DSGRN.DrawGraph(wavepool)
DSGRN.DrawGraph(swapped)
wt1_file = "WT1_WT2_microarray_interpolated/wt1_microarray_coregenes_lifepoints_interpol_trim.csv"
wt2_file = "WT1_WT2_microarray_interpolated/wt2_microarray_coregenes_lifepoints_interpol.csv"
epsilons = [0.0, 0.01,0.04,0.05,0.06,0.08,0.09,0.1,0.14,0.15]
names = ["YOX1","SWI4","HCM1","NDD1"]
posets1 = Berry.getposets(wt1_file,"row",epsilons,names=names)
posets2 = Berry.getposets(wt2_file,"row",epsilons,names=names)
# graph data
def make_fig(fname,savename,start_time=None,end_time=None,names=None):
curves = Berry.row(fname)
subset_curves = deepcopy(curves)
if names is not None:
for name in curves:
if name not in names:
subset_curves.pop(name)
for name,curve in subset_curves.items():
n = curve.normalize()
if start_time is not None and end_time is not None:
n = curve.trim(start_time,end_time)
times,vals = zip(*n.items())
plt.plot(times,vals,label=r"${}$".format(name))
lgd = plt.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.ylabel(r"\textbf{normalized expression}")
plt.xlabel(r"\textbf{time points}")
plt.savefig(savename,bbox_extra_artists=(lgd,), bbox_inches='tight')
display.display(plt.show())
make_fig(wt1_file,"time_series_rep1.pdf",names=names)
make_fig(wt2_file,"time_series_rep2.pdf",names=names)
def make_posets(p,network):
events = list(p[1][0])
event_ordering = list(p[1][1])
poe = DSGRN.PosetOfExtrema(network, events, event_ordering)
return poe,len(events)
def create_pattern_graph(poset,network):
eps = poset[0]
poe,_ = make_posets(poset,network)
return DSGRN.PatternGraph(poe),eps
def create_search_graph(param):
domain_graph = DSGRN.DomainGraph(param)
return DSGRN.SearchGraph(domain_graph)
def get_matches(posets,network):
param_matches = {}
parameter_graph = DSGRN.ParameterGraph(network)
for poset in posets:
pattern_graph,eps = create_pattern_graph(poset,network)
param_matches[eps] = []
for pind in range(parameter_graph.size()):
parameter = parameter_graph.parameter(pind)
search_graph = create_search_graph(parameter)
matching_graph = DSGRN.MatchingGraph(search_graph, pattern_graph)
path_match = DSGRN.PathMatch(matching_graph)
if path_match:
param_matches[eps].append(pind)
return param_matches
wavepool1_parameter_matches=get_matches(posets1,wavepool)
wavepool2_parameter_matches=get_matches(posets2,wavepool)
swapped11_parameter_matches=get_matches(posets1,swapped)
swapped12_parameter_matches=get_matches(posets2,swapped)
for eps in epsilons:
print("Epsilon = {}".format(eps))
print("Number of matches in rep 1, wavepool: {}".format(len(wavepool1_parameter_matches[eps])))
print("Number of matches in rep 2, wavepool: {}".format(len(wavepool2_parameter_matches[eps])))
print("Number of matches in rep 1, swapped: {}".format(len(swapped11_parameter_matches[eps])))
print("Number of matches in rep 2, swapped: {}".format(len(swapped12_parameter_matches[eps])))
print(wavepool1_parameter_matches[0.04])
print(wavepool2_parameter_matches[0.04])
for (p,q) in zip(posets1,posets2):
print("Number of extrema is {},{} for replicates 1,2 at epsilon {}".format(len(p[1][0]),len(q[1][0]),p[0]))
# example poset between reps 1 and 2
# notice that even with the same number of extrema, the identity of the nodes vary
print("Replicate 1, eps 0.15")
poe, N = make_posets(posets1[-1],wavepool)
display.display(DSGRN.DrawGraph(poe))
with open("example_poset.dot","w") as f:
f.write(poe.graphviz())
print("Replicate 2, eps 0.15")
poe, N = make_posets(posets2[-1],wavepool)
display.display(DSGRN.DrawGraph(poe))
print("Replicate 1, eps 0.0")
poe, N = make_posets(posets1[0],wavepool)
display.display(DSGRN.DrawGraph(poe))
with open("example_poset0.dot","w") as f:
f.write(poe.graphviz())
print("Replicate 1, eps 0.01")
poe, N = make_posets(posets1[1],wavepool)
display.display(DSGRN.DrawGraph(poe))
print("Replicate 1, eps 0.04")
poe, N = make_posets(posets1[2],wavepool)
display.display(DSGRN.DrawGraph(poe))
# example pattern graph
pattern_graph,_ = create_pattern_graph(posets1[-1],wavepool)
with open("example_pattern_graph.dot","w") as f:
f.write(pattern_graph.graphviz())
DSGRN.DrawGraph(pattern_graph)
# example search graph
pind = wavepool1_parameter_matches[0.15][0]
parameter_graph = DSGRN.ParameterGraph(wavepool)
param = parameter_graph.parameter(pind)
search_graph = create_search_graph(param)
display.display(DSGRN.DrawGraph(search_graph))
with open("example_search_graph.dot","w") as f:
f.write(search_graph.graphviz())
# example matching graph
matching_graph = DSGRN.MatchingGraph(search_graph, pattern_graph)
path_match = DSGRN.PathMatch(matching_graph)
with open("example_matching_graph.dot","w") as f:
f.write(matching_graph.graphviz_with_highlighted_path(path_match))
DSGRN.DrawGraphWithHighlightedPath(matching_graph, path_match)<jupyter_output><empty_output>
|
permissive
|
/scripts/Berry_2019_figures_results_Application1.ipynb
|
breecummins/min_interval_posets
| 1 |
<jupyter_start><jupyter_text># Image Classification with FastAI2
Notebook based on
* https://github.com/fastai/fastbook/blob/master/02_production.ipynb
* https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb
* https://github.com/fastai/fastbook/blob/master/18_CAM.ipynb<jupyter_code># Make sure to go to Runtime -> Change runtime type -> GPU
# when training models
## installs fastai v2 (Google Colab comes with fastai v1 by default)
# this will also ask permision to access your Google Drive
!pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
import fastai
fastai.__version__
# the variable gdrive now contains you Google Drive path
# this is a pathlib Path object
# we can create new file paths from it using the '/' operator
# See: https://realpython.com/python-pathlib/
from fastbook import *
gdrive
# we can check this Path using os.listdir()
import os
os.listdir(gdrive)[0:5]
# fastai comes with a curated list of datasets
# See: https://docs.fast.ai/data.external.html
dir(URLs)
# URLs.PETS links to a dataset of dog and cat images compiled
# by Oxford's Visual Geometry Group
URLs.PETS
?untar_data
import fastai
fastai.__version__
?untar_data
untar_data(URLs.PETS, (gdrive / 'pets_images'))
!ls '/root/.fastai/data/oxford-iiit-pet'
!ls /root/.fastai/data
from fastai.vision.all import *
# Use gdrive and the dest argument to save the uncompressed data
# in a folder named 'pets_images'
# see: https://github.com/fastai/fastai/blob/89770a495b500f585210845e195c5c9a7996f2f4/fastai/data/external.py#L244
# See: https://www.geeksforgeeks.org/python-os-path-exists-method/
if not (gdrive / 'pets_images').is_dir():
# https://www.geeksforgeeks.org/python-os-mkdir-method/
os.mkdir(gdrive / 'pets_images')
# this will download the data, decompress it, and return the path where the uncompressed folder is
# see: https://github.com/fastai/fastai/blob/89770a495b500f585210845e195c5c9a7996f2f4/fastai/data/external.py#L244
path = untar_data(URLs.PETS, dest = (gdrive / 'pets_images'))
path = Path('/content/gdrive/My Drive/pets_images/oxford-iiit-pet')
# what we have downloaded
os.listdir(path)
# We take a look at the images
len(os.listdir(path / 'images')), os.listdir(path / 'images')[:10]
from PIL import Image
imag = Image.open(path / 'images' / os.listdir(path / 'images')[0])
imag.save(Path('.')/'name.jpg')
import os
# this tells you your default Path (current working directory)
os.getcwd()
os.listdir(os.getcwd())
# viewing a single image
from PIL import Image
# change the index to change the image
index = 0
image_filenames = os.listdir(path / 'images')
print(image_filenames[index])
img_pil = Image.open(path /'images'/ image_filenames[index])
print(type(img_pil))
import numpy as np
print(Image.fromarray(np.array(img_pil)))
img_pil
path /'images'/ image_filenames[index]
# turning the image into a tensor
# https://github.com/fastai/fastai/blob/66a03da8a11cd85188f4c6f063b98c4a209492e8/fastai/vision/core.py#L91
img_as_tensor = image2tensor(load_image(path /'images'/ image_filenames[index]))
img_as_tensor
# let's check the annotations file, out of curiosity
os.listdir(path / 'annotations')[:10]
# the space in 'My Drive' gives us some pain when trying to use the path variable through bash
!ls {path}
from pathlib import Path
Path('path_to_object')
type(gdrive)
gdrive.stem, gdrive.root
type(path)
# we convert the path to string
# fastai's ls() method works like os.listdir() and bash's ls
str(path.ls()[1])
# we won't need the annotations file, as the labels of the images are already in the filenames
!cat '/content/gdrive/My Drive/pets_images/oxford-iiit-pet/annotations/test.txt' | head
image_filenames[0:3]
# let's try using label extraction using regular expression
# as shown in the notebook
fname = (path/"images").ls()[0]
# we want to discard the file extension and the number of the pet,
# i.e. everything from the last '_' onwards
re.findall(r'(.+)_\d+.jpg$', fname.name)[0]
# would str.split() give us the same thing? (see the note below)
# https://stackoverflow.com/a/48593823/45963
str(fname.name).split('_')[0]
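# note: str.split('_')[0] truncates multi-word breeds such as 'german_shorthaired',
# while the regex keeps the full breed name; splitting once from the right matches the regex
str(fname.name).rsplit('_', 1)[0]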
import pandas as pd
list_of_breeds = [re.findall(r'(.+)_\d+.jpg$', fname.name) for fname in (path/"images").ls()]
pd.Series([item[0] for item in list_of_breeds if isinstance(item, list) and len(item) > 0]).value_counts()
# fastai's DataBlock transforms raw data into PyTorch Datasets and Dataloaders
# that are fed into the forward function of nn.Module subclasses
# https://docs.fast.ai/data.block.html#DataBlock.dataloaders
pets = DataBlock(blocks = (ImageBlock, CategoryBlock),
# get_image_files reads the name of the image files
get_items=get_image_files,
# here we split the dataset into train and validation sets
splitter=RandomSplitter(seed=42),
#RegexLabeller uses the expression that we saw above
get_y=using_attr(RegexLabeller(r'(.+)_\d+.jpg$'), 'name'),
# these two lines implement fastai's 'presizing' data augmentation strategy
# this is resizing and augmenting the data before feeding it to the trainer
# it is important to use a Resize transform to make all images in a batch able to fit
# into a single tensor
item_tfms=Resize(460),
#See: https://docs.fast.ai/vision.augment.html#aug_transforms
batch_tfms=aug_transforms(size=224, min_scale=0.75))
pets
#summary will show us how we have split our train and validation sets
#https://docs.fast.ai/data.block.html#Debugging
#https://forums.fast.ai/t/datablock-summary-is-amazing-in-v2/64632
pets.summary(path / 'images')
# https://forums.fast.ai/t/solved-reproducibility-where-is-the-randomness-coming-in/31628/25
# we set up the seed defined for the training and validation split
set_seed(42, True)
dls = pets.dataloaders(path / 'images')
dls
# We inspect a batch of the labeled data
# We should *always* do this, as there is no guarantee
# that the DataBlock was created correctly
dls.show_batch()
# we can inspect how a single image from the training set is being augmented
# see https://github.com/fastai/fastbook/blob/master/02_production.ipynb
dls.train.show_batch(max_n=8, nrows=2, unique=True)
# check images in the validation set
# https://github.com/fastai/fastbook/blob/master/02_production.ipynb
dls.valid.show_batch(max_n=16, nrows=4)
# we get a batch of data as training tensor images and training tensor labels using one_batch()
x, y = dls.one_batch()
print(f'batch size = {len(y)}')
# labels are encoded as integers, based on alphabetical order
y
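# decode a few of the integer labels back into breed names via the vocab
[dls.vocab[int(i)] for i in y[:5]]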
# we create a convolutional network learner with cnn_learner
# we pass it the dataloaders for training and validation,
# specify resnet34 as the pretrained model that we will use, and define
# error_rate (1 - accuracy) as the metric
# try ctrl + click on cnn_learner to see its definition in Google Colab
# Q: what is its default optimization algorithm?
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn
# we can check the architecture of the PyTorch model
# Q: What is the number of out_features in the last Linear layer? Why?
learn.model
# fine_tune first trains just the head of the model (the final layers)
# and then all layers as a second step
# the parameter that it receives is the number of epochs for which it will train all layers
# layers use 'discriminative learning rates':
# final layers get higher learning rates than initial layers
# use ctrl + click to read the definition of fine_tune in schedule.py
learn.fine_tune(2)
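# a rough, simplified sketch of what fine_tune does under the hood, assuming fastai v2's
# freeze/unfreeze and fit_one_cycle API (the exact defaults live in fine_tune in schedule.py);
# learn_manual is a hypothetical fresh learner, and the lines are commented out so the
# training above is not repeated
# learn_manual = cnn_learner(dls, resnet34, metrics=error_rate)
# learn_manual.freeze()                               # 1) train only the head
# learn_manual.fit_one_cycle(1, 2e-3)
# learn_manual.unfreeze()                             # 2) then train all layers
# learn_manual.fit_one_cycle(2, slice(2e-5, 2e-3))    #    with discriminative learning rates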
# sample save of the model
#learn.export(gdrive / 'pets_path'/ 'saved_model.pkl')
# here we show a graph of the validation and training losses
learn.recorder.plot_loss()
# show results from the validation set
learn.show_results(max_n=6, figsize=(7, 8))
# We get the activations of the final layer of the neural network using learn.get_preds()
# here we are using the same x and y data that we got from one_batch()
pred_probs, pred_labels = learn.get_preds(dl=[(x, y)])
# Q: Why do these have different dimensions?
pred_probs[0].shape, pred_labels.shape
# the predicted probabilities should add up to 1
pred_probs[0].sum()<jupyter_output><empty_output><jupyter_text>## Model interpretation
<jupyter_code># ClassificationIntrepretation will allow us to examine our model
# using the validation data
interp = ClassificationInterpretation.from_learner(learn)
interp
# plot a (huge) confusion matrix from the validation set data
interp.plot_confusion_matrix(figsize=(14,14), dpi=70)
# check out the predictions that have been confused at least six times
interp.most_confused(min_val=6)
# plot the top losses
# prediction / actual (ground truth) label / loss value / probability (model confidence)
#https://github.com/fastai/fastbook/blob/master/02_production.ipynb
interp.plot_top_losses(5, nrows=5)
#clean mislabeled images
# won't work without importing fastai.vision.widgets
from fastai.vision.widgets import *
cleaner = ImageClassifierCleaner(learn)
cleaner
# print the classification report
# https://github.com/fastai/fastai/blob/5f3ed67f40c32df0948d6e67bfb4d08034139eb9/fastai/interpret.py#L101
interp.print_classification_report()<jupyter_output><empty_output><jupyter_text>## The Learning Rate finder
https://arxiv.org/abs/1506.01186
https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
https://blog.dataiku.com/the-learning-rate-finder-technique-how-reliable-is-it
https://towardsdatascience.com/finding-good-learning-rate-and-the-one-cycle-policy-7159fe1db5d6
https://walkwithfastai.com/lr_finder
https://www.pyimagesearch.com/2019/07/29/cyclical-learning-rates-with-keras-and-deep-learning/
https://www.pyimagesearch.com/2019/08/05/keras-learning-rate-finder/
https://forums.fast.ai/t/interpreting-the-sched-plot-from-lr-find/12329/4<jupyter_code>?learn.fine_tune
learn = cnn_learner(dls, resnet34, metrics=error_rate)
# let's see what happens when we train using a **big** learning rate of 0.1
# Q: How does this error rate compare to the previous run?
# See the definition of fine_tune: what is the default base_lr that we used before?
learn.fine_tune(1, base_lr=0.1)<jupyter_output><empty_output><jupyter_text>"Over an epoch begin your SGD with a very low learning rate (like 10−8) but change it (by multiplying it by a certain factor for instance) at each mini-batch until it reaches a very high value (like 1 or 10). Record the loss each time at each iteration and once you're finished, plot those losses against the learning rate. You'll find something like this"
""The recommended minimum learning rate is the value where the loss decreases the fastest (minimum negative gradient), while the recommended maximum learning rate is 10 times less than the learning rate where the loss is minimum. Why not just the very minimum of the loss? Why 10 times less? Because what we actually plot is a smoothed version of the loss, and taking the learning rate corresponding to the minimum loss is likely to be too large and make the loss diverge during training""<jupyter_code>learn = cnn_learner(dls, resnet34, metrics=error_rate)
# why do we care about lr_steep or lr_min
lr_min, lr_steep = learn.lr_find()
print(f"Minimum/10: {lr_min:.2e}, steepest point: {lr_steep:.2e}")
1.00e-02
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(2, base_lr=lr_steep)
learn<jupyter_output><empty_output><jupyter_text>## Testing with out-of-training data<jupyter_code># let's try the model in a couple of test images of a breed that was **not** in the training set
import skimage.io as io
shep1 = io.imread('https://raw.githubusercontent.com/andandandand/intro-computer-vision/main/images/German-Shepherd-on-White-00.jpg?token=AAHZIX6PZ34YFM6T6CXEN3TAHJK5W')[:,:,:3]
print(type(shep1))
Image.fromarray(shep1)
learn.predict(shep1)
predicted_breed,int_label,probs = learn.predict(shep1)
print(f"Predicted breed: {predicted_breed}.")
print(f"Probability of the breed: {probs[int_label].item():.6f}")
shep2 = io.imread('https://raw.githubusercontent.com/andandandand/intro-computer-vision/main/images/german_shepherd.jpeg?token=AAHZIX2N2P3X35RQMDKH4JTAHJLR2')[:,:,:3]
Image.fromarray(shep2)
learn.predict(shep2)
predicted_breed,int_label,probs = learn.predict(shep2)
print(f"Predicted breed: {predicted_breed}.")
print(f"Probability of the breed: {probs[int_label].item():.6f}")
shep3 = io.imread('https://raw.githubusercontent.com/andandandand/intro-computer-vision/main/images/German_Shepherd_-_DSC_0346_(10096362833).jpg?token=AAHZIX5UT353RV4LNNG6XH3AHJLXO')[:,:,:3]
Image.fromarray(shep3)
learn.predict(shep3)
predicted_breed,int_label,probs = learn.predict(shep3)
print(f"Predicted breed: {predicted_breed}.")
print(f"Probability of the breed: {probs[int_label].item():.6f}")
[imag for imag in os.listdir(path / 'images') if 'german' in imag]<jupyter_output><empty_output><jupyter_text>### CAM and GradCAM
https://github.com/fastai/fastbook/blob/master/18_CAM.ipynb<jupyter_code>path
# let's explore the behavior of the classifier
# in the dataset all cat image filenames start with an uppercase letter
# and dog image filenames start with a lowercase letter
def is_dog(x): return x[0].islower()
dls = ImageDataLoaders.from_name_func(
path, get_image_files(path), valid_pct=0.2, seed=21,
label_func=is_dog, item_tfms=Resize(224))
# we call this binary classifier dog_learn, to avoid clashes with the one
# for pet breeds
dog_learn = cnn_learner(dls, resnet34, metrics=error_rate)
dog_learn.fine_tune(1)
# call PILImage.create on shep1
img = PILImage.create(shep1)
img
# we put the image in a batch of the testing (not validation) set
x, = first(dls.test_dl([img]))
x.shape
# "For CAM we want to store the activations of the last convolutional layer.
# We put our hook function in a class so it has a state that we can access later, and just store a copy of the output:"
class Hook():
def hook_func(self, m, i, o): self.stored = o.detach().clone()
# We can then instantiate a Hook and attach it to the layer we want, which is the last layer of the CNN body:
hook_output = Hook()
hook_output
# the body of the model (the CNN backbone) is at model[0]
dog_learn.model[0]
hook = dog_learn.model[0].register_forward_hook(hook_output.hook_func)
hook
# feed the image through the model
with torch.no_grad(): output = dog_learn.model.eval()(x)
# access our stored activations
act = hook_output.stored[0]
act
# double check predictions
F.softmax(output, dim=-1)
# our model is confident that the picture is a dog
dls.vocab
# this is the final linear layer of the head
dog_learn.model[1][-1]
# and here are its weights
dog_learn.model[1][-1].weight.shape, dog_learn.model[1][-1].weight
# matrix multiplication using einstein summation, between the weights and the activation
# https://pytorch.org/docs/stable/generated/torch.einsum.html
# check the dimensions of both parts
print(dog_learn.model[1][-1].weight.shape, act.shape)
# using 'ck,kij->cij' which values are c,k,i, and j?
cam_map = torch.einsum('ck,kij->cij', dog_learn.model[1][-1].weight, act)
cam_map.shape
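# note: for this resnet34 body with 224x224 inputs, act is typically [512, 7, 7] and the
# last linear weight is [2, 512], so c = 2 classes, k = 512 channels, i = j = 7 spatial
# positions, and cam_map has shape [2, 7, 7]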
cam_map[1].detach().cpu()
np.array(cam_map[1].detach().cpu()).min(), np.array(cam_map[1].detach().cpu()).max()
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Why do we use decode() and dls.train?
x_dec = TensorImage(dls.train.decode((x,))[0][0])
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map[1].detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
# added colorbar
# https://matplotlib.org/3.1.1/gallery/axes_grid1/simple_colorbar.html#sphx-glr-gallery-axes-grid1-simple-colorbar-py
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
# remove the hook to prevent memory leakage
hook.remove()
# Same thing, now using a context manager
# we won't go deeper into this
class Hook():
def __init__(self, m):
self.hook = m.register_forward_hook(self.hook_func)
def hook_func(self, m, i, o): self.stored = o.detach().clone()
def __enter__(self, *args): return self
def __exit__(self, *args): self.hook.remove()
with Hook(dog_learn.model[0]) as hook:
with torch.no_grad(): output = dog_learn.model.eval()(x.cuda())
act = hook.stored
<jupyter_output><empty_output><jupyter_text>## Gradient CAM## Rationale:
> The gradients of the output of the last layer with respect to the input of that layer are equal to the layer weights, since it is a linear layer.
> With deeper layers, we still want the gradients, but they won't just be equal to the weights anymore. We have to calculate them. The gradients of every layer are calculated for us by PyTorch during the backward pass, but they're not stored (except for tensors where requires_grad is True). We can, however, register a hook on the backward pass, which PyTorch will give the gradients to as a parameter, so we can store them there. For this we will use a HookBwd class that works like Hook, but intercepts and stores gradients instead of activations:
<jupyter_code>class HookBwd():
def __init__(self, m):
self.hook = m.register_backward_hook(self.hook_func)
def hook_func(self, m, gi, go): self.stored = go[0].detach().clone()
def __enter__(self, *args): return self
def __exit__(self, *args): self.hook.remove()
cls = 1
with HookBwd(dog_learn.model[0]) as hookg:
with Hook(dog_learn.model[0]) as hook:
output = dog_learn.model.eval()(x.cuda())
act = hook.stored
output[0,cls].backward()
grad = hookg.stored
w = grad[0].mean(dim=[1,2], keepdim=True)
cam_map = (w * act[0]).sum(0)
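# grad[0] has shape [channels, h, w]; averaging over the spatial dims gives one weight per
# channel, and the weighted sum over channels above is the Grad-CAM heatmap for class `cls`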
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
# added colorbar
# https://matplotlib.org/3.1.1/gallery/axes_grid1/simple_colorbar.html#sphx-glr-gallery-axes-grid1-simple-colorbar-py
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
# this represents the second-to-last Resnet group
dog_learn.model[0][-2]
def get_cam_map(index):
with HookBwd(dog_learn.model[0][index]) as hookg:
with Hook(dog_learn.model[0][index]) as hook:
output = dog_learn.model.eval()(x.cuda())
act = hook.stored
output[0,cls].backward()
grad = hookg.stored
w = grad[0].mean(dim=[1,2], keepdim=True)
cam_map = (w * act[0]).sum(0)
return cam_map
cam_map = get_cam_map(-2)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
cam_map = get_cam_map(-3)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
cam_map = get_cam_map(-4)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
cam_map = get_cam_map(-5)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
cam_map = get_cam_map(-6)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
# Would get_cam_map(-7) work?
# check dog_learn.model[0][-7]
cam_map = get_cam_map(-8)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);
cam_map = get_cam_map(-1)
_,ax = plt.subplots()
x_dec.show(ctx=ax)
im = ax.imshow(cam_map.detach().cpu(), alpha=0.6, extent=(0,224,224,0),
interpolation='bilinear', cmap='magma');
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax);<jupyter_output><empty_output>
|
permissive
|
/fastai2_pet_classification_solution.ipynb
|
igivis7/computer-vision-dsr
| 7 |
<jupyter_start><jupyter_text># Frequency Analysis## Top 5 Talkers<jupyter_code>top5_talkers_ip = pd.DataFrame(df['src_IP'].value_counts()[:5])
top5_talkers_ip= top5_talkers_ip.reset_index()
top5_talkers_ip.columns = ['ip','count']
top5_talkers_ip
orgs = []
for ip in top5_talkers_ip['ip']:
try:
orgs.append(ip_lookup.at[ip,'org'])
except:
orgs.append(None)
top5_talkers_ip['org'] = orgs
top5_talkers_ip
fig = px.pie(top5_talkers_ip, values='count', names='org', title='Traffic Share Among Top 5 Talkers')
fig.show()<jupyter_output><empty_output><jupyter_text>## Top 5 Listeners<jupyter_code>top5_listeners_ip = pd.DataFrame(df['dst_IP'].value_counts()[:5])
top5_listeners_ip = top5_listeners_ip.reset_index()
top5_listeners_ip.columns = ['ip','count']
top5_listeners_ip
orgs = []
for ip in top5_listeners_ip['ip']:
try:
orgs.append(ip_lookup.at[ip,'org'])
except:
orgs.append(None)
top5_listeners_ip['org'] = orgs
top5_listeners_ip
fig = px.pie(top5_listeners_ip, values='count', names='org', title='Traffic Share Among Top 5 Listeners')
fig.show()<jupyter_output><empty_output><jupyter_text>## Top 3 IP Protocols<jupyter_code>top3_IP_Protocols = pd.DataFrame(df['IP_Protocol'].value_counts()[:3])
top3_IP_Protocols = top3_IP_Protocols.reset_index()
top3_IP_Protocols.columns = ['protocol id','count']
top3_IP_Protocols
import socket
prefix = "IPPROTO_"
ip_protocol_name = {num:name[len(prefix):]
for name,num in vars(socket).items()
if name.startswith(prefix)}
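# e.g. ip_protocol_name[6] == 'TCP' and ip_protocol_name[17] == 'UDP'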
protocol_names =[]
for pid in top3_IP_Protocols['protocol id'].tolist():
if pid in ip_protocol_name.keys():
protocol_names.append(str(pid)+' ({})'.format(ip_protocol_name[pid]))
else:
protocol_names.append(str(pid)+' (Unknown Protocol)')
top3_IP_Protocols['Protocol'] = protocol_names
top3_IP_Protocols
fig = px.pie(top3_IP_Protocols, values='count', names='Protocol', title='Traffic Share Among Top 3 IP Protocols')
fig.show()
apps = []
for index, row in df.iterrows():
if df.at[index,'IP_Protocol']==6:
protocol = 'tcp'
elif df.at[index,'IP_Protocol']==17:
protocol = 'udp'
else:
protocol = None
try:
app = socket.getservbyport(df.at[index,'outputPort'],protocol)+' ({})'.format(str(df.at[index,'outputPort']))
except:
app = None
apps.append(app)
df['app'] = apps
df.head()
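# socket.getservbyport maps well-known ports to service names, e.g. on most systems
# socket.getservbyport(80, 'tcp') == 'http' and socket.getservbyport(53, 'udp') == 'domain'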
top5_apps_protocol = df['app'].value_counts(dropna=True)[:5]
top5_apps_protocol = top5_apps_protocol.reset_index()
top5_apps_protocol.columns = ['app','count']
top5_apps_protocol
fig = px.pie(top5_apps_protocol, values='count', names='app', title='Traffic Share Among Top 5 Application Protocols')
fig.show()<jupyter_output><empty_output><jupyter_text>## Top 5 Communication Pairs<jupyter_code>pairs={}
for index, row in df.iterrows():
word1 = row['src_IP']+'/'+row['dst_IP']
word2 = row['dst_IP']+'/'+row['src_IP']
if word1 in pairs.keys():
pairs[word1]+=1
elif word2 in pairs.keys():
pairs[word2]+=1
else:
pairs[word1]=1
pairs_sorted = sorted([(k,v) for k,v in pairs.items()], key= lambda x: x[1], reverse=True)
pairs_sorted = pd.DataFrame(pairs_sorted[:5])
pairs_sorted.columns = ['pair','count']
pairs_sorted
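# a more compact way to count the same unordered pairs, using a sorted (canonical) key;
# this assumes the same 'src_IP'/'dst_IP' columns, and only the key orientation may differ
from collections import Counter
pair_counts = Counter('/'.join(sorted((row['src_IP'], row['dst_IP']))) for _, row in df.iterrows())
pair_counts.most_common(5)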
org_pairs = []
for pair in pairs_sorted['pair']:
ips = pair.split('/')
orgs = []
for ip in ips:
try:
orgs.append(ip_lookup.at[ip,'org'])
except:
orgs.append(None)
    # use str() so that a missing lookup (None) does not break the join
    org_pairs.append('/'.join(str(o) for o in orgs))
pairs_sorted['org'] = org_pairs
pairs_sorted
fig = px.pie(pairs_sorted, values='count', names='org', title='Traffic Share Among Top 5 Communication Pairs')
fig.show()<jupyter_output><empty_output>
|
no_license
|
/cz3006lab4/.ipynb_checkpoints/freq_anal_viz-checkpoint.ipynb
|
jsheng1996/CZ-3006-Labs
| 4 |
<jupyter_start><jupyter_text># LOAD DATA
NOTE: SS_cons symbols can be looked up in the Infernal documentation.
aliX, ali1, ali2: removing badly aligned sequences (20%, 1, and 2 sequences respectively);
calculation: rank sequences by outlier points, where a column with a single '.' (or all but one '.') counts as an outlier point
block definition: a bunch of adjacent symbols in the structure; now also adding all other stems and a counter
len + lennogap, count: length of the alignment (optionally with gaps removed), and the number of sequences in the alignment
filtered: removing blocks of length < 3
flank: the positions that are within 5 nucleotides of a block
stem length last-1: size of the second largest block
cons, cov:
calculation: mean([(max(Y,R) > .5 and (number of dots) else 0) / count])
calculation: mean([max(character) / count], where '.' becomes 0)
calculation: cov / len
<jupyter_code>debug = False
numneg = 800
randseed = 42
use_rnaz = False
blacklist_file = "noblacklist"
import sys
import os
#sys.path.append("../")
#os.symlink("../data", "data")
from sklearn.model_selection import train_test_split
import input.loadfiles as loadfiles
p,n = loadfiles.loaddata("data", numneg, randseed, use_rnaz, 'both', blacklist_file=blacklist_file)<jupyter_output>No Blacklist found in noblacklist
loadfiles.py: removing some positives
<jupyter_text># Make a pandas dataframe, <jupyter_code>import pandas as pd
import copy
def clean(di,oklist):
for k in list(di.keys()):
if k not in oklist:
di.pop(k)
return di
def makeXY(featurelist):
asd = [ clean(e,featurelist) for e in copy.deepcopy(p+n) ]
df = pd.DataFrame(asd)
df = df.transpose().drop_duplicates().transpose() # Remove Duplicates
X= df.to_numpy()
y= [1]*len(p)+[0]*len(n)
return X,y,df
allfeatures = list(p[1].keys()) # the filename ('name') is one of the keys and we don't need it (for now)
allfeatures.remove("name")
X,Y,df = makeXY(allfeatures)
# check whether any rows contain NaN values
from IPython.display import display, HTML
display(HTML(df[df.isna().any(axis=1)].head().to_html()))<jupyter_output><empty_output><jupyter_text># feature selection <jupyter_code>%%time
from sklearn.feature_selection import RFECV as rec
from sklearn.feature_selection import VarianceThreshold, chi2, SelectKBest
from sklearn.preprocessing import StandardScaler  # needed for the scaling step below
import pandas as pd
X = StandardScaler().fit_transform(X)
from sklearn.linear_model import Lasso
from skrebate import ReliefF as relief
from sklearn.metrics import f1_score
from sklearn.svm import SVC
randseed = 42
testsize=.3
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testsize, random_state=randseed) # USE THE SAME SEED AS BELOW!
def scorer(esti,x,y):
yh = esti.predict(x)
return f1_score(y,yh)
def lasso(X_data,y_data,alpha=.06):
mod = Lasso(alpha=alpha)
mod.fit(X_data,y_data)
#print(mod.sparse_coef_)
return [b for a,b in zip(mod.coef_, df.columns) if a]
reli=relief()
reli.fit(X_train,y_train)
def relief(_,_2,param):
#https://github.com/EpistasisLab/scikit-rebate
#print(reli.top_features_)
top_features = reli.top_features_[:param]
return [ df.columns[top] for top in top_features]
def variance_threshold(X_data, y_data, threshold=0.0):
clf = VarianceThreshold(threshold)
clf.fit(X_data, y_data)
return [b for a,b in zip(clf.get_support(), df.columns) if a]
def select_k_best(X_data, y_data, score_func=chi2, k=20):
    clf = SelectKBest(score_func, k=k)
mini = 0
for x in range(0, len(X_data)):
mini = min(min(X_data[x]), mini)
if mini < 0:
for x in range(0, len(X_data)):
for y in range(0, len(X_data[x])):
X_data[x][y] -= mini
clf.fit(X_data, y_data)
return [b for a,b in zip(clf.get_support(), df.columns) if a]
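# note: the shifting above makes all feature values non-negative, which chi2 in SelectKBest requires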
rfecv_estimator = SVC(kernel="linear")
def rfecv(X_data, y_data, estimator, step=1, cv=3):
    clf = rec(estimator, step=step, cv=cv)  # keyword args: a positional cv would be taken as min_features_to_select
clf.fit(X_data, y_data)
return [b for a,b in zip(clf.get_support(), df.columns) if a]
featurelists = [ selector(X_train,y_train) for selector in [ #lambda x,y: lasso(x,y,alpha=.05),
#lambda x,y: lasso(x,y,alpha=.01),
#lambda x,y: relief(x,y,40),
#lambda x,y: relief(x,y,60),
#lambda x,y: relief(x,y,80),
lambda x,y: variance_threshold(x,y, threshold=1), #98,99,1,101fe
#lambda x,y: variance_threshold(x,y, threshold=1.01)
#lambda x,y: variance_threshold(x,y, threshold=1.04),
#lambda x,y: select_k_best(x, y, k=20),
#lambda x,y: rfecv(x, y, rfecv_estimator, step=1)
]]
#featurelists.append(df.columns)
print(type(featurelists[0]))
tmp = pd.DataFrame([ [ 1 if f in featurelist else 0 for f in df.columns ] for featurelist in featurelists],columns = df.columns)
display(HTML(tmp.loc[:, (tmp != 0).any(axis=0)].to_html()))
for featurelist in featurelists:
print(len(featurelist))<jupyter_output><class 'list'>
<jupyter_text># Custom Feature Selection - Returns feature names and their scores, sorted
Note: Executing the parameter search after this one will not work (though the parameter search is probably broken right now anyway because of the new file structure)<jupyter_code>from sklearn.feature_selection import (RFECV, VarianceThreshold,
chi2, SelectKBest, SelectFromModel)
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC, SVC
from skrebate import ReliefF
from pprint import pprint
import numpy as np  # used by the selectors and scoring code below
def kfold(X, y, n_splits=2, randseed=None, shuffle=True):
"""Applies KFold Cross Validation to the given data.
Returns:
splits (List): A list where each entry represents each fold with [X_train, X_test, y_train, y_test]
"""
from sklearn.model_selection import StratifiedKFold
splits = []
kf = StratifiedKFold(n_splits=n_splits, shuffle=shuffle, random_state=randseed)
for train, test in kf.split(X, y):
splits.append([X[train], X[test],
[y[i] for i in train], [y[i] for i in test]])
return splits
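# usage sketch, matching how kfold is called further below:
# X_tr, X_te, y_tr, y_te = kfold(X, Y, n_splits=2, randseed=randseed)[0]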
######################
def svcl1(X_data, y_data, df, C):
clf = LinearSVC(penalty="l1", dual=False, C=C)
clf.fit(X_data, y_data)
print(np.count_nonzero(clf.coef_))
return [(a, b) for a, b in zip(clf.coef_[0], df.columns) if a]
def svcl2(X_data, y_data, df, C):
clf = LinearSVC(penalty="l2", C=C)
clf.fit(X_data, y_data)
sel = SelectFromModel(clf, prefit=True)
support = sel.get_support(True)
print(len(support))
return [(a, b) for a, b in zip(clf.coef_[0][support], df.columns[support])]
def lasso(X_data, y_data, df, alpha=.06):
clf = Lasso(alpha=alpha)
clf.fit(X_data, y_data)
print(np.count_nonzero(clf.coef_))
return [(a, b) for a, b in zip(clf.coef_, df.columns) if a]
def relief(X_data, y_data, df, param):
clf = ReliefF()
clf.fit(X_data, y_data)
# https://github.com/EpistasisLab/scikit-rebate
top_features = clf.top_features_[:param]
print(len(top_features))
return [(a, b) for a, b in zip(clf.feature_importances_[top_features], df.columns[top_features])]
def variance_threshold(X_data, y_data, df, threshold=0.0):
clf = VarianceThreshold(threshold)
clf.fit(X_data, y_data)
support = clf.get_support(True)
print(len(support))
return [(a, b) for a, b in zip(clf.variances_[support], df.columns[support])]
def select_k_best(X_data, y_data, df, k=20):
score_func=chi2
clf = SelectKBest(score_func, k=k)
mini = 0
for x in range(0, len(X_data)):
mini = min(min(X_data[x]), mini)
if mini < 0:
for x in range(0, len(X_data)):
for y in range(0, len(X_data[x])):
X_data[x][y] -= mini
clf.fit(X_data, y_data)
support = clf.get_support(True)
print(len(support))
return [(a, b) for a, b in zip(clf.pvalues_[support], df.columns[support])]
def rfecv(X_data, y_data, df, step=1, cv=3):
rfecv_estimator = SVC(kernel="linear")
clf = RFECV(rfecv_estimator, step=step, min_features_to_select=20, cv=cv)
clf.fit(X_data, y_data)
support = clf.get_support(True)
print(len(support))
return [(a, b) for a, b in zip(clf.ranking_[support], df.columns[support])]
use_svcl1 = 1
use_svcl2 = 1
use_lasso = 1
use_varthresh = 1
use_kbest = 1
use_relief = 1
use_rfecv = 1
X = StandardScaler().fit_transform(X)
folds = kfold(X, Y, n_splits=2, randseed=randseed)
for X_train, X_test, y_train, y_test in folds:
if use_svcl1:
for C in [.03, .04]:
print(f"---- SVC_l1 {C} -----")
pprint(sorted(svcl1(X_train, y_train, df, C), reverse = True))
if use_svcl2:
for C in [.03, .04]:
print(f"---- SVC_l2 {C} -----")
pprint(sorted(svcl2(X_train, y_train, df, C), reverse = True))
if use_lasso:
for alpha in [.03]: # Lasso
print(f"---- Lasso {alpha} -----")
pprint(sorted(lasso(X_train, y_train, df, alpha), reverse = True))
if use_varthresh:
for threshold in [1.1]: # VarThresh [.99, .995, 1, 1.005, 1.01]
print(f"---- VarThresh {threshold} -----")
pprint(sorted(variance_threshold(X_train, y_train, df, threshold), reverse = True))
if use_kbest:
for k in [20]: # SelKBest
print(f"---- SelKBest {k} -----")
pprint(sorted(select_k_best(X_train, y_train, df, k), reverse = True))
if use_relief:
for features in [40]: # Relief
print(f"---- Relief {features} -----")
pprint(sorted(relief(X_train, y_train, df, features), reverse = True))
if use_rfecv:
for stepsize in [1]: # RFECV
print(f"---- RFECV {stepsize} -----")
pprint(sorted(rfecv(X_train, y_train, df, stepsize), reverse = True))
break # Added this break since we dont really need the other folds for this...<jupyter_output>---- SVC_l1 0.03 -----
32
[(0.28507381005121346, 'yao_score'),
(0.2130295307275869, 'flank_cons_nuc'),
(0.15852446262535932, 'rm_small_stems remove_1/3_seq sloppy_gu'),
(0.13719313906905825, 'rm_small_stems number_of_(_blocks'),
(0.11195614393712987, 'remove_2_seq length'),
(0.10690544845874847, 'rm_small_stems remove_2_seq stem_covariance'),
(0.10276807803995236, 'rm_small_stems flank_cons'),
(0.09645525634101101, 'rm_small_stems remove_2_seq number_of_<_blocks'),
(0.07411664003564793, 'number_of_<_blocks'),
(0.04108952034056658, 'rm_small_stems sloppy_gu'),
(0.03699265591706892, 'rm_small_stems remove_2_seq stem_length_smallest'),
(0.03202646598378955, 'rm_small_stems remove_1_seq number_of_[_blocks'),
(0.03141816981451415, 'number_of_(_blocks'),
(0.0265686365274649, 'rm_small_stems total_conservation_nuc_+cov'),
(0.019477894906016716, 'rm_small_stems stem_length_smallest'),
(0.014870890554712324, 'rm_small_stems remove_1_seq flank_cons'),
(-0.0010213358064284426, 're[...]<jupyter_text># check performance for various classifiers<jupyter_code>from sklearn.ensemble import ExtraTreesClassifier, GradientBoostingClassifier
names = ["Nearest Neighbors","Linear SVM", "RBF SVM",
#"Gaussian Process", # 2 slow
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA",'extra trees','gradient boosting']
res = []
for FEATURELIST in featurelists: # loop over all the selectors
# make some data
X,y,df = makeXY(FEATURELIST)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testsize, random_state=randseed) # USE THE SAME SEED AS BELOW!
classifiers = [
KNeighborsClassifier(5),
SVC(kernel="linear",class_weight='balanced', C=0.025),
SVC(gamma=2, C=1,class_weight='balanced'),
#SVC(),
#GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5,class_weight='balanced',min_samples_leaf=4),
RandomForestClassifier(max_depth=9, n_estimators=30, class_weight='balanced'),
#RandomForestClassifier(max_depth=5, n_estimators=25, max_features=5,class_weight='balanced'),
MLPClassifier(alpha=.001, max_iter=2000),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3)),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
ExtraTreesClassifier(),
GradientBoostingClassifier()
]
def score(clf):
clf.fit(X_train, y_train)
return scorer(clf,X_test,np.array(y_test))
res.append( [score(clf) for clf in classifiers] )
display(HTML(pd.DataFrame(res,columns=names).to_html()))<jupyter_output>c:\users\arkanini\appdata\local\programs\python\python38\lib\site-packages\sklearn\discriminant_analysis.py:691: UserWarning: Variables are collinear
warnings.warn("Variables are collinear")
c:\users\arkanini\appdata\local\programs\python\python38\lib\site-packages\sklearn\discriminant_analysis.py:691: UserWarning: Variables are collinear
warnings.warn("Variables are collinear")
c:\users\arkanini\appdata\local\programs\python\python38\lib\site-packages\sklearn\discriminant_analysis.py:691: UserWarning: Variables are collinear
warnings.warn("Variables are collinear")
c:\users\arkanini\appdata\local\programs\python\python38\lib\site-packages\sklearn\discriminant_analysis.py:691: UserWarning: Variables are collinear
warnings.warn("Variables are collinear")
c:\users\arkanini\appdata\local\programs\python\python38\lib\site-packages\sklearn\discriminant_analysis.py:691: UserWarning: Variables are collinear
warnings.warn("Variables are collinear")
c:\users\arkanini\appdata\local\pro[...]<jupyter_text># LIKE ABOVE BUT WITH RANDOM PARAM SEARCH! <jupyter_code>%%time
from sklearn.ensemble import ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV as RSCV
import other.randomsearch as rs
res = []
for FEATURELIST in featurelists: # loop over all the selectors
# make some data
X,y,df = makeXY(FEATURELIST)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testsize, random_state=randseed) # USE THE SAME SEED AS BELOW!
print(len(X_train))
print(len(X_test))
def score(clf,param):
searcher = RSCV(clf,
param,
n_iter=50 if not debug else 5,
scoring=None,
n_jobs=4,
iid=False,
                    #refit=True,
cv=4,
verbose=0,
pre_dispatch="2*n_jobs",
random_state=None,
error_score=np.nan,
return_train_score=False)
searcher.fit(X_train, y_train)
# print(searcher.best_params_)
return scorer(searcher.best_estimator_,X_test,np.array(y_test))
res.append( [score(clf,param) for clf,param in zip(rs.classifiers,rs.param_lists)] )
#print ("#"*80)
display(HTML(pd.DataFrame(res,columns=rs.clfnames).to_html()))
%%time
from random_param_search_performance import random_param_search as rps, maketasks
debug=True
tasks = maketasks(featurelists, p, n, randseed, n_splits=2)
rps(tasks, debug=debug)
import other.logistic_regression as log_reg
cv = log_reg.log_reg(["data/yaoscores/pos.json", "data/yaoscores/pos2.json"], ["data/yaoscores/neg.json"])
print(cv)<jupyter_output>Confusion matrix, without normalization
[[56938 12935]
[ 464 424]]
|
no_license
|
/notebook/improvedML.ipynb
|
smautner/pig
| 6 |