content (stringlengths 73–1.12M) | license (stringclasses 3 values) | path (stringlengths 9–197) | repo_name (stringlengths 7–106) | chain_length (int64 1–144) |
---|---|---|---|---|
<jupyter_start><jupyter_text><jupyter_code>%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai import *
from fastai.text import *
from scipy.spatial.distance import cosine as dist
torch.cuda.set_device(0)
import pandas as pd
import numpy as np
import OpenBlender
import json
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
import matplotlib.pyplot as plt
#!pip install OpenBlender
api_token = '#####'
action = 'API_getObservationsFromDataset'# ANCHOR: 'Bitcoin vs USD'
parameters = {
'token' : api_token,
'id_dataset' : '5d4c3af79516290b01c83f51',
'date_filter':{"start_date" : "2020-01-01",
"end_date" : "2020-08-29"} }
df = pd.read_json(json.dumps(OpenBlender.call(action, parameters)['sample']), convert_dates=False, convert_axes=False).sort_values('timestamp', ascending=False)
df.reset_index(drop=True, inplace=True)
df['date'] = [OpenBlender.unixToDate(ts, timezone = 'GMT') for ts in df.timestamp]
df = df.drop('timestamp', axis = 1)
df.describe()
df['log_ret'] = np.log(df['price']) - np.log(df['open'])
df.sort_index(inplace=True,ascending=False)
df.tail()
df.tail()
plot_acf(df['log_ret'])
plot_pacf(df['log_ret'])
plt.plot(df['log_ret'])
plt.plot(df['log_ret']**2)
df['price']
plt.plot(df['price'])
df['log_ret'].hist()
df['target'] = [1 if log_diff > 0 else 0 for log_diff in df['log_ret']]
df.reset_index(inplace=True)
df.head()
format = '%d-%m-%Y %H:%M:%S'
timezone = 'GMT'
df['u_timestamp'] = OpenBlender.dateToUnix(df['date'],
date_format = format,
timezone = timezone)
df = df[['date', 'u_timestamp', 'price', 'target']]
df.head()
search_keyword = 'bitcoin'
df = df.sort_values('u_timestamp').reset_index(drop = True)
print('From : ' + OpenBlender.unixToDate(min(df.u_timestamp)))
print('Until: ' + OpenBlender.unixToDate(max(df.u_timestamp)))
OpenBlender.searchTimeBlends(api_token,df.u_timestamp,search_keyword)
!pip install fsspec
alt_source = {
'id_dataset':'5ea20ec595162936337159b4',
'feature' : 'text'
}
# Now, let's 'timeBlend' it to our dataset
df_blend_alt = OpenBlender.timeBlend( token = api_token,
anchor_ts = df.u_timestamp,
blend_source = alt_source,
blend_type = 'agg_in_intervals',
interval_size = 60 * 60 * 24,
direction = 'time_prior',
interval_output = 'list',
missing_values = 'raw')
df_blend_alt.head()
# We need to add the 'id_dataset' and the 'feature' name we want.
blend_source = {
'id_dataset':'5ea2039095162936337156c9',
'feature' : 'text'
}
# Now, let's 'timeBlend' it to our dataset
df_blend = OpenBlender.timeBlend( token = api_token,
anchor_ts = df.u_timestamp,
blend_source = blend_source,
blend_type = 'agg_in_intervals',
interval_size = 60 * 60 * 24,
direction = 'time_prior',
interval_output = 'list',
missing_values = 'raw')
df = pd.concat([df, df_blend.loc[:, df_blend.columns != 'u_timestamp']], axis = 1)
df.head()
df['text'] = df['BITCOIN_NE.text_last1days'].apply(lambda x:"xxeos ".join(x))
#df['text'] = df['BITCOIN_NE.text_last1days']
df['count'] = df['BITCOIN_NE.text_COUNT_last1days']
df.head()
df['text'].values
df['BITCOIN_NE.text_last1days'].apply(lambda x:["".join(k) for k in x])
from fastai.text import *
data = (TextList.from_df(df, cols='text')
.split_by_rand_pct(0.2,seed=42)
.label_for_lm()
.databunch(bs=8))
data.show_batch()
### Fit bitcoin wikitext model with domain specific data
learn = language_model_learner(data,AWD_LSTM, drop_mult=0.2)
# select the appropriate learning rate
learn.lr_find()
learn.recorder.plot()
bs = 48
lr = 1e-3
lr *= bs/48
learn.to_fp16()
learn.fit_one_cycle(5, lr*10, moms=(0.8,0.7))
learn.unfreeze()
learn.fit_one_cycle(10, lr, moms=(0.8,0.7))
learn.save('fine_tuned')
learn.save_encoder('fine_tuned_enc')
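# Optional sanity check (a sketch, not part of the original notebook): sample a few
# words from the fine-tuned language model to see whether it has picked up
# Bitcoin-related vocabulary from the blended news text. The seed phrase and
# temperature below are arbitrary choices.
learn.predict("bitcoin price is expected to", n_words=30, temperature=0.75)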
# # we typically find the point where the slope is steepest
# learn.recorder.plot()
# Fit the model based on selected learning rate
learn.fit_one_cycle(5, 1e-2, moms=(0.8,0.7))
# Tune a little more
learn.unfreeze()
learn.fit_one_cycle(5, 1e-3, moms=(0.8,0.7))
# # Save the encoder for use in classification
learn.save_encoder('fine_tuned_enc')
<jupyter_output><empty_output><jupyter_text>### Refit model with classification label<jupyter_code>#test_datalist = TextList.from_df(test, cols='text', vocab=data.vocab)
data_clas = (TextList.from_df(df, cols=['text'], vocab=data.vocab)
.split_by_rand_pct(0.2,seed=42)
.label_from_df(cols= 'target')
.databunch(bs=48))
data_clas.show_batch()
learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.6,metrics=[Precision(average='micro')])
# load the encoder saved
learn_classifier.load_encoder('fine_tuned_enc')
learn_classifier.freeze()
# select the appropriate learning rate
learn_classifier.lr_find()
# we typically find the point where the slope is steepest
learn_classifier.recorder.plot()
# Fit the model based on selected learning rate
learn_classifier.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
# Tune a little more
learn_classifier.freeze_to(-2)
learn_classifier.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
# Tune a little more
learn_classifier.freeze_to(-3)
learn_classifier.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
#learn_classifier.unfreeze()
learn_classifier.fit_one_cycle(5, slice(2e-3/(2.6**4),2e-3), moms=(0.8,0.7))
learn_classifier.show_results()
interp = ClassificationInterpretation.from_learner(learn_classifier)
interp.plot_confusion_matrix()
y_valid = interp.data.valid_ds.y.items
y_pred = interp.pred_class.numpy()
from sklearn.metrics import classification_report
print(classification_report(y_valid,
y_pred,
target_names= ['0', '1']))
from sklearn.metrics import roc_curve
false_positive_rate, true_positive_rate, threshold = roc_curve(y_valid,
y_pred)
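# Optional follow-up (a sketch, not part of the original notebook): plot the ROC
# curve from the rates computed above and report the area under it. Since roc_curve
# was given hard class predictions rather than probabilities, the curve only has a
# single intermediate operating point; predicted probabilities would give a smoother curve.
from sklearn.metrics import auc
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.plot(false_positive_rate, true_positive_rate, marker='o', label='AUC = {:.3f}'.format(roc_auc))
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc='lower right')
plt.show()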
<jupyter_output><empty_output>
| no_license | /NLP/CryptoSentimentAnalysis_TransferLearning.ipynb | webclinic017/MachineLearning-1 | 2 |
<jupyter_start><jupyter_text># Movie Rating Analysis<jupyter_code>import pandas as pd
upath = 'data/pydata-book/movielens/users.dat'
rpath = 'data/pydata-book/movielens/ratings.dat'
mpath = 'data/pydata-book/movielens/movies.dat'
users = pd.read_csv(upath, sep='::', engine='python', names=['user_id', '성별', '나이', '직업', '지역'])
users[:5]
ratings = pd.read_csv(rpath, sep='::', engine='python', names=['user_id', 'movie_id', 'rating', 'timestamp'])
ratings[:5]
movies = pd.read_csv(mpath, sep='::', engine='python', names=['movie_id', 'title' , 'genres'])
movies[:5]
data = pd.merge(pd.merge(ratings, users), movies)
data[:5]
data.iloc[0]
영화별성별 = data.groupby(['title', '성별'])
영화별성별['rating'].mean().unstack()[:5]
mean_ratings = data.pivot_table('rating', aggfunc='mean', index='title', columns='성별')
mean_ratings[:5]
제목별 = data.groupby('title')
평점충분 = 제목별.size() >= 250
평점충분[:5]
mean_ratings = mean_ratings[평점충분]
mean_ratings[:5]
mean_ratings.sort_values(by='F', ascending=False)[:5]
mean_ratings['diff'] = mean_ratings['M'] - mean_ratings['F']
mean_ratings[:5]<jupyter_output><empty_output><jupyter_text>Movies preferred by women<jupyter_code>mean_ratings.sort_values(by='diff')[:5]<jupyter_output><empty_output><jupyter_text>Movies preferred by men<jupyter_code>mean_ratings.sort_values(by='diff', ascending=False)[:5]
#mean_ratings.sort_values(by='diff')[::-1][:5]<jupyter_output><empty_output><jupyter_text>Standard deviation<jupyter_code>제목별_평점편차 = 제목별['rating'].std()
제목별_평점편차 = 제목별_평점편차.loc[평점충분]
제목별_평점편차.sort_values(ascending=False)[:5]<jupyter_output><empty_output>
| no_license | /sean/Day 1-3.ipynb | chosh0615/python-training | 4 |
<jupyter_start><jupyter_text>## Analyze A/B Test Results
You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/projects/37e27304-ad47-4eb0-a1ab-8c12f60e43d0/rubric). **Please save regularly.**
This project will assure you have mastered the subjects covered in the statistics lessons. The hope is to have this project be as comprehensive of these topics as possible. Good luck!
## Table of Contents
- [Introduction](#intro)
- [Part I - Probability](#probability)
- [Part II - A/B Test](#ab_test)
- [Part III - Regression](#regression)
### Introduction
A/B tests are very commonly performed by data analysts and data scientists. It is important that you get some practice working with the difficulties of these tests.
For this project, you will be working to understand the results of an A/B test run by an e-commerce website. Your goal is to work through this notebook to help the company understand if they should implement the new page, keep the old page, or perhaps run the experiment longer to make their decision.
**As you work through this notebook, follow along in the classroom and answer the corresponding quiz questions associated with each question.** The labels for each classroom concept are provided for each question. This will assure you are on the right track as you work through the project, and you can feel more confident in your final submission meeting the criteria. As a final check, assure you meet all the criteria on the [RUBRIC](https://review.udacity.com/#!/projects/37e27304-ad47-4eb0-a1ab-8c12f60e43d0/rubric).
#### Part I - Probability
To get started, let's import our libraries.<jupyter_code>import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
%matplotlib inline
#We are setting the seed to assure you get the same answers on quizzes as we set up
random.seed(42)<jupyter_output><empty_output><jupyter_text>`1.` Now, read in the `ab_data.csv` data. Store it in `df`. **Use your dataframe to answer the questions in Quiz 1 of the classroom.**
a. Read in the dataset and take a look at the top few rows here:<jupyter_code>df = pd.read_csv('ab_data.csv')
df.head()<jupyter_output><empty_output><jupyter_text>b. Use the cell below to find the number of rows in the dataset.<jupyter_code>df.info()
df.shape<jupyter_output><empty_output><jupyter_text>c. The number of unique users in the dataset.<jupyter_code>df.user_id.nunique()<jupyter_output><empty_output><jupyter_text>d. The proportion of users converted.<jupyter_code>df['converted'].sum()/290584<jupyter_output><empty_output><jupyter_text>e. The number of times the `new_page` and `treatment` don't match.<jupyter_code>old_treatment = df.query("group == 'treatment' and landing_page == 'old_page'").shape[0]
new_control = df.query("group == 'control' and landing_page == 'new_page'").shape[0]
old_treatment+ new_control <jupyter_output><empty_output><jupyter_text>f. Do any of the rows have missing values?<jupyter_code>df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 294478 entries, 0 to 294477
Data columns (total 5 columns):
user_id 294478 non-null int64
timestamp 294478 non-null object
group 294478 non-null object
landing_page 294478 non-null object
converted 294478 non-null int64
dtypes: int64(2), object(3)
memory usage: 11.2+ MB
<jupyter_text>`2.` For the rows where **treatment** does not match with **new_page** or **control** does not match with **old_page**, we cannot be sure if this row truly received the new or old page. Use **Quiz 2** in the classroom to figure out how we should handle these rows.
a. Now use the answer to the quiz to create a new dataset that meets the specifications from the quiz. Store your new dataframe in **df2**.<jupyter_code>df2 = df.query("group == 'control' and landing_page == 'old_page'")
df2 = df2.append(df.query("group == 'treatment' and landing_page == 'new_page'"))
# Double Check all of the correct rows were removed - this should be 0
df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]<jupyter_output><empty_output><jupyter_text>`3.` Use **df2** and the cells below to answer questions for **Quiz3** in the classroom.a. How many unique **user_id**s are in **df2**?<jupyter_code>df2.user_id.nunique()<jupyter_output><empty_output><jupyter_text>b. There is one **user_id** repeated in **df2**. What is it?<jupyter_code>df2[df2['user_id'].duplicated()]<jupyter_output><empty_output><jupyter_text>c. What is the row information for the repeat **user_id**? <jupyter_code>df2[df2['user_id']== 773192]<jupyter_output><empty_output><jupyter_text>d. Remove **one** of the rows with a duplicate **user_id**, but keep your dataframe as **df2**.<jupyter_code>df2 = df2.drop(1899)<jupyter_output><empty_output><jupyter_text>`4.` Use **df2** in the cells below to answer the quiz questions related to **Quiz 4** in the classroom.
a. What is the probability of an individual converting regardless of the page they receive?<jupyter_code>df2.converted.mean()<jupyter_output><empty_output><jupyter_text>b. Given that an individual was in the `control` group, what is the probability they converted?<jupyter_code>control_prob = df2.query("group == 'control'")['converted'].mean()
control_prob<jupyter_output><empty_output><jupyter_text>c. Given that an individual was in the `treatment` group, what is the probability they converted?<jupyter_code>treatment_prob = df2.query("group == 'treatment'")['converted'].mean()
treatment_prob<jupyter_output><empty_output><jupyter_text>d. What is the probability that an individual received the new page?<jupyter_code>df2.query('landing_page == "new_page"').shape[0]/df2.shape[0]<jupyter_output><empty_output><jupyter_text>e. Consider your results from parts (a) through (d) above, and explain below whether you think there is sufficient evidence to conclude that the new treatment page leads to more conversions. There is no evidence to conclude that the new treatment page leads to more conversions. The new page actually led to a lower conversion rate in comparison to the old page.
### Part II - A/B Test
Notice that because of the time stamp associated with each event, you could technically run a hypothesis test continuously as each observation was observed.
However, then the hard question is do you stop as soon as one page is considered significantly better than another or does it need to happen consistently for a certain amount of time? How long do you run to render a decision that neither page is better than another?
These questions are the difficult parts associated with A/B tests in general.
`1.` For now, consider you need to make the decision just based on all the data provided. If you want to assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, what should your null and alternative hypotheses be? You can state your hypothesis in terms of words or in terms of **$p_{old}$** and **$p_{new}$**, which are the converted rates for the old and new pages.
$$H_0: p_{new} - p_{old} \leq 0$$
$$H_1: p_{new} - p_{old} > 0$$
`2.` Assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have "true" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page.
Use a sample size for each page equal to the ones in **ab_data.csv**.
Perform the sampling distribution for the difference in **converted** between the two pages over 10,000 iterations of calculating an estimate from the null.
Use the cells below to provide the necessary parts of this simulation. If this doesn't make complete sense right now, don't worry - you are going to work through the problems below to complete this problem. You can use **Quiz 5** in the classroom to make sure you are on the right track.a. What is the **conversion rate** for $p_{new}$ under the null? <jupyter_code>
p_null = df2['converted'].mean()
p_null<jupyter_output><empty_output><jupyter_text>b. What is the **conversion rate** for $p_{old}$ under the null? <jupyter_code>p_nullold = df2['converted'].mean()
p_nullold<jupyter_output><empty_output><jupyter_text>c. What is $n_{new}$, the number of individuals in the treatment group?<jupyter_code>n_new = df2.query("landing_page == 'new_page'").shape[0]
n_new<jupyter_output><empty_output><jupyter_text>d. What is $n_{old}$, the number of individuals in the control group?<jupyter_code>n_old = df2.query("landing_page == 'old_page'").shape[0]
n_old<jupyter_output><empty_output><jupyter_text>e. Simulate $n_{new}$ transactions with a conversion rate of $p_{new}$ under the null. Store these $n_{new}$ 1's and 0's in **new_page_converted**.<jupyter_code>new_page_converted = np.random.binomial(1, p_null, n_new)<jupyter_output><empty_output><jupyter_text>f. Simulate $n_{old}$ transactions with a conversion rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in **old_page_converted**.<jupyter_code>old_page_converted = np.random.binomial(1, p_nullold, n_old)<jupyter_output><empty_output><jupyter_text>g. Find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).<jupyter_code>new_page_converted.mean() - old_page_converted.mean()<jupyter_output><empty_output><jupyter_text>h. Create 10,000 $p_{new}$ - $p_{old}$ values using the same simulation process you used in parts (a) through (g) above. Store all 10,000 values in a NumPy array called **p_diffs**.<jupyter_code>new_converted_simulation = np.random.binomial(n_new, p_null, 10000)/n_new
old_converted_simulation = np.random.binomial(n_old, p_null, 10000)/n_old
p_diffs = new_converted_simulation - old_converted_simulation<jupyter_output><empty_output><jupyter_text>i. Plot a histogram of the **p_diffs**. Does this plot look like what you expected? Use the matching problem in the classroom to assure you fully understand what was computed here.<jupyter_code>plt.hist(p_diffs);<jupyter_output><empty_output><jupyter_text>j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?<jupyter_code>obs_diff = treatment_prob - control_prob
low_prob = (p_diffs < obs_diff).mean()
high_prob = (p_diffs.mean() + (p_diffs.mean() - obs_diff) < p_diffs).mean()
plt.hist(p_diffs);
plt.axvline(obs_diff, color='red');
plt.axvline(p_diffs.mean() + (p_diffs.mean() - obs_diff), color='red');
p_val = low_prob + high_prob
print(p_val)
(p_diffs > obs_diff).mean()<jupyter_output><empty_output><jupyter_text>k. Please explain using the vocabulary you've learned in this course what you just computed in part **j.** What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages?**In part j, we computed the p-value, and we got a value above 0.05, which means that we do not have evidence to reject our null hypothesis that the new page converts no better than the old page. A large p-value means the observed difference is consistent with what we would expect to see if the null hypothesis were true.
l. We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Fill in the below to calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer to the number of rows associated with the old page and new pages, respectively.<jupyter_code>
import statsmodels.api as sm
convert_old = df2.query('group == "control" and converted == 1').user_id.size
convert_new = df2.query('group == "treatment" and converted == 1').user_id.size
n_old = df2.query('group == "control"').user_id.size
n_new = df2.query('group == "treatment"').user_id.size
convert_old, convert_new, n_old, n_new<jupyter_output><empty_output><jupyter_text>m. Now use `stats.proportions_ztest` to compute your test statistic and p-value. [Here](https://docs.w3cub.com/statsmodels/generated/statsmodels.stats.proportion.proportions_ztest/) is a helpful link on using the built in.<jupyter_code>z_score, p_value = sm.stats.proportions_ztest([convert_old, convert_new], [n_old, n_new])
z_score, p_value  # note: this p-value is from a two-tailed test; we need a one-tailed test
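# A one-tailed p-value can also be obtained directly (a sketch, not part of the
# original notebook): with count=[convert_old, convert_new], alternative='smaller'
# tests H1: p_old - p_new < 0, i.e. that the new page converts better.
z_one_tailed, p_one_tailed = sm.stats.proportions_ztest([convert_old, convert_new],
                                                        [n_old, n_new],
                                                        alternative='smaller')
z_one_tailed, p_one_tailed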
from scipy.stats import norm
norm.cdf(1.3109241984234394), norm.ppf(1-(0.05))<jupyter_output><empty_output><jupyter_text>n. What do the z-score and p-value you computed in the previous question mean for the conversion rates of the old and new pages? Do they agree with the findings in parts **j.** and **k.**?
** Given that our critical value is 1.6448536269514722, the z-score is 1.3109241984234394 and the p-value is 0.905, the z-score is not greater than the critical value. This means that we do not have evidence to reject our null hypothesis. Therefore, they agree with the findings in parts j and k.
### Part III - A regression approach
`1.` In this final part, you will see that the result you achieved in the A/B test in Part II above can also be achieved by performing regression.
a. Since each row is either a conversion or no conversion, what type of regression should you be performing in this case? Logistic Regression with categorical predictors. b. The goal is to use **statsmodels** to fit the regression model you specified in part **a.** to see if there is a significant difference in conversion based on which page a customer receives. However, you first need to create in df2 a column for the intercept, and create a dummy variable column for which page each user received. Add an **intercept** column, as well as an **ab_page** column, which is 1 when an individual receives the **treatment** and 0 if **control**.<jupyter_code>df2['intercept'] = 1
df2[['new_page','old_page']] = pd.get_dummies(df2['landing_page'])
df2['ab_page'] = pd.get_dummies(df2['group'])['treatment']
df2.head()<jupyter_output><empty_output><jupyter_text>c. Use **statsmodels** to instantiate your regression model on the two columns you created in part b., then fit the model using the two columns you created in part **b.** to predict whether or not an individual converts. <jupyter_code>import statsmodels.api as sm
from scipy import stats
stats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df)
log_model = sm.Logit(df2['converted'], df2[['intercept', 'ab_page']])
result = log_model.fit()<jupyter_output>Optimization terminated successfully.
Current function value: 0.366118
Iterations 6
<jupyter_text>d. Provide the summary of your model below, and use it as necessary to answer the following questions.<jupyter_code>result.summary()<jupyter_output><empty_output><jupyter_text>e. What is the p-value associated with **ab_page**? Why does it differ from the value you found in **Part II**? **Hint**: What are the null and alternative hypotheses associated with your regression model, and how do they compare to the null and alternative hypotheses in **Part II**?**Put your answer here.**
According to our findings, the p-value for ab_page is 0.190, which is close to the number we got previously (0.1911). We use this model to predict whether a user converts based on the page they received. The value differs from Part II because the regression tests a two-sided hypothesis (null: the ab_page coefficient equals 0; alternative: it does not equal 0), whereas the hypotheses in Part II were one-sided (the new page converts better than the old page).
f. Now, you are considering other things that might influence whether or not an individual converts. Discuss why it is a good idea to consider other factors to add into your regression model. Are there any disadvantages to adding additional terms into your regression model? It is a good idea to consider other factors to add into the regression model because we want to see whether these factors help predict conversion. However, we don't want to select factors that are collinear; a disadvantage of adding more terms is the risk of multicollinearity between predictors. From what we have seen, there is no evidence that switching from the old page to the new page makes any difference to conversion. g. Now along with testing if the conversion rate changes for different pages, also add an effect based on which country a user lives in. You will need to read in the **countries.csv** dataset and merge together your datasets on the appropriate rows. [Here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html) are the docs for joining tables.
Does it appear that country had an impact on conversion? Don't forget to create dummy variables for these country columns - **Hint: You will need two columns for the three dummy variables.** Provide the statistical output as well as a written response to answer this question.<jupyter_code>countries = pd.read_csv('countries.csv')
countries.head()
df2 = df2.set_index('user_id').join(countries.set_index('user_id'))
df2[['CA', 'UK', 'US']] = pd.get_dummies(df2['country'])
logit_mod = sm.Logit(df2['converted'], df2[['intercept', 'ab_page', 'CA', 'UK']])
results = logit_mod.fit()
results.summary()<jupyter_output>Optimization terminated successfully.
Current function value: 0.366113
Iterations 6
<jupyter_text>According to the result we found here, the p-values show that the country does not seem to have an impact on conversion. h. Though you have now looked at the individual factors of country and page on conversion, we would now like to look at an interaction between page and country to see if there are significant effects on conversion. Create the necessary additional columns, and fit the new model.
Provide the summary results, and your conclusions based on the results.<jupyter_code>
df2['CA_page'] = df2['CA']*df2['ab_page']
df2['UK_page'] = df2['UK']*df2['ab_page']
df2['US_page'] = df2['US']*df2['ab_page']
logit_mod = sm.Logit(df2['converted'], df2[['intercept', 'CA_page', 'UK_page']])
results = logit_mod.fit()
results.summary()<jupyter_output>Optimization terminated successfully.
Current function value: 0.366113
Iterations 6
<jupyter_text>For the interaction of CA and ab_page, we can see that (p = 0.046; p < 0.05) the effect is statistically significant. Below we exponentiate the coefficients to interpret the CA_page coefficient. <jupyter_code>np.exp(results.params)
print(1/0.927579)<jupyter_output>1.0780752906221465
<jupyter_text>Given the number 1.0780752906221465, which is the interpretation of the CA_page coefficient, a Canadian user who receives the new page is about 1.08 times less likely to convert. While this is statistically significant, it does not have much practical significance. In addition, we do not have evidence to reject our null hypothesis (conversion has no significant relationship with country/landing_page) based on what we found in our A/B testing, which supports the conclusion that switching from the old page to the new page would not help in this case. To be more specific, ab_page (coefficient: -0.015) and country are insignificant on their own, so the statistical significance of the interaction is not very useful for us. The p-values of ab_page (0.19) and country (0.104, 0.341) are also considered insignificant. Therefore, there is no reason to convert the old page to the new page, as they both have similar performance.
## Finishing Up
> Congratulations! You have reached the end of the A/B Test Results project! You should be very proud of all you have accomplished!
> **Tip**: Once you are satisfied with your work here, check over your report to make sure that it is satisfies all the areas of the rubric (found on the project submission page at the end of the lesson). You should also probably remove all of the "Tips" like this one so that the presentation is as polished as possible.
## Directions to Submit
> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).
> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.
> Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!<jupyter_code>from subprocess import call
call(['python', '-m', 'nbconvert', 'Analyze_ab_test_results_notebook.ipynb'])<jupyter_output><empty_output>
| no_license | /Analyze_ab_test_results_notebook.ipynb | loganchg/Udacity-Data-Analyst-A-B-testing | 35 |
<jupyter_start><jupyter_text>## Evaluate complete data
In this file, we calculate the evaluation error metrics for the complete datasets. <jupyter_code>import pandas as pd
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import explained_variance_score as ev
from math import sqrt
k_fold = KFold(n_splits=5, random_state=None)
lin = LinearRegression()
def perform_crossvalidation(df, output_variable):
    """Run 5-fold cross-validation of a linear regression on df predicting
    output_variable, and print the average MSE, RMSE, explained variance, MAE
    and train-minus-validation MSE difference across the folds."""
results_mse = []
results_rmse = []
results_ev = []
results_mae = []
results_dif = []
for train_indices, cv_indices in k_fold.split(df):
X_train = df.drop(output_variable, 1).iloc[train_indices]
y_train = df[output_variable].iloc[train_indices]
X_val = df.drop(output_variable, 1).iloc[cv_indices]
y_val = df[output_variable].iloc[cv_indices]
scaler = StandardScaler()
scaler.fit(X_train)
X_train = pd.DataFrame(data=scaler.transform(X_train), columns=X_train.columns)
X_val = pd.DataFrame(data=scaler.transform(X_val), columns=X_val.columns)
lin.fit(X_train, y_train)
pred = lin.predict(X_val)
results_mse.append(mse(y_val,pred))
results_rmse.append(sqrt(mse(y_val,pred)))
results_ev.append(ev(y_val,pred))
results_mae.append(mae(y_val,pred))
results_dif.append(mse(y_train,lin.predict(X_train))-mse(y_val,pred))
print('mse: {}, rmse: {}, ev: {}, mae: {}, dif: {}'.format(sum(results_mse) / len(results_mse),
sum(results_rmse) / len(results_rmse),
sum(results_ev) / len(results_ev),
sum(results_mae) / len(results_mae),
sum(results_dif) / len(results_dif)))
df_ff = pd.read_csv('../data/processed/forest_fires.txt',sep='\t')
perform_crossvalidation(df_ff, 'area')
df_st = pd.read_csv('../data/processed/slump_test.txt',sep='\t')
perform_crossvalidation(df_st, 'SLUMP(cm)')
df_rwq = pd.read_csv('../data/processed/red_wine_quality.txt',sep='\t')
perform_crossvalidation(df_rwq, 'quality')
df_sac = pd.read_csv('../data/processed/school_alcohol_consumption.txt',sep='\t')
perform_crossvalidation(df_sac, 'Dalc')
df = pd.read_csv('../data/processed/custom_dataset_poor_little.txt',sep='\t')
perform_crossvalidation(df, 'y')
df = pd.read_csv('../data/processed/custom_dataset_poor_much.txt',sep='\t')
perform_crossvalidation(df, 'y')
df = pd.read_csv('../data/processed/custom_dataset_rich_little.txt',sep='\t')
perform_crossvalidation(df, 'y')
df = pd.read_csv('../data/processed/custom_dataset_rich_much.txt',sep='\t')
perform_crossvalidation(df, 'y')<jupyter_output>mse: 24.866799186237536, rmse: 4.986148967423025, ev: 0.9999996356182048, mae: 3.9608002240642954, dif: -0.2391166027069019
| no_license | /sims/notebooks/1.2-rms-evaluation_complete_data.ipynb | tiantiy/missing_data_science | 1 |
<jupyter_start><jupyter_text># Notebook to analysis Clash Royale Cards
## To-Do
- combine Golemite to Golem
- draw radar chart by given binned variables<jupyter_code>import numpy as np
import pandas as pd
troop_card_path = "../data/troop_card_info.txt"
troop_damage_path = "../data/troop_damage_info.txt"
troop_card = pd.read_csv(troop_card_path, sep="\t", header = 0)
troop_damage = pd.read_csv(troop_damage_path, sep="\t", header = 0)
troop_level = pd.Series([5 ,6 ,8 ,8 ,5 ,8 ,6 ,7 ,8 ,8 ,3 ,1 ,0 ,5 ,5 ,5 ,5 ,5 ,5 ,3 ,4 ,0 ,2 ,3 ,3 ,2 ,2 ,3 ,2 ,2 ,2 ,1 ,1 ,0])
troop_card['Level'] = troop_level
own_troop = troop_card.merge(troop_damage)
own_troop['Count'] = pd.to_numeric(own_troop['Count'].str.replace("x",""))
own_troop['Damage'] = pd.to_numeric(own_troop['Damage'])
own_troop['Other Damage'] = pd.to_numeric(own_troop['Other Damage'])
own_troop['Hitpoints'] = pd.to_numeric(own_troop['Hitpoints'])
own_troop['Total Damage'] = own_troop['Count'] * own_troop['Damage'] + own_troop['Other Damage']
own_troop['Total HP'] = own_troop['Hitpoints'] * own_troop['Count']
own_troop.sort_values('Total Damage', axis=0, ascending=False)
def bin_target(x):
if x == "Ground":
return 2
if x == "Buildings":
return 3
if x == "Air":
return 4
if x == "Air & Ground":
return 5<jupyter_output><empty_output><jupyter_text>## Total Damage
- min: 67
- 25%: 149.5
- 50%: 219
- 75%: 462
- max: 924<jupyter_code>def bin_total_damage(x):
if x < 149.5:
return 2
elif x <219:
return 3
elif x <462:
return 4
else:
return 5
def bin_speed(x):
if x == "Slow":
return 2
elif x == "Medium":
return 3
elif x == "Fast":
return 4
else:
return 5
bin_troop = pd.DataFrame()
bin_troop['Name'] = own_troop['Name']
bin_troop['Bin Target'] = own_troop['Target'].apply(bin_target)
own_troop['Bin Total Damage'] = own_troop['Total Damage'].apply(bin_total_damage)
own_troop<jupyter_output><empty_output>
| no_license | /script/clash_royale.ipynb | WangJingxuan0216/clash_royale | 2 |
<jupyter_start><jupyter_text># TensorFlow Basic## TensorFlow Version<jupyter_code>import tensorflow as tf
tf.__version__<jupyter_output><empty_output><jupyter_text>## Hello TensorFlow<jupyter_code>hello = tf.constant("Hello TensorFlow!")
sess = tf.Session()
sess.run(hello)<jupyter_output><empty_output><jupyter_text>## graph<jupyter_code>node0 = tf.constant(3.0, tf.float32)
node1 = tf.constant(4.0)
node2 = tf.add(node0, node1)
print(f"node0 = {node0}")
print(f"node1 = {node1}")
print(f"node2 = {node2}")
print(sess.run([node0, node1]))
print(sess.run(node2))<jupyter_output>node0 = Tensor("Const_15:0", shape=(), dtype=float32)
node1 = Tensor("Const_16:0", shape=(), dtype=float32)
node2 = Tensor("Add_6:0", shape=(), dtype=float32)
[3.0, 4.0]
7.0
<jupyter_text>## placeholder<jupyter_code>a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
add_node = a + b
print(sess.run(add_node, feed_dict={a: [3.2,5.5,2.1], b:[7.0,4.,8.]}))<jupyter_output>[10.2 9.5 10.1]
<jupyter_text>## Variables<jupyter_code>weight = tf.Variable(tf.random_normal([1]), name='weight')
bias = tf.Variable(tf.random_normal([1]), name='bias')<jupyter_output><empty_output>
| no_license | /ai_visual/TF_Basic.ipynb | artcoderj/coding_practice | 5 |
<jupyter_start><jupyter_text><jupyter_code># Check work directory
!pwd
#Mount Colab onto Google Drive for easier extraction of files
from google.colab import drive
drive.mount('/content/gdrive')
#Import the operating system and confirm the list of files.
import os
os.chdir('/content/gdrive/MyDrive/Data Science/Data')
!ls
#Import libraries
import pandas as pd
import numpy as np
import datetime
from matplotlib import pyplot as plt
import glob
import seaborn as sns
#Import the data
TrendingData = pd.read_csv('/content/gdrive/MyDrive/Data Science/Data/YoutubeData.csv')
TrendingData.head()
TrendingData.describe()
#Check info on the data
TrendingData.info()
# Convert the trending date column to datetime format
TrendingData['trending_date']= pd.to_datetime(TrendingData['trending_date'])
# Convert the publish column to datetime format
TrendingData['publish_date']= pd.to_datetime(TrendingData['publish_date'])
# Check the format of the converted columns
TrendingData.info()
#Calculate the number of days between the trending date and the publish date
TrendingData2 = TrendingData
TrendingData2['difference_in_date'] = abs(TrendingData2['trending_date'] - TrendingData2['publish_date']).dt.days
TrendingData2.head()
#Get the day of the week of the publish date
TrendingData2['publish_day'] = TrendingData2['publish_date'].dt.day_name()
TrendingData2.head()
# Convert the publish time column to datetime format
TrendingData2['publish_time']= pd.to_datetime(TrendingData['publish_time'])
#Change the publish time to show the hour without the minutes and seconds
TrendingData2['publish_hour'] = TrendingData2['publish_time'].dt.hour
TrendingData2.head()
#Count the tag words
TrendingData2['tag_count'] = TrendingData2['tags'].str.len()
TrendingData2.head()
#Count the title words
TrendingData2['title_count'] = TrendingData2['title'].str.len()
TrendingData2.head()
# Count the description words
TrendingData2['description_count'] = TrendingData2['description'].str.len()
TrendingData2.head()
#Reorder the columns for similar columns to be placed together
UpdatedData = TrendingData2
UpdatedData = UpdatedData[['video_id', 'region', 'publish_date', 'trending_date', 'difference_in_date', 'publish_time', 'publish_hour', 'publish_day', 'title', 'title_count', 'channel_title', 'category_id', 'category_name', 'description', 'description_count', 'tags', 'tag_count', 'views', 'likes', 'ratings_disabled', 'video_error_or_removed']]
UpdatedData.head()
# Check the format of the reordered columns
UpdatedData.info()
PerRegion = UpdatedData.groupby('region').count()['title'].sort_values()
PerRegion.plot(kind='barh',title='Number of Trending Videos per Country ',grid=True,fontsize=14,figsize=(15,10))
plt.xlabel("Number of Trending Videos",fontsize=16)
plt.ylabel("Name of Country",fontsize=16)
plt.show()
#Observation
# The number of trending videos is fairly evenly spread across the regions. USA, Canada and Germany top the list and Japan has the lowest numbers compared to the other countries. Could it be that Japanese users spend less time on Youtube compared to the other listed countries?
#View data by the number of views per region
RegionViews=UpdatedData.groupby(by=['region'],as_index=False).views.sum()
plt.figure()
RegionViews.plot.bar('region', 'views')
plt.title('Number of Views per Region')
plt.show()
#View data by the number of likes per region
RegionLikes=UpdatedData.groupby(by=['region'],as_index=False).likes.sum()
plt.figure()
RegionLikes.plot.bar('region', 'likes')
plt.title('Number of Likes per Region')
plt.show()
# Observation:
# The number of views and the number of likes in each region are highly correlated with each other. An interesting observation is that the highest views and likes come from Great Britain, while in the previous chart the United States ranked highest in the number of trending videos. This suggests that many of the views and likes on trending videos come from Great Britain rather than from the videos' home regions.
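# A quick numeric check of the claim above (a sketch, not in the original notebook):
# merge the per-region view and like totals and compute their correlation across regions.
print(RegionViews.merge(RegionLikes, on='region')[['views', 'likes']].corr())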
#Analyze the number of days it takes from publish date to trending date
labels = UpdatedData.groupby(['difference_in_date']).count().index
sizes = UpdatedData.groupby(['difference_in_date']).count()['title']
fig, ax = plt.subplots(figsize=(10,10))
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal')
sizes
#View it in a table format
Grouped_UpdatedData = UpdatedData.groupby(['difference_in_date']).agg({'difference_in_date': 'count'})
Grouped_UpdatedData.head(20)
# Observation:
# 46.5% of trending videos trend within one day of being published, and this share gradually decreases as the days go by. This is a very short life span: if you haven't captured your audience by day 5, there is only about a 14% chance that your video will still trend after its publish date.
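# A quick check of the figures above (a sketch, not in the original notebook):
# cumulative share of trending videos by number of days from publish to trend.
print(UpdatedData['difference_in_date'].value_counts(normalize=True).sort_index().cumsum().head(10))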
#View data by the day of the week vs the number of views
DayofWeek=UpdatedData.groupby(by=['publish_day'],as_index=False).views.sum()
plt.figure()
DayofWeek.plot.bar('publish_day', 'views')
plt.title('Day of the Week Views')
plt.show()
#View data by the day of the week vs the publish date
PublishDateViews=UpdatedData.groupby(by=['publish_day'],as_index=False).publish_date.count()
plt.figure()
PublishDateViews.plot.bar('publish_day', 'publish_date')
plt.title('Days of the Week vs Publish Date')
plt.show()
PublishHour = UpdatedData.groupby('publish_hour').count()['title'].sort_values()
PublishHour.plot(kind='barh',title='Publish Hour Trend',grid=True,fontsize=14,figsize=(15,10))
plt.xlabel("Number of Trending Videos",fontsize=16)
plt.ylabel("Publish Hour",fontsize=16)
plt.show()
# Observation:
# The most popular publishing hours are 4:00 PM, 5:00 PM and 3:00 PM, in that order.
#Visualize the correlation patterns
corr = UpdatedData.corr()
plt.figure()
sns.heatmap(corr, annot=True);
# Observation:
# The most highly correlated variables are views and likes: as the number of views increases, the number of likes increases, and vice versa. There is a slight negative correlation with title count and a low correlation with tag count, so the number of words in a video's title or the number of tags contributes little to the number of views it gets.
#Statistical Summary of the numerical data
UpdatedData.describe()
# Observations:
# On average, a video takes 7 days to trend from the date it is published, and 75% of the videos took at most 3 days to trend. However, there is a major outlier: one video took 4,215 days after being published to trend, which inflates the mean and standard deviation.
# Most trending videos had an average of 266 tags total. The minimum had 2 tags and the maximum had 1,476 tags. 75% of the trending videos had 431 tags.
# The trending videos had an average title count of 63 words, 75% of the videos having 81 words and the max number of words that a video had was 297.
# The trending videos had an average description count of 938 words, 75% of the videos having 1,251 words and the max number of words that a video had was 11,938.
# The average number of views for trending videos was 1.32 million with a 75% percentile of 647,692 views. Interestingly, there’s a trending video that had 117 views and the highest number of views received was 424.5 million.
# The average number of likes for trending videos was 37,884 which is 2.86% of the liked videos. Minimum number of likes was zero and the maximum number of likes was 5.61 million translating to 1.3% of the maximum number of viewed videos.
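# The 4,215-day outlier mentioned above skews the mean and standard deviation, so the
# median is a more robust summary here (a sketch, not part of the original notebook;
# the 30-day cutoff below is an arbitrary illustration).
print(UpdatedData['difference_in_date'].median())
print(UpdatedData[UpdatedData['difference_in_date'] <= 30]['difference_in_date'].describe())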
#Review effect of activating the ratings function
plt.figure(figsize=(10,15))
Ratings_DisabledData=UpdatedData.groupby(by=['ratings_disabled'],as_index=False).views.sum()
plt.figure()
Ratings_DisabledData.plot.bar('ratings_disabled', 'views')
plt.title('Effect of Ratings on Views')
Ratings_DisabledData=UpdatedData.groupby(by=['ratings_disabled'],as_index=False).likes.sum()
plt.figure()
Ratings_DisabledData.plot.bar('ratings_disabled', 'likes')
plt.title('Effect of Ratings on likes')
Ratings_DisabledData=UpdatedData.groupby(by=['ratings_disabled'],as_index=False).difference_in_date.sum()
plt.figure()
Ratings_DisabledData.plot.bar('ratings_disabled', 'difference_in_date')
plt.title('Effect of Ratings on Difference in Dates')
plt.tight_layout()
plt.show()
# Observations:
#Disabling the rating feature of a Youtube video has a drastic effect on the number of views, likes and the difference in dates it trends from the publish date. This is one feature that should not be disabled for maximum viewership and eventually to a trending video.
#Check the categories of the trending videos with the highest views
labels = UpdatedData.groupby(['category_name']).count().index
sizes = UpdatedData.groupby(['category_name']).count()['views']
fig, ax = plt.subplots(figsize=(10,10))
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal')
sizes
#Generally what were the trending tags during this period for the trended videos
# separate each word in the tags column and add them onto a list of strings
# first split by '|' and send to a list.
tags = UpdatedData.tags.str.split('|').tolist()
# then get rid of anything that isn't a list
tags = [x for x in tags if type(x) == list]
# that gave us a list of lists (of strings), so we must separate the items in each
tags2 = []
tags3 = []
for item in tags:
for string in item:
# get rid of numbers and other types
if type(string) == str:
tags2.append(string)
def meaningless(x):
words = ['to','the','a','of','and','on','in','for','is','&','with','you','video','videos','"','"the','video"','videos"']
return x in words
# now let's split these strings by the spaces between words
for multiple in tags2:
singles = multiple.split()
# then let's add these cleaned tags to the final list
for tag in singles:
# now let's make everything lowercase and get rid of spaces
tag = tag.strip()
tag = tag.lower()
# now let's remove the meaningless tags
if not meaningless(tag):
tags3.append(tag)
# let's bring that into a dataframe
tagsdf = pd.DataFrame(tags3,columns=['tags'])
# then count the values
tagcounts = tagsdf.tags.value_counts()
# now preparing a bar chart representing the top values
tagcountslice = tagcounts[:30].sort_values()
tagcountslice.plot(kind='barh',title='Most Popular Tags on YouTube Trending Videos Between 14th Nov 2017 and 14th June 2018 ',grid=True,fontsize=12,figsize=(11,8))
plt.xlabel('In How Many Videos the Tag Occurred')
plt.tight_layout()
plt.show()
CategoryViews = UpdatedData.groupby('category_name').count()['title'].sort_values()
CategoryViews.plot(kind='barh',title='Category Name Trend',grid=True,fontsize=14,figsize=(15,10))
plt.xlabel("Number of Trending Videos",fontsize=16)
plt.ylabel("Category Name",fontsize=16)
plt.show()
#Check the categories of the trending videos with no tags
NoTagsVideo = UpdatedData[UpdatedData['tags']=='[none]']['category_name'].value_counts().reset_index()
plt.figure(figsize=(15,10))
sns.set_style("whitegrid")
ax = sns.barplot(y=NoTagsVideo['index'],x=NoTagsVideo['category_name'], data=NoTagsVideo,orient='h')
plt.xlabel("Number of Videos")
plt.ylabel("Categories")
plt.title("Categories of trend videos in with no tags")
# Observation:
# The top 3 categories with the highest views are Entertainment, People & Blogs and Music. This is definitely not a surprise indicating that most Youtube users go to Youtube for entertainment and updates from celebrities.
# The least viewed category is Movies and Trailers meaning that Youtube users prefer to watch movies from other platforms rather than Youtube. Nonprofits & Activism also do not have much of a chance on Youtube as the viewer rate is quite low.
#Download data set that has data that's not specifically trended. This data will be used for creating the predictive model
Set_One = pd.read_csv('/content/gdrive/MyDrive/Data Science/Data/Youtubedata_20210109_213012A.csv')
Set_One.head()
Set_Two = pd.read_csv('/content/gdrive/MyDrive/Data Science/Data/YouTubedata_20210106_220013B.csv')
Set_Two.head()
# Merge all data into one dataframe
Frames = [Set_One, Set_Two]
All_data = pd.concat(Frames, axis=0).reset_index()
All_data.head()
#Check info on the data
All_data.info()
# Do a count on the title words
All_data2 = All_data
All_data2['title_count'] = All_data2['title'].str.len()
All_data2.head()
# Do a count on the description words
All_data3 = All_data2
All_data3['description_count'] = All_data3['description'].str.len()
All_data3.head()
# Separate the date and time from publised at column
All_data4 = All_data3
All_data4['publish_date'] = pd.to_datetime(All_data4['published_at']).dt.date
All_data4['publish_time'] = pd.to_datetime(All_data4['published_at']).dt.time
All_data4.head()
# Convert the rows in published date column to datetime.
All_data4['publish_date'] = pd.to_datetime(All_data4['publish_date'])
# Get the day of the week from publish date column.
from datetime import datetime as dt
dw_mapping={
0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday'
}
All_data4['publish_day']=All_data4['publish_date'].dt.weekday.map(dw_mapping)
All_data4.head()
#Replace the values of the categoryId with its corresponding name.
All_data5 = All_data4.replace({'category' :{2 : 'Autos & Vehicles' , 1 : 'Film & Animation', 10 : 'Music', 15 : 'Pets & Animals', 17 : 'Sports', 18 : 'Short Movies', 19 : 'Travel & Events', 20 : 'Gaming', 21 : 'Videoblogging', 22 : 'People & Blogs', 23 : 'Comedy', 24 : 'Entertainment', 25 : 'News & Politics', 26 : 'Howto & Style', 27 : 'Education', 28 : 'Science & Technology', 29 : 'Nonprofits & Activism', 30 : 'Movies', 31 : 'Anime/Animation', 32 : 'Action/Adventure', 33 : 'Classics', 34 : 'Comedy', 35 : 'Documentary', 36 : 'Drama', 37 : 'Family', 38 : 'Foreign', 39 : 'Horror', 40 : 'Sci-Fi/Fantasy', 41 : 'Thriller', 42 : 'Shorts', 43 : 'Shows', 44 : 'Trailers'}})
All_data5.head()
#Drop columns not needed
All_data6 = All_data5.drop(columns =['level_0', 'Unnamed: 0', 'tags', 'thumbnail', 'localizations', 'topic_categories', 'default_language'], axis = 1)
All_data6.head()
#Visualize the correlation patterns
corr = All_data6.corr()
plt.figure()
sns.heatmap(corr, annot=True);
#Statistical summary of the merged dataset
All_data6.describe()
# Analysis showing the days of the week that gets the highest views
All_data6_Views = All_data6.groupby(by=['publish_day'],as_index=False).view.sum()
plt.figure()
All_data6_Views.plot.bar('publish_day', 'view')
plt.title('Day of the Week Vs the Number of Views')
plt.show()
#What is the common publish day
labels = All_data6.groupby(['publish_day']).count().index
sizes = All_data6.groupby(['publish_day']).count()['title']
fig, ax = plt.subplots(figsize=(10,10))
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal')
sizes
#Relationship between the number of views and the different variables
plt.figure(figsize=(10,15))
plt.subplot(3,2,1)
plt.scatter(All_data6.view, All_data6.like)
plt.xlabel('Views')
plt.ylabel('Likes')
plt.title("Relationship between Views and Likes")
plt.subplot(3,2,2)
plt.scatter(All_data6.view, All_data6.dislike)
plt.xlabel('Views')
plt.ylabel('Dislikes')
plt.title("Relationship between Views and Dislikes")
plt.subplot(3,2,3)
plt.scatter(All_data6.view, All_data6.title_count)
plt.xlabel('Views')
plt.ylabel('Title Count')
plt.title("Relationship between Views and Title Count")
plt.subplot(3,2,4)
plt.scatter(All_data6.view, All_data6.comment)
plt.xlabel('Views')
plt.ylabel('Comment')
plt.title("Relationship between Views and Comments")
plt.subplot(3,2,5)
plt.scatter(All_data6.view, All_data6.description_count)
plt.xlabel('Views')
plt.ylabel('Description Count')
plt.title("Relationship between Views and Description Count")
plt.subplot(3,2,6)
plt.scatter(All_data6.view, All_data6.channel_sub)
plt.xlabel('Views')
plt.ylabel('Channel Subscriptions')
plt.title("Relationship between Views and Channel Subscriptions")
plt.show()
CategoryViews = All_data6.groupby('category').sum()['view'].sort_values()
CategoryViews.plot(kind='barh',title='Category Name Trend',grid=True,fontsize=14,figsize=(15,10))
plt.xlabel("Number of Trending Videos",fontsize=16)
plt.ylabel("Category Name",fontsize=16)
plt.show()
#Import OneHotCoder library
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
All_data7 = pd.get_dummies(All_data6, columns=['category'], drop_first=True)
pd.set_option("display.max_columns", None)
All_data7.head()
#75% of the videos from the trending dataset had a title count of 81 words. Thus, the code below will differentiate between title count that are <= 81 and those that aren't.
All_data8 = All_data7
All_data8['title_count'] = (All_data7['title_count'] <= 81).astype(int)
All_data8.head()
#75% of the videos from the dataset had a description count of 1,251 words. Thus, the code below will differentiate between description count that are <= 1,251 and those that aren't.
All_data9 = All_data8
All_data9['description_count'] = (All_data9['description_count'] <= 1251).astype(int)
All_data9.head()
#75% of the videos from the trending dataset had at most 647,692 views. Thus, the code below will differentiate between view counts that are <= 647,692 and those that aren't.
All_data10 = All_data9
All_data10['view'] = (All_data10['view'] <= 647692).astype(int)
All_data10.head()
#Import logarithm libraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import sklearn.model_selection
from sklearn.metrics import accuracy_score
# Create a logistic regression model with 'view' as the target variable
X = All_data10[['description_count', 'category_Entertainment', 'title_count']]
y = All_data10['view']
#split the data into train and test using train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=0)
#Create an instance for Logistic Regression and fit the model
lr = LogisticRegression()
lr.fit(X_train,y_train)
#Predict the test data set
y_pred = lr.predict(X_test)
#Model evaluation using the confusion matrix
from sklearn import metrics
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix
# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred))
print("Recall:",metrics.recall_score(y_test, y_pred))
y_pred_proba = lr.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
plt.show()
#Observation
# The model performs barely better than chance, given that the AUC score is 0.53.
# Precision is 0.92 and recall is 1.0, meaning that 92% of the samples predicted as positive were actually positive and none of the actual positives were missed. Combined with the confusion matrix, this suggests the model predicts the positive (majority) class for almost every sample rather than discriminating between the classes.
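# A class-balance check helps interpret the scores above (a sketch, not in the
# original notebook): if one class dominates the test split, precision roughly equals
# that class's share and recall can reach 1.0 simply because the model predicts the
# majority class for almost every sample.
print(y_test.value_counts(normalize=True))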
<jupyter_output><empty_output>
| no_license | /Data_Cquence_Capstone_Project_B_Measurement_and_Analysis_of_Youtube_Videos.ipynb | nalymugwe/DataCquence-Capstone-Project | 1 |
<jupyter_start><jupyter_text>| Name | Description | Date
| :- |-------------: | :-:
|Reza Hashemi| Skewed Logistic Regression. | On 15th of July 2019<jupyter_code>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline<jupyter_output><empty_output><jupyter_text>### Use scikit-learn's built-in *make_classification* method to generate syntehtic classificiation data<jupyter_code>from sklearn.datasets import make_classification<jupyter_output><empty_output><jupyter_text>#### I used two informative features (Temp, Humidity) and one redundant feature 'Crime'<jupyter_code>X,y = make_classification(n_samples=35040,n_classes=2,n_features=3,n_informative=2,n_redundant=1,
weights=[0.999,0.001],class_sep=1.0)
df=pd.DataFrame(data=X,columns=['Temp','Humidity','Crime'])
df['y']=y
df['Temp']=df['Temp']-min(df['Temp'])
maxt=max(df['Temp'])
df['Temp']=90*df['Temp']/maxt
df['Humidity']=df['Humidity']-min(df['Humidity'])
maxh=max(df['Humidity'])
df['Humidity']=100*df['Humidity']/maxh
df['Crime']=df['Crime']-min(df['Crime'])
maxc=max(df['Crime'])
df['Crime']=10*df['Crime']/maxc
df.hist('Temp')
df.hist('Humidity')
df.hist('Crime')<jupyter_output><empty_output><jupyter_text>### Take a sum on the Boolean array with df['y']==1 to count the number of positive examples<jupyter_code>sum(df['y']==1)<jupyter_output><empty_output><jupyter_text>** That means only 223 responses out of 35040 samples are positive **<jupyter_code>df.head(10)
df.describe()<jupyter_output><empty_output><jupyter_text>## Logistic Regression undersampling<jupyter_code>from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import classification_report<jupyter_output><empty_output><jupyter_text>### Under-sampling the negative class to limited number<jupyter_code>df0=df[df['y']==0].sample(800)
df1=df[df['y']==1]
df_balanced = pd.concat([df0,df1],axis=0)
df_balanced.describe()
df_balanced.hist('y')
plt.title("Relative frequency of positive and negative classes\n in the balanced (under-sampled) dataset")
log_model_balanced = LogisticRegressionCV(cv=5,class_weight='balanced')
X_train, X_test, y_train, y_test = train_test_split(df_balanced.drop('y',axis=1),
df_balanced['y'], test_size=0.30)
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)  # apply the same scaling to the held-out test split
log_model_balanced.fit(X_train,y_train)
print(classification_report(y_test,log_model_balanced.predict(X_test)))<jupyter_output> precision recall f1-score support
0 0.00 0.00 0.00 233
1 0.23 1.00 0.38 70
avg / total 0.05 0.23 0.09 303
<jupyter_text>### I did an experiment with how the degree of under-sampling affects _F1-score_, _precision_, and _recall_<jupyter_code>from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
n_neg = [i for i in range(200,4200,200)]
df1=df[df['y']==1]
F1_scores=[]
precision_scores=[]
recall_scores=[]
for num in n_neg:
# Create under-sampled data sets
df0=df[df['y']==0].sample(num)
df_balanced = pd.concat([df0,df1],axis=0)
# Create model with 'class_weight=balanced' and 5-fold cross-validation
log_models=LogisticRegressionCV(cv=5,class_weight='balanced')
# Create test/train splits
X_train, X_test, y_train, y_test = train_test_split(df_balanced.drop('y',axis=1),
df_balanced['y'], test_size=0.30)
    # Min-max scale the training data and apply the same scaling to the test data
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
# Fit the logistic regression model
log_models.fit(X_train,y_train)
# Calculate various scores
F1_scores.append(f1_score(y_test,log_models.predict(X_test)))
precision_scores.append(precision_score(y_test,log_models.predict(X_test)))
recall_scores.append(recall_score(y_test,log_models.predict(X_test)))
plt.scatter(n_neg,F1_scores,color='green',edgecolor='black',alpha=0.6,s=100)
plt.title("F1-score as function of negative samples")
plt.grid(True)
plt.ylabel("F1-score")
plt.xlabel("Number of negative samples")
plt.scatter(n_neg,precision_scores,color='orange',edgecolor='black',alpha=0.6,s=100)
plt.title("Precision score as function of negative samples")
plt.grid(True)
plt.ylabel("Precision score")
plt.xlabel("Number of negative samples")
plt.scatter(n_neg,recall_scores,color='blue',edgecolor='black',alpha=0.6,s=100)
plt.title("Recall score as function of negative samples")
plt.grid(True)
plt.ylabel("Recall score")
plt.xlabel("Number of negative samples")<jupyter_output><empty_output>
| permissive | /Classification/Skewed Logistic Regression.ipynb | rezapci/Machine-Learning | 8 |
<jupyter_start><jupyter_text>
# Embedding In Wx3
Copyright (C) 2003-2004 Andrew Straw, Jeremy O'Donoghue and others
License: This work is licensed under the PSF. A copy should be included
with this source code, and is also available at
https://docs.python.org/3/license.html
This is yet another example of using matplotlib with wx. Hopefully
this is pretty full-featured:
- both matplotlib toolbar and WX buttons manipulate plot
- full wxApp framework, including widget interaction
- XRC (XML wxWidgets resource) file to create GUI (made with XRCed)
This was derived from embedding_in_wx and dynamic_image_wxagg.
Thanks to matplotlib and wx teams for creating such great software!
<jupyter_code>from __future__ import print_function
import sys
import time
import os
import gc
import matplotlib
import matplotlib.cm as cm
import matplotlib.cbook as cbook
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy as np
import wx
import wx.xrc as xrc
ERR_TOL = 1e-5 # floating point slop for peak-detection
matplotlib.rc('image', origin='lower')
class PlotPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1)
self.fig = Figure((5, 4), 75)
self.canvas = FigureCanvas(self, -1, self.fig)
self.toolbar = NavigationToolbar(self.canvas) # matplotlib toolbar
self.toolbar.Realize()
# self.toolbar.set_active([0,1])
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, wx.GROW)
self.SetSizer(sizer)
self.Fit()
def init_plot_data(self):
a = self.fig.add_subplot(111)
x = np.arange(120.0) * 2 * np.pi / 60.0
y = np.arange(100.0) * 2 * np.pi / 50.0
self.x, self.y = np.meshgrid(x, y)
z = np.sin(self.x) + np.cos(self.y)
self.im = a.imshow(z, cmap=cm.RdBu) # , interpolation='nearest')
zmax = np.max(z) - ERR_TOL
ymax_i, xmax_i = np.nonzero(z >= zmax)
if self.im.origin == 'upper':
ymax_i = z.shape[0] - ymax_i
self.lines = a.plot(xmax_i, ymax_i, 'ko')
self.toolbar.update() # Not sure why this is needed - ADS
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def OnWhiz(self, evt):
self.x += np.pi / 15
self.y += np.pi / 20
z = np.sin(self.x) + np.cos(self.y)
self.im.set_array(z)
zmax = np.max(z) - ERR_TOL
ymax_i, xmax_i = np.nonzero(z >= zmax)
if self.im.origin == 'upper':
ymax_i = z.shape[0] - ymax_i
self.lines[0].set_data(xmax_i, ymax_i)
self.canvas.draw()
class MyApp(wx.App):
def OnInit(self):
xrcfile = cbook.get_sample_data('embedding_in_wx3.xrc',
asfileobj=False)
print('loading', xrcfile)
self.res = xrc.XmlResource(xrcfile)
# main frame and panel ---------
self.frame = self.res.LoadFrame(None, "MainFrame")
self.panel = xrc.XRCCTRL(self.frame, "MainPanel")
# matplotlib panel -------------
# container for matplotlib panel (I like to make a container
# panel for our panel so I know where it'll go when in XRCed.)
plot_container = xrc.XRCCTRL(self.frame, "plot_container_panel")
sizer = wx.BoxSizer(wx.VERTICAL)
# matplotlib panel itself
self.plotpanel = PlotPanel(plot_container)
self.plotpanel.init_plot_data()
# wx boilerplate
sizer.Add(self.plotpanel, 1, wx.EXPAND)
plot_container.SetSizer(sizer)
# whiz button ------------------
whiz_button = xrc.XRCCTRL(self.frame, "whiz_button")
whiz_button.Bind(wx.EVT_BUTTON, self.plotpanel.OnWhiz)
# bang button ------------------
bang_button = xrc.XRCCTRL(self.frame, "bang_button")
bang_button.Bind(wx.EVT_BUTTON, self.OnBang)
# final setup ------------------
sizer = self.panel.GetSizer()
self.frame.Show(1)
self.SetTopWindow(self.frame)
return True
def OnBang(self, event):
bang_count = xrc.XRCCTRL(self.frame, "bang_count")
bangs = bang_count.GetValue()
bangs = int(bangs) + 1
bang_count.SetValue(str(bangs))
if __name__ == '__main__':
app = MyApp(0)
app.MainLoop()<jupyter_output><empty_output>
| no_license | /2.2.2/_downloads/embedding_in_wx3_sgskip.ipynb | matplotlib/matplotlib.github.com | 1 |
<jupyter_start><jupyter_text># Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.## Updates
This notebook has been updated over the past few months. The prior version was named "v5", and the current version is now named '6a'.
#### If you were working on a previous version:
* You can find your prior work by looking in the file directory for the older files (named by version name).
* To view the file directory, click on the "Coursera" icon in the top left corner of this notebook.
* Please copy your work from the older versions to the new version, in order to submit your work for grading.
#### List of Updates
* Forward propagation formula, indexing now starts at 1 instead of 0.
* Optimization function comment now says "print cost every 100 training iterations" instead of "examples".
* Fixed grammar in the comments.
* Y_prediction_test variable name is used consistently.
* Plot's axis label now says "iterations (hundred)" instead of "iterations".
* When testing the model, the test image is normalized by dividing by 255.## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](http://www.numpy.org) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline<jupyter_output><empty_output><jupyter_text>## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.<jupyter_code># Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()<jupyter_output><empty_output><jupyter_text>We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. <jupyter_code># Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print(train_set_x_orig.shape)
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")<jupyter_output>(209, 64, 64, 3)
y = [1], it's a 'cat' picture.
<jupyter_text>Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
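As a small, hypothetical illustration of keeping dimensions straight (not part of the assignment), a couple of `assert` statements on array shapes catch mismatches early:
```python
import numpy as np

# Hypothetical shapes mirroring this dataset: 209 training images of 64x64 pixels with 3 channels
X = np.zeros((209, 64, 64, 3))
Y = np.zeros((1, 209))

assert X.shape[0] == Y.shape[1], "number of examples must match between images and labels"
assert X.shape[1] == X.shape[2], "images are expected to be square"
```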
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.<jupyter_code>### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))<jupyter_output>Number of training examples: m_train = 209
Number of testing examples: m_test = 50
Height/Width of each image: num_px = 64
Each image is of size: (64, 64, 3)
train_set_x shape: (209, 64, 64, 3)
train_set_y shape: (1, 209)
test_set_x shape: (50, 64, 64, 3)
test_set_y shape: (1, 50)
<jupyter_text>**Expected Output for m_train, m_test and num_px**:
| | |
| :- | -: |
| **m_train** | 209 |
| **m_test** | 50 |
| **num_px** | 64 |
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```<jupyter_code># Reshape the training and test examples
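# Illustrative check of the reshape trick on a tiny hypothetical array (kept commented out):
# toy = np.arange(2 * 3 * 4 * 3).reshape(2, 3, 4, 3)   # shape (a, b, c, d) = (2, 3, 4, 3)
# toy.reshape(toy.shape[0], -1).T.shape                # -> (36, 2), i.e. (b*c*d, a)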
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T
test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))<jupyter_output>train_set_x_flatten shape: (12288, 209)
train_set_y shape: (1, 209)
test_set_x_flatten shape: (12288, 50)
test_set_y shape: (1, 50)
sanity check after reshaping: [17 31 56 22 33]
<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **train_set_x_flatten shape** | (12288, 209) |
| **train_set_y shape** | (1, 209) |
| **test_set_x_flatten shape** | (12288, 50) |
| **test_set_y shape** | (1, 50) |
| **sanity check after reshaping** | [17 31 56 22 33] |
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
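For comparison, a minimal sketch of the general mean/standard-deviation standardization described above (illustrative only; the assignment itself just divides by 255):
```python
# Scalar mean and standard deviation of the whole flattened training array
mean = train_set_x_flatten.mean()
std = train_set_x_flatten.std()
train_set_x_standardized = (train_set_x_flatten - mean) / std
```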
Let's standardize our dataset.<jupyter_code>train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.<jupyter_output><empty_output><jupyter_text>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
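As a quick worked example of equation (3): a confident correct prediction contributes little to the cost, while a confident wrong one contributes a lot. With $y^{(i)} = 1$,
$$\mathcal{L}(0.9, 1) = -\log(0.9) \approx 0.105, \qquad \mathcal{L}(0.1, 1) = -\log(0.1) \approx 2.303.$$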
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().<jupyter_code># GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1 / (1 + np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))<jupyter_output>sigmoid([0, 2]) = [ 0.5 0.88079708]
<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **sigmoid([0, 2])** | [ 0.5 0.88079708] |
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.<jupyter_code># GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros((dim, 1))
b = 0.
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))<jupyter_output>w = [[ 0.]
[ 0.]]
b = 0.0
<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **w** | [[ 0.] [ 0.]] |
| **b** | 0 |
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$<jupyter_code># GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
#x.shape = (NR_FEATURES, NX)
#w.shape = (NR_FEATURES, 1)
A = sigmoid(np.dot(X.T, w) + b)
#A.shape = (NX, 1)
cost = (Y.T * np.log(A) + (1 - Y.T) * np.log(1 - A)).sum() / -m
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = np.dot(X, (A - Y.T)) / m
db = (A - Y.T).sum() / m
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw, "db": db}
return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))<jupyter_output>dw = [[ 0.99845601]
[ 2.39507239]]
db = 0.00145557813678
cost = 5.80154531939
<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **dw** | [[ 0.99845601] [ 2.39507239]] |
| **db** | 0.00145557813678 |
| **cost** | 5.801545319394553 |
### 4.4 - Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.<jupyter_code># GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate * dw
b = b - learning_rate * db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w, "b": b}
grads = {"dw": dw, "db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))<jupyter_output>w = [[ 0.19033591]
[ 0.12259159]]
b = 1.92535983008
dw = [[ 0.67752042]
[ 1.41625495]]
db = 0.219194504541
<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **w** | [[ 0.19033591] [ 0.12259159]] |
| **b** | 1.92535983008 |
| **dw** | [[ 0.67752042] [ 1.41625495]] |
| **db** | 0.219194504541 |
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), and store the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). <jupyter_code># GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
#w has shape (NRFEATURES, 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(X.T, w) + b)
#A has shape (M, 1)
### END CODE HERE ###
A = A.reshape(1, m)
#print(A)
Y_prediction[A > 0.5] = 1
#above i used vectorization, avoiding a for-loop
'''
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
if (A[index] < 0.5):
Y_prediction[index] = 0
else:
Y_prediction[index] = 1
### END CODE HERE ###
'''
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))<jupyter_output>[[ 0.52241976 0.50960677 0.34597965]]
predictions = [[ 1. 1. 0.]]
<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **predictions** | [[ 1. 1. 0.]] |
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction_test for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()<jupyter_code># GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d<jupyter_output><empty_output><jupyter_text>Run the following cell to train your model.<jupyter_code>d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.006, print_cost = True)<jupyter_output>Cost after iteration 0: 0.693147
Cost after iteration 100: 0.649811
Cost after iteration 200: 0.538312
Cost after iteration 300: 0.439262
Cost after iteration 400: 0.349825
Cost after iteration 500: 0.278498
Cost after iteration 600: 0.249764
Cost after iteration 700: 0.231178
Cost after iteration 800: 0.215229
Cost after iteration 900: 0.201339
Cost after iteration 1000: 0.189110
Cost after iteration 1100: 0.178249
Cost after iteration 1200: 0.168533
Cost after iteration 1300: 0.159788
Cost after iteration 1400: 0.151873
Cost after iteration 1500: 0.144677
Cost after iteration 1600: 0.138104
Cost after iteration 1700: 0.132079
Cost after iteration 1800: 0.126537
Cost after iteration 1900: 0.121421
[[ 0.95440359 0.88120845 0.89882433 0.9490987 0.7585563 0.53890624
0.02232011 0.86695688 0.89310163 0.68301889 0.24021578 0.47633497
0.75469443 0.81138514 0.0072201 0.9344184 0.02042173 0.87032334
0.18533124 0.02788586 0.94528046 0.11893648 0.01056269 0.81216[...]<jupyter_text>**Expected Output**:
| | |
| :- | -: |
| **Cost after iteration 0** | 0.693147 |
| $\vdots$ | $\vdots$ |
| **Train Accuracy** | 99.04306220095694 % |
| **Test Accuracy** | 70.0 % |
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. That is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.<jupyter_code># Example of a picture that was wrongly classified.
index = 8
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")<jupyter_output>y = 1, you predicted that it is a "cat" picture.
<jupyter_text>Let's also plot the cost function and the gradients.<jupyter_code># Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()<jupyter_output><empty_output><jupyter_text>**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. ## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. #### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
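A tiny standalone sketch of this trade-off on a one-dimensional quadratic (illustrative only, not part of the assignment):
```python
def run_gd(lr, steps=20, w=5.0):
    # minimize f(w) = w**2, whose gradient is 2*w
    for _ in range(steps):
        w = w - lr * 2 * w
    return w

print(run_gd(0.01))  # too small: still far from the minimum at 0 after 20 steps
print(run_gd(0.5))   # well chosen: reaches the minimum immediately
print(run_gd(1.1))   # too large: |w| grows each step, the update diverges
```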
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. <jupyter_code>learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()<jupyter_output>learning rate is: 0.01
[[ 0.97125943 0.9155338 0.92079132 0.96358044 0.78924234 0.60411297
0.01179527 0.89814048 0.91522859 0.70264065 0.19380387 0.49537355
0.7927164 0.85423431 0.00298587 0.96199699 0.01234735 0.9107653
0.13661137 0.01424336 0.96894735 0.1033746 0.00579297 0.86081326
0.53811196 0.64950178 0.83272843 0.00426307 0.0131452 0.99947804
0.11468372 0.82182442 0.69611733 0.4991522 0.67231401 0.01728165
0.04136099 0.80069693 0.26832359 0.03958566 0.74731239 0.32116434
0.71871197 0.01205725 0.96879962 0.62310364 0.17737126 0.98960523
0.74697265 0.07284605]]
[[ 1.47839654e-01 5.78008187e-02 9.42385025e-01 4.14849240e-05
2.27209941e-02 7.29254668e-02 2.23704494e-02 9.49717864e-01
5.41724296e-02 2.92729895e-02 6.82412300e-02 8.33370210e-01
1.71420615e-01 9.66879883e-01 8.11537151e-01 2.44343483e-02
7.87634098e-03 2.64027272e-02 5.60720049e-02 9.53130353e-01
5.30[...]<jupyter_text>**Interpretation**:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!<jupyter_code>## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
image = image/255.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")<jupyter_output><empty_output>
| permissive | /Neural Networks and Deep Learning/week_2/logistic_regression_with_a_neural_network_mindset.ipynb | hantoniu/Deep-Learning-Specialization-Coursera | 17 |
<jupyter_start><jupyter_text># Prediction using Decision Tree Algorithm
Create the Decision Tree classifier and visualize it graphically.\
The purpose is that, if we feed any new data to this classifier, it should be able to predict the right class accordingly (a minimal prediction example appears after the tree visualization at the end of this notebook). Made By: Raghu Madhav Tiwari<jupyter_code># importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#importing the data
iris=pd.read_csv("iris.csv")
iris.head()
iris['Species']=iris["Species"].str.split('-',expand=True)[1]
iris.drop('Id',axis=1,inplace=True)
iris.head()
iris.info()
iris.describe()
# import train test split
from sklearn.model_selection import train_test_split
# splitting feature and target variable
X=iris.iloc[:,:4]
y=iris["Species"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier, export_graphviz
# initialize, train and predict
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(X_train,y_train)
dt.predict(X_test)
export_graphviz(dt,'dt.tree')
# finding importance of each feature
dt.feature_importances_
# !pip install graphviz
# conda install python-graphviz
## uncomment to download graphviz in anaconda navigator
dot_data = tree.export_graphviz(dt, out_file=None,
feature_names=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'],
class_names=np.array(['setosa', 'versicolor', 'virginica']),
filled=True)
# Draw graph
import graphviz
graph = graphviz.Source(dot_data, format="png")
graph<jupyter_output><empty_output>
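As promised above, a minimal sketch of feeding new data to the fitted classifier (the measurement values below are made up for illustration; `dt` and the column names come from the cells above):
```python
import pandas as pd

# A hypothetical new flower measurement, in the same column order used for training
new_flower = pd.DataFrame([[5.1, 3.5, 1.4, 0.2]],
                          columns=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'])
print(dt.predict(new_flower))  # e.g. ['setosa']
```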
| no_license | /Task 02.2- Prediction using Decision Tree Algorithm .ipynb | RaghuMadhavTiwari/LGVPIM-DATA-SCIENCE | 1 |
<jupyter_start><jupyter_text># KNN
First run the file [/notebooks/CaseStudy1/Clean%20Data%20(From%20Book).ipynb](/notebooks/CaseStudy1/Clean%20Data%20(From%20Book).ipynb)
<jupyter_code>OFFLINE_SOURCE_CSV = '../data/offline.final.trace.csv'
OFFLINE_SOURCE = '../data/offline.final.trace.txt'
ONLINE_SOURCE = '../data/online.final.trace.txt'
ALL_MACS = c('00:0f:a3:39:dd:cd', '00:0f:a3:39:e1:c0', '00:14:bf:3b:c7:c6', '00:14:bf:b1:97:81', '00:14:bf:b1:97:8a', '00:14:bf:b1:97:8d', '00:14:bf:b1:97:90')
# remove 00:0f:a3:39:dd:cd
ORIGINAL_MACS = c('00:0f:a3:39:e1:c0', '00:14:bf:3b:c7:c6', '00:14:bf:b1:97:81', '00:14:bf:b1:97:8a', '00:14:bf:b1:97:8d', '00:14:bf:b1:97:90')
NEXT_MACS = c('00:0f:a3:39:dd:cd', '00:14:bf:3b:c7:c6', '00:14:bf:b1:97:81', '00:14:bf:b1:97:8a', '00:14:bf:b1:97:8d', '00:14:bf:b1:97:90')
NUM_ROWS_FOR_SAMPLE = 1000 # out of 914,951 rows
roundOrientation = function(angles) {
refs = seq(0, by = 45, length = 9)
q = sapply(angles, function(o) which.min(abs(o - refs)))
c(refs[1:8], 0)[q]
}
processLine =
function(x)
{
tokens = strsplit(x, "[;=,]")[[1]]
tmp = matrix(tokens[ - (1:10) ], ncol = 4, byrow = TRUE)
cbind(matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp),
ncol = 6, byrow = TRUE), tmp)
}
readData =
function(filename = 'offline.final.trace.txt',
subMacs = ALL_MACS)
{
txt = readLines(filename)
lines = txt[ substr(txt, 1, 1) != "#" ]
tmp = lapply(lines, processLine)
offline = as.data.frame(do.call("rbind", tmp),
stringsAsFactors= FALSE)
names(offline) = c("time", "scanMac",
"posX", "posY", "posZ", "orientation",
"mac", "signal", "channel", "type")
# keep only signals from access points
offline = offline[ offline$type == "3", ]
# drop scanMac, posZ, channel, and type - no info in them
dropVars = c("scanMac", "posZ", "channel", "type")
offline = offline[ , !( names(offline) %in% dropVars ) ]
# drop more unwanted access points
offline = offline[ offline$mac %in% subMacs, ]
# convert numeric values
numVars = c("time", "posX", "posY", "orientation", "signal")
offline[ numVars ] = lapply(offline[ numVars ], as.numeric)
# convert time to POSIX
offline$rawTime = offline$time
offline$time = offline$time/1000
class(offline$time) = c("POSIXt", "POSIXct")
# round orientations to nearest 45
offline$angle = roundOrientation(offline$orientation)
return(offline)
}
online = readData(ONLINE_SOURCE)
offline = read.csv(OFFLINE_SOURCE_CSV, row.names=1)
trainSample = offline[sample(nrow(offline), NUM_ROWS_FOR_SAMPLE), ]
#Create a special factor that contains all of the unique combinations
#of the observed (x, y) pairs for the 166 locations.
get.summary = function(df) {
df$posXY = paste(df$posX, df$posY, sep = "-")
#create a list of data frames for every combination of (x, y), angle, and access point
byLocAngleAP = with(df,
by(df, list(posXY, angle, mac),
function(x) x))
#create summary statistics on each of the data frames
signalSummary =
lapply(byLocAngleAP,
function(oneLoc) {
ans = oneLoc[1, ]
ans$medSignal = median(oneLoc$signal)
ans$avgSignal = mean(oneLoc$signal)
ans$num = length(oneLoc$signal)
ans$sdSignal = sd(oneLoc$signal)
ans$iqrSignal = IQR(oneLoc$signal)
return(ans)
})
return(do.call("rbind", signalSummary))
}
offlineSummary = get.summary(offline)
online$posXY = paste(online$posX, online$posY, sep = "-")
tabonlineXYA = table(online$posXY, online$angle)
keepVars = c("posXY", "posX","posY", "orientation", "angle")
byLoc = with(online,
by(online, list(posXY),
function(x) {
ans = x[1, keepVars]
avgSS = tapply(x$signal, x$mac, mean)
y = matrix(avgSS, nrow = 1, ncol = 6)
cbind(ans, y)
}))
onlineSummary = do.call("rbind", byLoc)
# provide a scalar x and y along with a dataframe that has the columns posX and posY
# will return the dataframe sorted by whose posX/posY values are closest to the x/y provided
findNN.with.x.y = function(x, y, trainSubset) {
diffs = apply(trainSubset[c('posX', 'posY')], 1, function(row) row - c(x, y))
dists = apply(diffs, 2, function(x) sqrt(sum(x^2)) )
closest = order(dists)
return(trainSubset[closest,])
}
# Ideally, this would have a bunch of posX=4, posY=8 rows
head(findNN.with.x.y(4, 8, trainSample))
bossStuff = 0
selectTrain = function (angleNewObs, signals, m, macs=ALL_MACS) {
refs = seq(0, by = 45, length = 8)
nearestAngle = roundOrientation(angleNewObs)
if (m %% 2 == 1) {
angles = seq(-45 * (m - 1) /2, 45 * (m - 1) /2, length = m)
} else {
m=m+1
angles = seq(-45 * (m - 1) /2, 45 * (m - 1) /2, length = m)
if (sign(angleNewObs - nearestAngle) > -1)
angles = angles[ -1 ]
else
angles = angles[ -m ]
}
angles = angles + nearestAngle
angles[angles < 0] = angles[ angles < 0 ] + 360
angles[angles > 360] = angles[ angles > 360 ] - 360
offlineSubset = signals[ signals$angle %in% angles, ]
#offlineSubset = subset(offlineSubset, mac %in% macs)
reshapeSS = function(data, varSignal = "signal",
keepVars = c("posXY", "posX","posY")) {
byLocation =
with(data, by(data, list(posXY),
function(x) {
ans = x[1, keepVars]
avgSS = tapply(x[ , varSignal ], x$mac, mean)
#print(avgSS)
bossStuff <<- avgSS
                       # Take a subset of the data for only rows with mac matching the supplied macs parameter
#print(avgSS)
avgSS = avgSS[macs]
#print(avgSS)
#print(class(avgSS))
#print(list(ans$posXY, names(avgSS)))
# TODO: bring dim.names back perhaps?
dim.names = list(ans$posXY, names(avgSS))[2][[1]]
#print(length(dim.names))
#print(dim.names)
y = matrix(avgSS, nrow = 1, dimnames=list(ans$posXY, names(avgSS)))
cbind(ans, y)
}))
newDataSS = do.call("rbind", byLocation)
return(newDataSS)
}
# TODO: I'm getting a bunch of warnings about columns length not matching
return(suppressWarnings(reshapeSS(offlineSubset, varSignal = "avgSignal")))
}
train130 = selectTrain(130, offlineSummary, m = 3, ALL_MACS)
print(dim(train130))
tail(train130)
tail(selectTrain(130, offlineSummary, m = 3, ORIGINAL_MACS))
setdiff(ALL_MACS, ORIGINAL_MACS)
setdiff(ALL_MACS, NEXT_MACS)
setdiff(NEXT_MACS, ORIGINAL_MACS)
head(offline)
select.last.n.columns.from.df = function (df, n) {
df[,(ncol(df)-n+1):ncol(df)]
}
head(select.last.n.columns.from.df(offline, 2))
findNN = function(newSignal, trainSubset, macs) {
mac.columns = select.last.n.columns.from.df(trainSubset, length(macs))
diffs = apply(mac.columns, 1,
function(x) x - newSignal)
dists = apply(diffs, 2, function(x) sqrt(sum(x^2)) )
closest = order(dists)
return(trainSubset[closest, 1:3 ])
}
predXY = function(newSignals, newAngles, trainData,
numAngles = 1, k = 3, macs = ALL_MACS){
print(macs)
print(dim(trainData))
closeXY = list(length = nrow(newSignals))
for (i in 1:nrow(newSignals)) {
trainSS = selectTrain(newAngles[i], trainData, m = numAngles, macs)
closeXY[[i]] =
findNN(newSignal = as.numeric(newSignals[i, ]), trainSS, macs)
}
estXY = lapply(closeXY,
function(x) sapply(x[ , 2:3],
function(x) mean(x[1:k])))
estXY = do.call("rbind", estXY)
return(estXY)
}
estXYk1 = predXY(newSignals = onlineSummary[ , 6:11],
newAngles = onlineSummary[ , 4],
offlineSummary, numAngles = 3, k = 1)
estXYk3 = predXY(newSignals = onlineSummary[ , 6:11],
newAngles = onlineSummary[ , 4],
offlineSummary, numAngles = 3, k = 3)
calcError = function(estXY, actualXY)
sum( rowSums( (estXY - actualXY)^2) )<jupyter_output><empty_output><jupyter_text>### Errors comparing K=1 versus K=3<jupyter_code>actualXY = onlineSummary[ , c("posX", "posY")]
sapply(list(estXYk1, estXYk3), calcError, actualXY)
estXYk1 = predXY(newSignals = onlineSummary[ , 6:11],
newAngles = onlineSummary[ , 4],
offlineSummary, numAngles = 3, k = 1)
estXYk3 = predXY(newSignals = onlineSummary[ , 6:11],
newAngles = onlineSummary[ , 4],
offlineSummary, numAngles = 3, k = 3)
actualXY = onlineSummary[ , c("posX", "posY")]
sapply(list(estXYk1, estXYk3), calcError, actualXY)<jupyter_output>[1] "00:0f:a3:39:dd:cd" "00:0f:a3:39:e1:c0" "00:14:bf:3b:c7:c6"
[4] "00:14:bf:b1:97:81" "00:14:bf:b1:97:8a" "00:14:bf:b1:97:8d"
[7] "00:14:bf:b1:97:90"
[1] 9296 14
<jupyter_text># The real analysis
<jupyter_code>getErrorFromMac = function (macs) {
offline2 = readData(OFFLINE_SOURCE, macs)
offlineSummary2 = get.summary(offline2)
signals = select.last.n.columns.from.df(onlineSummary, length(macs))
kyPrediction.original.macs = predXY(newSignals = signals,
newAngles = onlineSummary[ , 4],
offlineSummary2, numAngles = 3, k = 3, macs)
calcError(kyPrediction.original.macs, actualXY)
}
getErrorFromMac(ORIGINAL_MACS[1:2])<jupyter_output>Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”<jupyter_text>### Using different MACs<jupyter_code>getErrorFromMac(NEXT_MACS)<jupyter_output>Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”<jupyter_text>### Using MACs from the book<jupyter_code>getErrorFromMac(ORIGINAL_MACS)<jupyter_output>Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”<jupyter_text>### All MACS<jupyter_code>getErrorFromMac(ALL_MACS)<jupyter_output>Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”Warning message in matrix(tokens[c(2, 4, 6:8, 10)], nrow = nrow(tmp), ncol = 6, :
“data length exceeds size of matrix”
| no_license | /CaseStudy1/KNN (From Book).ipynb | kjprice/smu-quantifying-the-world | 6 |
<jupyter_start><jupyter_text># Neural networks with PyTorch
Deep learning networks tend to be massive, with dozens or hundreds of layers; that's where the term "deep" comes from. You can build one of these deep networks using only weight matrices as we did in the previous notebook, but in general it's very cumbersome and difficult to implement. PyTorch has a module `nn` that provides an efficient way to build large neural networks.<jupyter_code># Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>
Now we're going to build a larger network that can solve a (formerly) difficult problem: identifying text in an image. Here we'll use the MNIST dataset, which consists of greyscale handwritten digits. Each image is 28x28 pixels; you can see a sample below.
Our goal is to build a neural network that can take one of these images and predict the digit in the image.
First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.<jupyter_code>### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)<jupyter_output><empty_output><jupyter_text>We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. Later, we'll use this to loop through the dataset for training, like
```python
for image, label in trainloader:
## do things with images and labels
```
You'll notice I created the `trainloader` with a batch size of 64, and `shuffle=True`. The batch size is the number of images we get in one iteration from the data loader and pass through our network, often called a *batch*. And `shuffle=True` tells it to shuffle the dataset every time we start going through the data loader again. But here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size `(64, 1, 28, 28)`. So, 64 images per batch, 1 color channel, and 28x28 images.<jupyter_code>dataiter = iter(trainloader)
images, labels = dataiter.next()
print(type(images))
print(images.shape)
print(labels.shape)<jupyter_output><class 'torch.Tensor'>
torch.Size([64, 1, 28, 28])
torch.Size([64])
<jupyter_text>This is what one of the images looks like. <jupyter_code>plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');<jupyter_output><empty_output><jupyter_text>First, let's try to build a simple network for this dataset using weight matrices and matrix multiplications. Then, we'll see how to do it using PyTorch's `nn` module which provides a much more convenient and powerful method for defining network architectures.
The networks you've seen so far are called *fully-connected* or *dense* networks. Each unit in one layer is connected to each unit in the next layer. In fully-connected networks, the input to each layer must be a one-dimensional vector (which can be stacked into a 2D tensor as a batch of multiple examples). However, our images are 28x28 2D tensors, so we need to convert them into 1D vectors. Thinking about sizes, we need to convert the batch of images with shape `(64, 1, 28, 28)` to have a shape of `(64, 784)`, where 784 is 28 times 28. This is typically called *flattening*: we flatten the 2D images into 1D vectors.
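A one-line way to do this flattening (a sketch; the `-1` lets PyTorch infer the 784 from the remaining dimensions):
```python
flat_images = images.view(images.shape[0], -1)   # shape: (64, 784)
```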
Previously you built a network with one output unit. Here we need 10 output units, one for each digit. We want our network to predict the digit shown in an image, so what we'll do is calculate probabilities that the image is of any one digit or class. This ends up being a discrete probability distribution over the classes (digits) that tells us the most likely class for the image. That means we need 10 output units for the 10 classes (digits). We'll see how to convert the network output into a probability distribution next.
> **Exercise:** Flatten the batch of images `images`. Then build a multi-layer network with 784 input units, 256 hidden units, and 10 output units using random tensors for the weights and biases. For now, use a sigmoid activation for the hidden layer. Leave the output layer without an activation, we'll add one that gives us a probability distribution next.<jupyter_code>def activation(x):
""" Sigmoid activation function
Arguments
---------
x: torch.Tensor
"""
return 1/(1+torch.exp(-x))
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Flatten the batch of images into 784-dimensional feature vectors
features = images.view(64,784)
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 256 # Number of hidden units
n_output = 10 # Number of output units
# Weights for inputs to hidden layer
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
## Your solution here
h_h = activation(torch.mm(features, W1)+B1)
#print(h_h)
output_h = torch.mm(h_h, W2)+B2
output_h
out =output_h
out.shape
print(out)<jupyter_output>tensor([[-8.2275e+00, -1.0901e+01, 5.0343e+00, -1.2493e+01, 1.4637e+00,
6.9631e+00, -1.9445e+00, -5.1593e-01, -1.2712e+01, -1.6011e+01],
[-1.5584e+01, -5.3025e+00, -1.0042e+00, -9.1664e+00, 1.0405e+01,
2.0038e+00, -6.8517e+00, 2.3632e+00, -1.0994e+01, -5.2743e+00],
[-1.4536e+01, 2.6837e+00, 3.2770e+00, -2.4670e+00, -1.1291e+00,
9.6091e+00, -1.7720e+01, -1.3554e+01, -1.7552e+00, 9.7264e-03],
[-1.3870e+01, -6.0014e+00, 4.4288e+00, -6.9014e+00, -3.5071e-01,
1.4931e+01, -1.0324e+01, -1.7106e+01, -9.6710e+00, 6.1074e+00],
[-1.7758e+01, -7.5365e+00, 2.8167e+00, -1.2441e+01, 2.8616e+00,
2.8630e+00, -1.1581e+01, -3.1318e+00, -1.9440e+01, 7.2715e-01],
[-9.5641e+00, -3.1439e+00, 1.1066e+00, -1.8872e+01, 1.2034e+01,
1.0850e+01, -2.5829e+00, -4.0953e-01, -1.5058e+01, 1.3635e+00],
[-1.5349e+01, -7.9012e+00, 4.0848e+00, -2.3580e+01, 2.4034e+00,
2.6259e+00, -6.5523e+0[...]<jupyter_text>Now we have 10 outputs for our network. We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to. Something that looks like this:
Here we see that the probability for each class is roughly the same. This represents an untrained network: it hasn't seen any data yet, so it just returns a roughly uniform distribution with equal probabilities for each class.
To calculate this probability distribution, we often use the [**softmax** function](https://en.wikipedia.org/wiki/Softmax_function). Mathematically this looks like
$$
\Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_k^K{e^{x_k}}}
$$
What this does is squish each input $x_i$ between 0 and 1 and normalizes the values to give you a proper probability distribution where the probabilities sum up to one.
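A compact PyTorch sketch of this formula (one possible implementation, not the only one; `keepdim=True` keeps the summed dimension so the division broadcasts row-wise):
```python
import torch

def softmax_rows(x):
    # x: tensor of shape (batch, classes); returns row-wise probabilities
    e = torch.exp(x)
    return e / e.sum(dim=1, keepdim=True)
```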
> **Exercise:** Implement a function `softmax` that performs the softmax calculation and returns probability distributions for each example in the batch. Note that you'll need to pay attention to the shapes when doing this. If you have a tensor `a` with shape `(64, 10)` and a tensor `b` with shape `(64,)`, doing `a/b` will give you an error because PyTorch will try to do the division across the columns (called broadcasting) but you'll get a size mismatch. The way to think about this is for each of the 64 examples, you only want to divide by one value, the sum in the denominator. So you need `b` to have a shape of `(64, 1)`. This way PyTorch will divide the 10 values in each row of `a` by the one value in each row of `b`. Pay attention to how you take the sum as well. You'll need to define the `dim` keyword in `torch.sum`. Setting `dim=0` takes the sum across the rows while `dim=1` takes the sum across the columns.<jupyter_code>def softmax(x):
## TODO: Implement the softmax function here
ss = torch.sum( torch.exp(x),dim=1).view(-1, 1)
#print(ss)
soft = torch.exp(x)/ss
return soft
print(out[1])
# Here, out should be the output of the network in the previous excercise with shape (64,10)
probabilities = softmax(out)
# Does it have the right shape? Should be (64, 10)
print(probabilities.shape)
# Does it sum to 1?
print(probabilities[0,:])
print(probabilities[0,:].sum())
print(probabilities.sum(dim=1))<jupyter_output>tensor([-15.5839, -5.3025, -1.0042, -9.1664, 10.4051, 2.0038, -6.8517,
2.3632, -10.9936, -5.2743])
torch.Size([64, 10])
tensor([2.1981e-07, 1.5170e-08, 1.2635e-01, 3.0879e-09, 3.5553e-03, 8.6949e-01,
1.1769e-04, 4.9108e-04, 2.4803e-09, 9.1531e-11])
tensor(1.)
tensor([1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000])
<jupyter_text>## Building networks with PyTorch
PyTorch provides a module `nn` that makes building networks much simpler. Here I'll show you how to build the same one as above with 784 inputs, 256 hidden units, 10 output units and a softmax output.<jupyter_code>from torch import nn
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
# Define sigmoid activation and softmax output
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
return x<jupyter_output><empty_output><jupyter_text>Let's go through this bit by bit.
```python
class Network(nn.Module):
```
Here we're inheriting from `nn.Module`. Combined with `super().__init__()` this creates a class that tracks the architecture and provides a lot of useful methods and attributes. It is mandatory to inherit from `nn.Module` when you're creating a class for your network. The name of the class itself can be anything.
```python
self.hidden = nn.Linear(784, 256)
```
This line creates a module for a linear transformation, $x\mathbf{W} + b$, with 784 inputs and 256 outputs and assigns it to `self.hidden`. The module automatically creates the weight and bias tensors which we'll use in the `forward` method. You can access the weight and bias tensors once the network (`net`) is created with `net.hidden.weight` and `net.hidden.bias`.
```python
self.output = nn.Linear(256, 10)
```
Similarly, this creates another linear transformation with 256 inputs and 10 outputs.
```python
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
```
Here I defined operations for the sigmoid activation and softmax output. Setting `dim=1` in `nn.Softmax(dim=1)` calculates softmax across the columns.
```python
def forward(self, x):
```
PyTorch networks created with `nn.Module` must have a `forward` method defined. It takes in a tensor `x` and passes it through the operations you defined in the `__init__` method.
```python
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
```
Here the input tensor `x` is passed through each operation and reassigned to `x`. We can see that the input tensor goes through the hidden layer, then a sigmoid function, then the output layer, and finally the softmax function. It doesn't matter what you name the variables here, as long as the inputs and outputs of the operations match the network architecture you want to build. The order in which you define things in the `__init__` method doesn't matter, but you'll need to sequence the operations correctly in the `forward` method.
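As a quick aside (an addition, not from the original notebook): in practice you call the module itself rather than `forward` directly, because `nn.Module.__call__` runs `forward` plus any registered hooks. A minimal sketch, assuming a fake batch of 64 flattened 28x28 images:
```python
import torch

net = Network()
x = torch.randn(64, 784)   # fake batch of flattened 28x28 images
out = net(x)               # preferred over net.forward(x); also runs hooks
print(out.shape)           # torch.Size([64, 10])
```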
Now we can create a `Network` object.<jupyter_code># Create the network and look at its text representation
model = Network()
model<jupyter_output><empty_output><jupyter_text>You can define the network somewhat more concisely and clearly using the `torch.nn.functional` module. This is the most common way you'll see networks defined as many operations are simple element-wise functions. We normally import this module as `F`, `import torch.nn.functional as F`.<jupyter_code>import torch.nn.functional as F
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
def forward(self, x):
# Hidden layer with sigmoid activation
x = F.sigmoid(self.hidden(x))
# Output layer with softmax activation
x = F.softmax(self.output(x), dim=1)
return x<jupyter_output><empty_output><jupyter_text>### Activation functions
So far we've only been looking at the sigmoid activation function, but in general any function can be used as an activation function. The only requirement is that for a network to approximate a non-linear function, the activation functions must be non-linear. Here are a few more examples of common activation functions: Tanh (hyperbolic tangent) and ReLU (rectified linear unit).
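To make this concrete, here is a small sketch (an addition) applying the three activations to the same values:
```python
import torch
import torch.nn.functional as F

x = torch.linspace(-3, 3, steps=7)
print(torch.sigmoid(x))   # squashes values into (0, 1)
print(torch.tanh(x))      # squashes values into (-1, 1)
print(F.relu(x))          # zeroes out negatives, keeps positives unchanged
```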
In practice, the ReLU function is used almost exclusively as the activation function for hidden layers.
### Your Turn to Build a Network
> **Exercise:** Create a network with 784 input units, a hidden layer with 128 units and a ReLU activation, then a hidden layer with 64 units and a ReLU activation, and finally an output layer with a softmax activation as shown above. You can use a ReLU activation with the `nn.ReLU` module or `F.relu` function.
It's good practice to name your layers by their type of network, for instance 'fc' to represent a fully-connected layer. As you code your solution, use `fc1`, `fc2`, and `fc3` as your layer names.<jupyter_code>## Your solution here
import torch.nn.functional as F
class MyNetwork(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
        # Hidden layers with ReLU activations
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Output layer with softmax activation
        x = F.softmax(self.fc3(x), dim=1)
return x
model = MyNetwork()
<jupyter_output><empty_output><jupyter_text>### Initializing weights and biases
The weights and biases are automatically initialized for you, but it's possible to customize how they are initialized. They are tensors attached to the layer you defined; you can get them with `model.fc1.weight` for instance.
print(model.fc1.bias)<jupyter_output>Parameter containing:
tensor([[ 0.0019, 0.0005, -0.0266, ..., 0.0021, 0.0257, 0.0180],
[-0.0291, 0.0194, 0.0033, ..., -0.0058, -0.0152, -0.0074],
[-0.0255, 0.0141, -0.0173, ..., -0.0066, 0.0269, 0.0035],
...,
[-0.0114, 0.0222, 0.0197, ..., 0.0243, 0.0304, 0.0111],
[-0.0090, -0.0345, -0.0355, ..., -0.0130, 0.0239, -0.0339],
[ 0.0088, 0.0205, 0.0327, ..., -0.0091, 0.0315, 0.0055]],
requires_grad=True)
Parameter containing:
tensor([-0.0006, -0.0100, 0.0276, -0.0134, -0.0039, 0.0245, -0.0207, 0.0039,
-0.0155, 0.0004, 0.0028, -0.0305, 0.0163, 0.0109, -0.0037, 0.0187,
0.0095, 0.0269, -0.0096, -0.0019, -0.0355, 0.0344, 0.0178, -0.0305,
-0.0307, -0.0113, -0.0355, 0.0336, -0.0058, -0.0032, 0.0309, 0.0017,
0.0203, -0.0101, 0.0036, -0.0251, -0.0256, -0.0087, 0.0092, -0.0355,
0.0127, -0.0146, -0.0254, -0.0126, -0.0269, 0.0212, 0.0112, -0.0237,
0.0278[...]<jupyter_text>For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.<jupyter_code># Set biases to all zeros
model.fc1.bias.data.fill_(0)
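# (Added) torch.nn.init provides equivalent in-place helpers, for example:
#   nn.init.zeros_(model.fc1.bias)
#   nn.init.normal_(model.fc1.weight, std=0.01)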
# sample from random normal with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)<jupyter_output><empty_output><jupyter_text>### Forward pass
Now that we have a network, let's see what happens when we pass in an image.<jupyter_code># Grab some data
dataiter = iter(trainloader)
images, labels = next(dataiter)  # use the built-in next(); the .next() method is not available in newer PyTorch versions
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)<jupyter_output><empty_output><jupyter_text>As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!
### Using `nn.Sequential`
PyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)). Using this to build the equivalent network:<jupyter_code># Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], output_size),
nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)<jupyter_output>Sequential(
(0): Linear(in_features=784, out_features=128, bias=True)
(1): ReLU()
(2): Linear(in_features=128, out_features=64, bias=True)
(3): ReLU()
(4): Linear(in_features=64, out_features=10, bias=True)
(5): Softmax(dim=1)
)
<jupyter_text>Here our model is the same as before: 784 input units, a hidden layer with 128 units, ReLU activation, 64 unit hidden layer, another ReLU, then the output layer with 10 units, and the softmax output.
The operations are available by passing in the appropriate index. For example, if you want to get the first Linear operation and look at the weights, you'd use `model[0]`.
model[0].weight<jupyter_output>Linear(in_features=784, out_features=128, bias=True)
<jupyter_text>You can also pass in an `OrderedDict` to name the individual layers and operations, instead of using incremental integers. Note that dictionary keys must be unique, so _each operation must have a different name_.<jupyter_code>from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model<jupyter_output><empty_output><jupyter_text>Now you can access layers either by integer or the name<jupyter_code>print(model[0])
print(model.fc1)<jupyter_output>Linear(in_features=784, out_features=128, bias=True)
Linear(in_features=784, out_features=128, bias=True)
Source: blacksaturn1/deep-learning-v2-pytorch, /intro-to-pytorch/Part 2 - Neural Networks in PyTorch (Exercises).ipynb (permissive license)
<jupyter_start><jupyter_text>### Libraries Used
* Pandas, Numpy - Data Loading / Transformation / Analysis
* Sklearn - ML Algorithms / Preprocessing ( Decision Tree, Scaling, Train/Test Split )
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# Set a Seed for random number generation for reproducible results
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
# Load the titanic dataset using Pandas library
df = pd.read_excel('../../data/titanic_dataset.xlsx').dropna(subset=['Age'])
# Preview the Titanic Dataset
df.head()
# Split the dataset into dependent features (passenger details used for prediction)
# and target features (prediction if the passenger survived)
x = df.loc[:,:'Embarked']
y = df['Survived']
# Convert categorical data (strings) to numerical for running ML Algorithms
x['Sex'] = x['Sex'].map(lambda x: 0 if x == 'male' else 1)
# x.Embarked = x.Embarked.map({'S': 1, 'Q': 2, 'C': 3}).fillna(4)
# Split the dataset into train and test, for learning from one dataset and test it on the other.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# Create a Decision Tree, with a max depth of 4 levels.
clf = tree.DecisionTreeClassifier(random_state=42, max_depth=4)
# Filter only required columns for training
X_train_scaled = preprocessing.scale(X_train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']])
X_test_scaled = preprocessing.scale(X_test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']])
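# (Added note) Scaling train and test independently like this lets test-set statistics
# influence the test features; a more standard approach is to fit one StandardScaler
# on X_train only and reuse it to transform X_test.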
# Train the Decision Tree classifier with the training dataset
clf.fit(X_train_scaled, y_train)
from sklearn.metrics import accuracy_score
print("Accuracy of Decision Tree: {:.2f}".format(
accuracy_score(y_test, clf.predict(X_test_scaled)) * 100.0
))
print("Accuracy of Smart Classifier: {:.2f}".format(
accuracy_score(y_test, [z[1] > 0 for z in X_test_scaled]) * 100.0
))
# Show the features the decision tree found important
pd.DataFrame(['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare'], clf.feature_importances_)
# Export the tree, to manually explore its decision criteria
tree.export_graphviz(clf, 'tree.dot')
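# (Added note) To render the exported file, one option -- assuming Graphviz is installed --
# is its `dot` CLI, e.g. from a shell: dot -Tpng tree.dot -o tree.png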
# MultiLevel Survival Rate
df.pivot_table(index=['Sex', 'Pclass'], values=['Survived'])<jupyter_output><empty_output>
Source: KshitijKarthick/reva_ml_workshop, /private_nbs/ml_classification/Titanic - Decision Tree.ipynb (no license specified)
<jupyter_start><jupyter_text># Call Files First Look<jupyter_code>import re
import pandas as pd
import functools as fnc<jupyter_output><empty_output><jupyter_text>## Set Notebook Options<jupyter_code>pd.options.display.max_columns = 300<jupyter_output><empty_output><jupyter_text>## Read In Data<jupyter_code>FILE_1 = '/Users/admin/Downloads/FFIEC CDR Call Bulk Subset of Schedules 2017/FFIEC CDR Call Subset of Schedules 2017(1 of 2).txt'
FILE_2 = '/Users/admin/Downloads/FFIEC CDR Call Bulk Subset of Schedules 2017/FFIEC CDR Call Subset of Schedules 2017(2 of 2).txt'
df_1 = pd.read_csv(
FILE_1,
delimiter='\t',
header=[0, 1])
df_2 = pd.read_csv(
FILE_2,
delimiter='\t',
header=[0, 1])<jupyter_output><empty_output><jupyter_text>## Review Data<jupyter_code>df_1.head(2)<jupyter_output><empty_output><jupyter_text>#### Below: the data in the last column is empty<jupyter_code>df_1[~df_1[('Unnamed: 253_level_0', 'Unnamed: 253_level_1')].isnull()].count().sum()<jupyter_output><empty_output><jupyter_text>#### Review Column Associations<jupyter_code>for item in df_1.columns:
print(item)<jupyter_output>('Reporting Period End Date', 'Unnamed: 0_level_1')
('IDRSSD', 'Unnamed: 1_level_1')
('FDIC Certificate Number', 'Unnamed: 2_level_1')
('OCC Charter Number', 'Unnamed: 3_level_1')
('OTS Docket Number', 'Unnamed: 4_level_1')
('Primary ABA Routing Number', 'Unnamed: 5_level_1')
('Financial Institution Name', 'Unnamed: 6_level_1')
('Financial Institution Address', 'Unnamed: 7_level_1')
('Financial Institution City', 'Unnamed: 8_level_1')
('Financial Institution State', 'Unnamed: 9_level_1')
('Financial Institution Zip Code', 'Unnamed: 10_level_1')
('Financial Institution Filing Type', 'Unnamed: 11_level_1')
('Last Date/Time Submission Updated On', 'Unnamed: 12_level_1')
('RCFD0071', 'INT-BEARING BALS DUE FRM DEP INSTS')
('RCFD0081', 'NONINTEREST-BEARING BALS&CURR&COIN')
('RCFD0426', 'OTHER IDENTIFIABLE INTANGIBLE ASSETS')
('RCFD1248', 'LNS SECD BY RE TO NONUS ADR,PAS DU30')
('RCFD1249', 'LNS SECD BY RE TO NONUS ADR,PAS DU90')
('RCFD1250', 'LNS SECD BY RE TO NONUS ADR,NONACCRL')
('RCFD1251[...]<jupyter_text>###### set a regular expression matching object<jupyter_code>pattern = re.compile('Unnamed:*')<jupyter_output><empty_output><jupyter_text>###### review matching pattern<jupyter_code>for col in df_1.columns:
match_0, match_1 = pattern.match(col[0]), pattern.match(col[1])
if (match_0 is not None) and (match_1 is not None):
print('both positions', col)
elif match_0 is not None:
print('first position', col)
elif match_1 is not None:
print('second position', col)
else:
print('no matches', col)<jupyter_output>second position ('Reporting Period End Date', 'Unnamed: 0_level_1')
second position ('IDRSSD', 'Unnamed: 1_level_1')
second position ('FDIC Certificate Number', 'Unnamed: 2_level_1')
second position ('OCC Charter Number', 'Unnamed: 3_level_1')
second position ('OTS Docket Number', 'Unnamed: 4_level_1')
second position ('Primary ABA Routing Number', 'Unnamed: 5_level_1')
second position ('Financial Institution Name', 'Unnamed: 6_level_1')
second position ('Financial Institution Address', 'Unnamed: 7_level_1')
second position ('Financial Institution City', 'Unnamed: 8_level_1')
second position ('Financial Institution State', 'Unnamed: 9_level_1')
second position ('Financial Institution Zip Code', 'Unnamed: 10_level_1')
second position ('Financial Institution Filing Type', 'Unnamed: 11_level_1')
second position ('Last Date/Time Submission Updated On', 'Unnamed: 12_level_1')
no matches ('RCFD0071', 'INT-BEARING BALS DUE FRM DEP INSTS')
no matches ('RCFD0081', 'NONINTEREST-BEARING BALS&CURR[...]<jupyter_text>###### Determine Items That are Constant within Institution
Note: This is unclear since reporting appears inconsistent for items like "Financial Institution Name", which we expect to be constant within an institution and reported across institutions. We will use the FDIC number as a unique identifier since it is unique and reported for all institutions.<jupyter_code>for col in df_2.columns:
print(col, df_2[col].drop_duplicates().count())<jupyter_output>('Reporting Period End Date', 'Unnamed: 0_level_1') 1
('IDRSSD', 'Unnamed: 1_level_1') 5908
('FDIC Certificate Number', 'Unnamed: 2_level_1') 5908
('OCC Charter Number', 'Unnamed: 3_level_1') 1336
('OTS Docket Number', 'Unnamed: 4_level_1') 1533
('Primary ABA Routing Number', 'Unnamed: 5_level_1') 5884
('Financial Institution Name', 'Unnamed: 6_level_1') 5102
('Financial Institution Address', 'Unnamed: 7_level_1') 5601
('Financial Institution City', 'Unnamed: 8_level_1') 3306
('Financial Institution State', 'Unnamed: 9_level_1') 55
('Financial Institution Zip Code', 'Unnamed: 10_level_1') 4811
('Financial Institution Filing Type', 'Unnamed: 11_level_1') 3
('Last Date/Time Submission Updated On', 'Unnamed: 12_level_1') 5908
('RCONC241', 'LNS & LEASES H-F-S PA DU 90 DYS MORE') 10
('RCONC410', 'ADDITIONS TO NONACCRUAL ASSETS') 1344
('RCONC411', 'N/ACCRUAL ASSETS SOLD DURING QUARTER') 228
('RCONF166', 'LSE INDVDL FOR HSHLD, FMLY, PRSNL 30') 5
('RCONF167', 'LSE INDVDL FOR HSHLD, FMLY, PRSNL[...]<jupyter_text>## Define Row Transform###### Definitions<jupyter_code>def drop_col_bool(colname, pat):
"""returns true if the colname matches the pattern"""
matched = pat.match(colname)
return matched is not None
def top_colname_trns(colname):
"manipulate colnames to be machine friendly"
return colname.lower().replace('/', '').replace(' ', '_')
def row_transform(pat, row):
"""pat matches empty items and row is the element of a dataframe. builds a nested
dictionary from the flat dataframe.
"""
top = dict()
top['financials'] = dict()
top['institution_constants'] = dict()
for tup in row.index:
match_0, match_1 = pat.match(tup[0]), pat.match(tup[1])
key = top_colname_trns(tup[0])
if (match_0 is not None) and (match_1 is not None):
continue
elif ((match_1 is not None) and
(key in ('reporting_period_end_date', 'fdic_certificate_number'))):
top[key] = row[tup]
elif match_1 is not None:
top['institution_constants'][key] = row[tup]
else:
top['financials'][tup[0]] = row[tup]
return top
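# Bind the compiled "Unnamed:*" pattern so the transform can be applied row-wise with df.apply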
transformer = fnc.partial(row_transform, pattern)
def transform_flat_text(filepath, fdic_codes):
"""conduct full transform on a text file returning a list of dictionaries"""
df = pd.read_csv(filepath, delimiter='\t', header=[0, 1])
# flatten the columns for filtering since the second column level is set
# by position if null and we do not want to depend on column order
tmpdf = df.copy()
tmpdf.columns = tmpdf.columns.droplevel(1)
filtered = df[tmpdf['FDIC Certificate Number'].isin(fdic_codes)]
transform = filtered.apply(transformer, axis=1)
return list(transform)<jupyter_output><empty_output><jupyter_text>###### Testing-ish<jupyter_code>top_colname_trns('Financial Institution Name')
top_colname_trns('Last Date/Time Submission Updated On')
row_transform(pattern, df_1.loc[1, :])
trans = df_1[:5].apply(transformer, axis=1)
list(trans)
transform_flat_text(FILE_1, [10057, 3850])<jupyter_output><empty_output>
Source: marshall245/fdic_data, /prototyping/call_files_first_look.ipynb (no license specified)
<jupyter_start><jupyter_text>## Data structures
### Tuples - Tuples are the immutable cousins of lists. Almost everything you can do with a list that doesn't involve modifying it can also be done with a tuple. We use parentheses (or nothing at all) instead of square brackets.<jupyter_code>tupla = (12,16)
print (tupla)
tupla2 = 12,15
print (tupla2)
# Remember that tuples are immutable
try:
    tupla[1] = 23
except TypeError:
    print ("Tuples do not allow their values to be modified!!!")<jupyter_output>Tuples do not allow their values to be modified!!!
Source: GilFarias/DScienceZero, /Estrutura_tuplas.ipynb (no license specified)
<jupyter_start><jupyter_text># LAB 6: Text classification with linear models
Objectives:
* Train and evaluate linear text classifiers using SGDClassifier
* Experiment with different feature extraction and training methods
* Log and evaluate experimental results using [mlflow](https://mlflow.org)<jupyter_code>import numpy as np
import pandas as pd
from cytoolz import *
from tqdm.auto import tqdm
tqdm.pandas()<jupyter_output><empty_output><jupyter_text>### Load and preprocess data<jupyter_code>train = pd.read_parquet(
"s3://ling583/rcv1-topics-train.parquet", storage_options={"anon": True}
)
test = pd.read_parquet(
"s3://ling583/rcv1-topics-test.parquet", storage_options={"anon": True}
)
train.head()<jupyter_output><empty_output><jupyter_text>CCAT : CORPORATE/INDUSTRIAL
ECAT : ECONOMICS
GCAT : GOVERNMENT/SOCIAL
MCAT : MARKETS<jupyter_code>train["topics"].value_counts()
import spacy
nlp = spacy.load(
"en_core_web_sm",
exclude=["tagger", "parser", "ner", "lemmatizer", "attribute_ruler"],
)
def tokenize(text):
doc = nlp.tokenizer(text)
return [t.norm_ for t in doc if t.is_alpha]
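# (Added) e.g. tokenize("Bitcoin hits $40,000!") -> ["bitcoin", "hits"]
# (non-alphabetic tokens are dropped by is_alpha; norm_ lowercases the rest)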
import multiprocessing as mp
with mp.Pool() as p:
train["tokens"] = pd.Series(p.imap(tokenize, tqdm(train["text"]), chunksize=100))
test["tokens"] = pd.Series(p.imap(tokenize, tqdm(test["text"]), chunksize=100))
train.head()<jupyter_output><empty_output><jupyter_text>---### SGDClassifier<jupyter_code>from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline
sgd = make_pipeline(CountVectorizer(analyzer=identity), SGDClassifier())
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
import logger
import mlflow
from logger import log_search, log_test
mlflow.set_experiment("lab-6")
log_test(sgd, test["topics"], predicted)<jupyter_output><empty_output><jupyter_text>---### Hyperparameters<jupyter_code>from dask.distributed import Client
client = Client("tcp://127.0.0.1:35033")
client
from dask_ml.model_selection import RandomizedSearchCV
from scipy.stats.distributions import loguniform, randint, uniform
from warnings import simplefilter
simplefilter(action="ignore", category=FutureWarning)
mlflow.set_experiment("lab-6/SGDClassifier")
%%time
search = RandomizedSearchCV(
sgd,
{
"countvectorizer__min_df": randint(1, 10),
"countvectorizer__max_df": uniform(0.5, 0.5),
"sgdclassifier__alpha": [0.1],
},
n_iter=25,
scoring="f1_macro",
)
search.fit(train["tokens"], train["topics"])
log_search(search)<jupyter_output>CPU times: user 5.38 s, sys: 429 ms, total: 5.81 s
Wall time: 1min 4s
<jupyter_text>### Optimized Model for SGD Classifier
<jupyter_code>sgd = make_pipeline(
CountVectorizer(analyzer=identity, min_df=2, max_df=0.7), SGDClassifier(alpha=0.1)
)
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
mlflow.set_experiment("lab-6")
log_test(sgd, test["topics"], predicted)<jupyter_output><empty_output><jupyter_text>### TfidTransformer Classifier<jupyter_code>from sklearn.feature_extraction.text import TfidfTransformer
sgd = make_pipeline(CountVectorizer(analyzer=identity),
TfidfTransformer(),
SGDClassifier())
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
mlflow.set_experiment("lab-6/SGDClassifierTransform")
%%time
search = RandomizedSearchCV(
sgd,
{
"countvectorizer__min_df": randint(1, 10),
"countvectorizer__max_df": uniform(0.5, 0.5),
"sgdclassifier__alpha": loguniform(1e-8, 100.0),
"tfidftransformer__smooth_idf":[True, False],
},
n_iter=25,
scoring="f1_macro",
)
search.fit(train["tokens"], train["topics"])
log_search(search)<jupyter_output>CPU times: user 5.48 s, sys: 420 ms, total: 5.9 s
Wall time: 1min 5s
<jupyter_text>### Optimized Model for SGD Classifier Transformation<jupyter_code>sgd = make_pipeline(CountVectorizer(analyzer=identity, max_df = 0.97),
                    TfidfTransformer(use_idf=True),
SGDClassifier(alpha = 0.0001))
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
#Take note of the difference between the macro average of the default model and the
#optimized model for the transformation.
mlflow.set_experiment("lab-6")
log_test(sgd, test["topics"], predicted)<jupyter_output><empty_output><jupyter_text>### Truncated SVD Model<jupyter_code>from sklearn.decomposition import TruncatedSVD
sgd = make_pipeline(CountVectorizer(analyzer=identity),
TfidfTransformer(),
TruncatedSVD(n_components=100),
SGDClassifier())
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
mlflow.set_experiment("lab-6/SGDClassifierTruncate")
%%time
search = RandomizedSearchCV(
sgd,
{
"countvectorizer__min_df": randint(1, 10),
"countvectorizer__max_df": uniform(0.5, 0.5),
"sgdclassifier__alpha": loguniform(1e-8, 100.0),
},
n_iter=25,
scoring="f1_macro",
)
search.fit(train["tokens"], train["topics"])
log_search(search)<jupyter_output>CPU times: user 6.05 s, sys: 561 ms, total: 6.61 s
Wall time: 3min 58s
<jupyter_text>### Optimized TruncatedSVD <jupyter_code>sgd = make_pipeline(CountVectorizer(analyzer=identity),
TfidfTransformer(),
TruncatedSVD(n_components=100),
SGDClassifier())
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
mlflow.set_experiment("lab-6")
log_test(sgd, test["topics"], predicted)<jupyter_output><empty_output><jupyter_text>### Ngrams Classifier<jupyter_code>from nltk import bigrams
def unibigrams(toks):
return [(tok,) for tok in toks] + list(bigrams(toks))
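# (Added) e.g. unibigrams(["price", "of", "bitcoin"]) ->
#   [('price',), ('of',), ('bitcoin',), ('price', 'of'), ('of', 'bitcoin')]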
sgd = make_pipeline(CountVectorizer(analyzer=unibigrams), SGDClassifier())
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))
mlflow.set_experiment("lab-6/SGDClassifierNGrams")
%%time
search = RandomizedSearchCV(
sgd,
{
"countvectorizer__min_df": randint(1, 10),
"countvectorizer__max_df": uniform(0.5, 0.5),
"sgdclassifier__alpha": loguniform(1e-8, 100.0),
},
n_iter=25,
scoring="f1_macro",
)
search.fit(train["tokens"], train["topics"])
log_search(search)
sgd = make_pipeline(CountVectorizer(analyzer=unibigrams), SGDClassifier())
sgd.fit(train["tokens"], train["topics"])
predicted = sgd.predict(test["tokens"])
print(classification_report(test["topics"], predicted))<jupyter_output><empty_output>
Source: davidjapostol/ling583-lab6, /lab-6-davidjapostol-main/2-sgdclassifier.ipynb (no license specified)
<jupyter_start><jupyter_text># Vertex AI client library: Custom training image classification model for batch prediction with explanation
## Overview
This tutorial demonstrates how to use the Vertex AI Python client library to train and deploy a custom image classification model for batch prediction with explanation.### Dataset
The dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck.### Objective
In this tutorial, you create a custom model, with a training pipeline, from a Python script in a Google prebuilt Docker container using the Vertex AI client library, and then do a batch prediction with explanations on the uploaded model. You can alternatively create custom models using `gcloud` command-line tool or online using Google Cloud Console.
The steps performed include:
- Create a Vertex AI custom job for training a model.
- Train the TensorFlow model.
- Retrieve and load the model artifacts.
- View the model evaluation.
- Set explanation parameters.
- Upload the model as a Vertex AI `Model` resource.
- Make a batch prediction with explanations.### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.## Installation
Install the latest version of Vertex AI client library.<jupyter_code>import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
! pip3 install -U google-cloud-aiplatform $USER_FLAG<jupyter_output><empty_output><jupyter_text>Install the latest GA version of *google-cloud-storage* library as well.<jupyter_code>! pip3 install -U google-cloud-storage $USER_FLAG
import os
if os.environ["IS_TESTING"]:
! pip3 install -U tensorflow
import os
if os.environ["IS_TESTING"]:
! pip3 install -U opencv-python<jupyter_output><empty_output><jupyter_text>### Restart the kernel
Once you've installed the Vertex AI client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.<jupyter_code>import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)<jupyter_output><empty_output><jupyter_text>## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex AI APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Vertex AI Notebooks.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.<jupyter_code>PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID<jupyter_output><empty_output><jupyter_text>#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. For the latest support per region, see the [Vertex AI locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations)<jupyter_code>REGION = "us-central1" # @param {type: "string"}<jupyter_output><empty_output><jupyter_text>#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.<jupyter_code>from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")<jupyter_output><empty_output><jupyter_text>### Authenticate your Google Cloud account
**If you are using Vertex AI Notebooks**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.<jupyter_code>import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''<jupyter_output><empty_output><jupyter_text>### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a custom training job using the Vertex AI client library, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex AI runs
the code from this package. In this tutorial, Vertex AI also saves the
trained model that results from your job in the same bucket. You can then
create an `Endpoint` resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.<jupyter_code>BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP<jupyter_output><empty_output><jupyter_text>**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.<jupyter_code>! gsutil mb -l $REGION $BUCKET_NAME<jupyter_output><empty_output><jupyter_text>Finally, validate access to your Cloud Storage bucket by examining its contents:<jupyter_code>! gsutil ls -al $BUCKET_NAME<jupyter_output><empty_output><jupyter_text>### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants<jupyter_code>import os
import sys
import time
import google.cloud.aiplatform_v1beta1 as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value<jupyter_output><empty_output><jupyter_text>#### Vertex AI constants
Setup up the following constants for Vertex AI:
- `API_ENDPOINT`: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services.
- `PARENT`: The Vertex AI location root path for dataset, model, job, pipeline and endpoint resources.<jupyter_code># API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION<jupyter_output><empty_output><jupyter_text>#### Hardware Accelerators
Set the hardware accelerators (e.g., GPU), if any, for training and prediction.
Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
For GPU, available accelerators include:
- aip.AcceleratorType.NVIDIA_TESLA_K80
- aip.AcceleratorType.NVIDIA_TESLA_P100
- aip.AcceleratorType.NVIDIA_TESLA_P4
- aip.AcceleratorType.NVIDIA_TESLA_T4
- aip.AcceleratorType.NVIDIA_TESLA_V100
Otherwise specify `(None, None)` to use a container image to run on a CPU.
*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.<jupyter_code>if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)<jupyter_output><empty_output><jupyter_text>#### Container (Docker) image
Next, we will set the Docker container images for training and prediction
- TensorFlow 1.15
- `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest`
- TensorFlow 2.1
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest`
- TensorFlow 2.2
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest`
- TensorFlow 2.3
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest`
- TensorFlow 2.4
- `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest`
- `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest`
- XGBoost
- `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1`
- Scikit-learn
- `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest`
- Pytorch
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest`
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest`
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest`
- `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest`
For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
- TensorFlow 1.15
- `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest`
- `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest`
- TensorFlow 2.1
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest`
- TensorFlow 2.2
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest`
- TensorFlow 2.3
- `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest`
- `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest`
- XGBoost
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest`
- `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest`
- Scikit-learn
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest`
- `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers)<jupyter_code>if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)<jupyter_output><empty_output><jupyter_text>#### Machine Type
Next, set the machine type to use for training and prediction.
- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: The following is not supported for training:*
- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.<jupyter_code>if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)<jupyter_output><empty_output><jupyter_text># Tutorial
Now you are ready to start creating your own custom model and training for CIFAR10.## Set up clients
The Vertex AI client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex AI server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
- Model Service for `Model` resources.
- Endpoint Service for deployment.
- Job Service for batch jobs and custom training.
- Prediction Service for serving.<jupyter_code># client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)<jupyter_output><empty_output><jupyter_text>## Train a model
There are two ways you can train a custom model using a container image:
- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.
- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model.## Prepare your custom job specification
Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:
- `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed)
- `python_package_spec` : The specification of the Python package to be installed with the pre-built container.### Prepare your machine specification
Now define the machine specification for your custom training job. This tells Vertex AI what type of machine instance to provision for the training.
- `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8.
- `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU.
- `accelerator_count`: The number of accelerators.<jupyter_code>if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}<jupyter_output><empty_output><jupyter_text>### Prepare your disk specification
(optional) Now define the disk specification for your custom training job. This tells Vertex AI what type and size of disk to provision in each machine instance for the training.
- `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD.
- `boot_disk_size_gb`: Size of disk in GB.<jupyter_code>DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}<jupyter_output><empty_output><jupyter_text>### Define the worker pool specification
Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:
- `replica_count`: The number of instances to provision of this machine type.
- `machine_spec`: The hardware specification.
- `disk_spec` : (optional) The disk storage specification.
- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.
Let's dive deeper now into the python package specification:
- `executor_image_uri`: This is the docker image which is configured for your custom training job.
- `package_uris`: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the docker image.
- `python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task` -- note that you do not append the `.py` suffix.
- `args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting:
- `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts:
- direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
- indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
- `"--epochs=" + EPOCHS`: The number of epochs for training.
- `"--steps=" + STEPS`: The number of steps (batches) per epoch.
- `"--distribute=" + TRAIN_STRATEGY"` : The training distribution strategy to use for single or distributed training.
- `"single"`: single device.
- `"mirror"`: all GPU devices on a single compute instance.
- `"multi"`: all GPU devices on all compute instances.<jupyter_code>JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_cifar10.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]<jupyter_output><empty_output><jupyter_text>### Assemble a job specification
Now assemble the complete description for the custom job specification:
- `display_name`: The human readable name you assign to this custom job.
- `job_spec`: The specification for the custom job.
- `worker_pool_specs`: The specification for the machine VM instances.
- `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form:
`<output_uri_prefix>/model`<jupyter_code>if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}<jupyter_output><empty_output><jupyter_text>### Examine the training package
#### Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
- \_\_init\_\_.py
- task.py
The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).
#### Package Assembly
In the following cells, you will assemble the training package.<jupyter_code># Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex AI"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py<jupyter_output><empty_output><jupyter_text>#### Task.py contents
In the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary:
- Get the directory where to save the model artifacts from the command line (`--model-dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
- Loads CIFAR10 dataset from TF Datasets (tfds).
- Builds a model using TF.Keras model API.
- Compiles the model (`compile()`).
- Sets a training distribution strategy according to the argument `args.distribute`.
- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.<jupyter_code>%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)<jupyter_output><empty_output><jupyter_text>#### Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.<jupyter_code>! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz<jupyter_output><empty_output><jupyter_text>### Train the model
Now start the training of your custom training job on Vertex AI. Use this helper function `create_custom_job`, which takes the following parameter:
-`custom_job`: The specification for the custom job.
The helper function calls job client service's `create_custom_job` method, with the following parameters:
-`parent`: The Vertex AI location path to `Dataset`, `Model` and `Endpoint` resources.
-`custom_job`: The specification for the custom job.
You will display a handful of the fields returned in the `response` object; the two of most interest are:
`response.name`: The Vertex AI fully qualified identifier assigned to this custom training job. You save this identifier for use in subsequent steps.
`response.state`: The current state of the custom training job.<jupyter_code>def create_custom_job(custom_job):
response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = create_custom_job(custom_job)<jupyter_output><empty_output><jupyter_text>Now get the unique identifier for the custom job you created.<jupyter_code># The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)<jupyter_output><empty_output><jupyter_text>### Get information on a custom job
Next, use this helper function `get_custom_job`, which takes the following parameter:
- `name`: The Vertex AI fully qualified identifier for the custom job.
The helper function calls the job client service's `get_custom_job` method, with the following parameter:
- `name`: The Vertex AI fully qualified identifier for the custom job.
If you recall, you got the Vertex AI fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`.<jupyter_code>def get_custom_job(name, silent=False):
response = clients["job"].get_custom_job(name=name)
if silent:
return response
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = get_custom_job(job_id)<jupyter_output><empty_output><jupyter_text># Deployment
Training the above model may take upwards of 20 minutes.
Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, we will need to know the location of the saved model, which the Python script saved in your local Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`.<jupyter_code>while True:
response = get_custom_job(job_id, True)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_path_to_deploy = None
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
print("Training Time:", response.update_time - response.create_time)
break
time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)<jupyter_output><empty_output><jupyter_text>## Load the saved model
Your model is stored in TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can evaluate the model and make predictions.
To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.<jupyter_code>import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)<jupyter_output><empty_output><jupyter_text>## Evaluate the model
Now find out how good the model is.
### Load evaluation data
You will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.
You don't need the training data, which is why it is loaded as `(_, _)`.
Before you can run the data through evaluation, you need to preprocess it:
x_test:
1. Normalize (rescaling) the pixel data by dividing each pixel by 255. This will replace each single byte integer pixel with a 32-bit floating point number between 0 and 1.
y_test:
2. The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels. So we don't need to do anything more.<jupyter_code>import numpy as np
from tensorflow.keras.datasets import cifar10
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)<jupyter_output><empty_output><jupyter_text>### Perform the model evaluation
Now evaluate how well the model in the custom job did.<jupyter_code>model.evaluate(x_test, y_test)<jupyter_output><empty_output><jupyter_text>## Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex AI `Model` service, which will create a Vertex AI `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
### How does the serving function work
When you send a request to an online prediction server, the request is received by an HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.
The serving function consists of two parts:
- `preprocessing function`:
- Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
- Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.
- `post-processing function`:
- Converts the model output to the format expected by the receiving application -- e.g., compresses the output.
- Packages the output for the receiving application -- e.g., adds headings, makes a JSON object, etc.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One thing to keep in mind when building serving functions for TF.Keras models is that they run as static graphs. That means you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error when the serving function is compiled, indicating that you are using an EagerTensor, which is not supported.### Serving function for image data
To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.
To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).
When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:
- `io.decode_jpeg` - Decompresses the JPG image, which is returned as a Tensorflow tensor with three channels (RGB).
- `image.convert_image_dtype` - Changes integer pixel values to float 32.
- `image.resize` - Resizes the image to match the input shape for the model.
- `resized / 255.0` - Rescales (normalization) the pixel data between 0 and 1.
At this point, the data can be passed to the model (`m_call`).
#### XAI Signatures
When the serving function is saved back with the underlying model (`tf.saved_model.save`), you specify the input layer of the serving function as the signature `serving_default`.
For XAI image models, you need to save two additional signatures from the serving function:
- `xai_preprocess`: The preprocessing function in the serving function.
- `xai_model`: The concrete function for calling the model.<jupyter_code>CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
decoded = tf.io.decode_jpeg(bytes_input, channels=3)
decoded = tf.image.convert_image_dtype(decoded, tf.float32)
resized = tf.image.resize(decoded, size=(32, 32))
rescale = tf.cast(resized / 255.0, tf.float32)
return rescale
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
decoded_images = tf.map_fn(
_preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
)
return {
CONCRETE_INPUT: decoded_images
} # User needs to make sure the key matches model's input
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
images = preprocess_fn(bytes_inputs)
prob = m_call(**images)
return prob
m_call = tf.function(model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
tf.saved_model.save(
model,
model_path_to_deploy,
signatures={
"serving_default": serving_fn,
# Required for XAI
"xai_preprocess": preprocess_fn,
"xai_model": m_call,
},
)<jupyter_output><empty_output><jupyter_text>## Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently.<jupyter_code>loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
serving_output = list(loaded.signatures["serving_default"].structured_outputs.keys())[0]
print("Serving function output:", serving_output)
input_name = model.input.name
print("Model input name:", input_name)
output_name = model.output.name
print("Model output name:", output_name)<jupyter_output><empty_output><jupyter_text>### Explanation Specification
To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to an Vertex AI `Model` resource. These settings are referred to as the explanation metadata, which consists of:
- `parameters`: This is the specification for the explainability algorithm to use for explanations on your model. You can choose between:
- Shapley - *Note*, not recommended for image data -- can be very long running
- XRAI
- Integrated Gradients
- `metadata`: This is the specification for how the algorithm is applied on your custom model.
#### Explanation Parameters
Let's first dive deeper into the settings for the explainability algorithm.
#### Shapley
Assigns credit for the outcome to each feature, and considers different permutations of the features. This method provides a sampling approximation of exact Shapley values.
Use Cases:
- Classification and regression on tabular data.
Parameters:
- `path_count`: This is the number of paths over the features that will be processed by the algorithm. An exact approximation of the Shapley values requires M! paths, where M is the number of features. For the CIFAR10 dataset, this would be 3072 (32 x 32 x 3) features.
For any non-trivial number of features, this is too computationally expensive. You can reduce the number of paths over the features to M * `path_count`.
#### Integrated Gradients
A gradients-based method to efficiently compute feature attributions with the same axiomatic properties as the Shapley value.
Use Cases:
- Classification and regression on tabular data.
- Classification on image data.
Parameters:
- `step_count`: This is the number of steps used to approximate the remaining sum (sketched below). The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but increasing the step count also increases the compute time.
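For reference, a rough sketch of the approximation that `step_count` controls (the notation below is illustrative and not taken from this notebook): the integrated-gradients attribution for feature $i$ is
$$
IG_i(x) \approx (x_i - x'_i) \cdot \frac{1}{m} \sum_{k=1}^{m} \frac{\partial F\big(x' + \tfrac{k}{m}(x - x')\big)}{\partial x_i},
$$
where $x'$ is the baseline input, $F$ is the model and $m$ is `step_count`; a larger $m$ gives a finer Riemann-sum approximation of the underlying path integral.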
#### XRAI
Based on the integrated gradients method, XRAI assesses overlapping regions of the image to create a saliency map, which highlights relevant regions of the image rather than pixels.
Use Cases:
- Classification on image data.
Parameters:
- `step_count`: This is the number of steps used to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but increasing the step count also increases the compute time.
In the next code cell, set the variable `XAI` to which explainability algorithm you will use on your custom model.<jupyter_code>XAI = "ig" # [ shapley, ig, xrai ]
if XAI == "shapley":
PARAMETERS = {"sampled_shapley_attribution": {"path_count": 10}}
elif XAI == "ig":
PARAMETERS = {"integrated_gradients_attribution": {"step_count": 50}}
elif XAI == "xrai":
PARAMETERS = {"xrai_attribution": {"step_count": 50}}
parameters = aip.ExplanationParameters(PARAMETERS)<jupyter_output><empty_output><jupyter_text>#### Explanation Metadata
Let's first dive deeper into the explanation metadata, which consists of:
- `outputs`: A scalar value in the output to attribute -- what to explain. For example, in a probability output \[0.1, 0.2, 0.7\] for classification, one wants an explanation for 0.7. Consider the following formula, where the output is `y` and that is what we want to explain.
y = f(x)
Consider the following formula, where the outputs are `y` and `z`. Since we can only do attribution for one scalar value, we have to pick whether we want to explain the output `y` or `z`. Assume in this example the model is an object detector and `y` and `z` are the bounding box and the object classification. You would want to pick which of the two outputs to explain.
y, z = f(x)
The dictionary format for `outputs` is:
{ "outputs": { "[your_display_name]":
"output_tensor_name": [layer]
}
}
- [your_display_name]: A human readable name you assign to the output to explain. A common example is "probability".
- "output_tensor_name": The key/value field to identify the output layer to explain.
- [layer]: The output layer to explain. In a single task model, like a tabular regressor, it is the last (topmost) layer in the model.
- `inputs`: The features for attribution -- how they contributed to the output. Consider the following formula, where `a` and `b` are the features. We have to pick which features to explain -- how they contributed. Assume that this model is deployed for A/B testing, where `a` are the data items for the prediction and `b` identifies whether the model instance is A or B. You would want to pick `a` (or some subset of it) for the features, and not `b`, since it does not contribute to the prediction.
y = f(a,b)
The minimum dictionary format for `inputs` is:
{ "inputs": { "[your_display_name]":
"input_tensor_name": [layer]
}
}
- [your_display_name]: A human readable name you assign to the input to explain. A common example is "features".
- "input_tensor_name": The key/value field to identify the input layer for the feature attribution.
- [layer]: The input layer for feature attribution. In a single input tensor model, it is the first (bottom-most) layer in the model.
Since the inputs to the model are images, you can specify the following additional field as a reporting/visualization aid:
- "modality": "image": Indicates the field values are image data.
<jupyter_code>random_baseline = np.random.rand(32, 32, 3)
input_baselines = [{"number_value": x} for x in random_baseline]
INPUT_METADATA = {"input_tensor_name": CONCRETE_INPUT, "modality": "image"}
OUTPUT_METADATA = {"output_tensor_name": serving_output}
input_metadata = aip.ExplanationMetadata.InputMetadata(INPUT_METADATA)
output_metadata = aip.ExplanationMetadata.OutputMetadata(OUTPUT_METADATA)
metadata = aip.ExplanationMetadata(
inputs={"image": input_metadata}, outputs={"class": output_metadata}
)
explanation_spec = aip.ExplanationSpec(metadata=metadata, parameters=parameters)<jupyter_output><empty_output><jupyter_text>### Upload the model
Use this helper function `upload_model` to upload your model, stored in SavedModel format, to the `Model` service, which will instantiate a Vertex AI `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex AI `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.
The helper function takes the following parameters:
- `display_name`: A human readable name for the `Model` resource.
- `image_uri`: The container image for the model deployment.
- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.
The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:
- `parent`: The Vertex AI location root path for `Dataset`, `Model` and `Endpoint` resources.
- `model`: The specification for the Vertex AI `Model` resource instance.
Let's now dive deeper into the Vertex AI model specification `model`. This is a dictionary object that consists of the following fields:
- `display_name`: A human readable name for the `Model` resource.
- `metadata_schema_uri`: Since your model was built without a Vertex AI `Dataset` resource, you will leave this blank (`''`).
- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `explanation_spec`: This is the specification for enabling explainability for your model.
Uploading a model into a Vertex AI `Model` resource returns a long running operation, since it may take a few moments. You call `response.result()`, which is a synchronous call and will return when the Vertex AI `Model` resource is ready.
The helper function returns the Vertex AI fully qualified identifier for the corresponding Vertex AI `Model` instance, `upload_model_response.model`. You will save the identifier for subsequent steps in the variable `model_to_deploy_id`.<jupyter_code>IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = aip.Model(
display_name=display_name,
artifact_uri=model_uri,
metadata_schema_uri="",
explanation_spec=explanation_spec,
container_spec={"image_uri": image_uri},
)
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model(
"cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)<jupyter_output><empty_output><jupyter_text>### Get `Model` resource information
Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:
- `name`: The Vertex AI unique identifier for the `Model` resource.
This helper function calls the Vertex AI `Model` client service's method `get_model`, with the following parameter:
- `name`: The Vertex AI unique identifier for the `Model` resource.<jupyter_code>def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)<jupyter_output><empty_output><jupyter_text>## Model deployment for batch prediction
Now deploy the trained Vertex AI `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for online prediction.
For online prediction, you:
1. Create an `Endpoint` resource for deploying the `Model` resource to.
2. Deploy the `Model` resource to the `Endpoint` resource.
3. Make online prediction requests to the `Endpoint` resource.
For batch-prediction, you:
1. Create a batch prediction job.
2. The job service will provision resources for the batch prediction request.
3. The results of the batch prediction request are returned to the caller.
4. The job service will unprovision the resources for the batch prediction request.## Make a batch prediction request
Now make a batch prediction request to your deployed model.### Get test items
You will use examples out of the test (holdout) portion of the dataset as test items.<jupyter_code>test_image_1 = x_test[0]
test_label_1 = y_test[0]
test_image_2 = x_test[1]
test_label_2 = y_test[1]
print(test_image_1.shape)<jupyter_output><empty_output><jupyter_text>### Prepare the request content
You are going to send the CIFAR10 images as compressed JPEG images, instead of the raw uncompressed bytes:
- `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image.
- Denormalize the image data from \[0,1) range back to [0,255).
- Convert the 32-bit floating point values to 8-bit unsigned integers.<jupyter_code>import cv2
cv2.imwrite("tmp1.jpg", (test_image_1 * 255).astype(np.uint8))
cv2.imwrite("tmp2.jpg", (test_image_2 * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>### Copy test item(s)
For the batch prediction, you will copy the test items over to your Cloud Storage bucket.<jupyter_code>! gsutil cp tmp1.jpg $BUCKET_NAME/tmp1.jpg
! gsutil cp tmp2.jpg $BUCKET_NAME/tmp2.jpg
test_item_1 = BUCKET_NAME + "/tmp1.jpg"
test_item_2 = BUCKET_NAME + "/tmp2.jpg"<jupyter_output><empty_output><jupyter_text>### Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For a JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
- `input_name`: the name of the input layer of the underlying model.
- `'b64'`: A key that indicates the content is base64 encoded.
- `content`: The compressed JPG image bytes as a base64 encoded string.
Each instance in the prediction request is a dictionary entry of the form:
{serving_input: {'b64': content}}
To pass the image data to the prediction service you encode the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network.
- `tf.io.read_file`: Read the compressed JPG images into memory as raw bytes.
- `base64.b64encode`: Encode the raw bytes into a base64 encoded string.<jupyter_code>import base64
import json
gcs_input_uri = BUCKET_NAME + "/" + "test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
bytes = tf.io.read_file(test_item_1)
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
data = {serving_input: {"b64": b64str}}
f.write(json.dumps(data) + "\n")
bytes = tf.io.read_file(test_item_2)
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
data = {serving_input: {"b64": b64str}}
f.write(json.dumps(data) + "\n")<jupyter_output><empty_output><jupyter_text>### Compute instance scaling
You have several choices on scaling the compute instances for handling your batch prediction requests:
- Single Instance: The batch prediction requests are processed on a single compute instance.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.
- Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them.
- Auto Scaling: The batch prediction requests are split across a scalable number of compute instances, as illustrated in the sketch below.
- Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.
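As a concrete illustration of these three choices (a sketch only; the values actually used in this tutorial are set in the next code cell):

```python
# Single instance: everything runs on one node.
MIN_NODES, MAX_NODES = 1, 1

# Manual scaling: a fixed pool of, say, three nodes.
# MIN_NODES, MAX_NODES = 3, 3

# Auto scaling: start with one node and let the service scale up to four under load.
# MIN_NODES, MAX_NODES = 1, 4
```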
The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.<jupyter_code>MIN_NODES = 1
MAX_NODES = 1<jupyter_output><empty_output><jupyter_text>### Make batch prediction request
Now that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters:
- `display_name`: The human readable name for the prediction job.
- `model_name`: The Vertex AI fully qualified identifier for the `Model` resource.
- `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above.
- `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to.
- `parameters`: Additional filtering parameters for serving prediction results.
The helper function calls the job client service's `create_batch_prediction_job` method, with the following parameters:
- `parent`: The Vertex AI location root path for Dataset, Model and Pipeline resources.
- `batch_prediction_job`: The specification for the batch prediction job.
Let's now dive into the specification for the `batch_prediction_job`:
- `display_name`: The human readable name for the prediction batch job.
- `model`: The Vertex AI fully qualified identifier for the `Model` resource.
- `dedicated_resources`: The compute resources to provision for the batch prediction job.
- `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
- `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
- `model_parameters`: Additional filtering parameters for serving prediction results. No additional parameters are supported for custom models.
- `input_config`: The input source and format type for the instances to predict.
- `instances_format`: The format of the batch prediction request file: `csv` or `jsonl`.
- `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests.
- `output_config`: The output destination and format for the predictions.
- `prediction_format`: The format of the batch prediction response file: `csv` or `jsonl`.
- `gcs_destination`: The output destination for the predictions.
This call is an asynchronous operation. You will print a few select fields from the `response` object, including:
- `name`: The Vertex AI fully qualified identifier assigned to the batch prediction job.
- `display_name`: The human readable name for the prediction batch job.
- `model`: The Vertex AI fully qualified identifier for the Model resource.
- `generate_explanation`: Whether or not explanations are generated with the predictions (explainability).
- `state`: The state of the prediction job (pending, running, etc).
Since this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`.<jupyter_code>BATCH_MODEL = "cifar10_batch-" + TIMESTAMP
def create_batch_prediction_job(
display_name,
model_name,
gcs_source_uri,
gcs_destination_output_uri_prefix,
parameters=None,
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
batch_prediction_job = {
"display_name": display_name,
# Format: 'projects/{project}/locations/{location}/models/{model_id}'
"model": model_name,
"model_parameters": json_format.ParseDict(parameters, Value()),
"input_config": {
"instances_format": IN_FORMAT,
"gcs_source": {"uris": [gcs_source_uri]},
},
"output_config": {
"predictions_format": OUT_FORMAT,
"gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
},
"dedicated_resources": {
"machine_spec": machine_spec,
"starting_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
},
"generate_explanation": True,
}
response = clients["job"].create_batch_prediction_job(
parent=PARENT, batch_prediction_job=batch_prediction_job
)
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try:
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", response.labels)
return response
IN_FORMAT = "jsonl"
OUT_FORMAT = "jsonl"
response = create_batch_prediction_job(
BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME
)<jupyter_output><empty_output><jupyter_text>Now get the unique identifier for the batch prediction job you created.<jupyter_code># The full unique ID for the batch job
batch_job_id = response.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)<jupyter_output><empty_output><jupyter_text>### Get information on a batch prediction job
Use this helper function `get_batch_prediction_job`, with the following parameter:
- `job_name`: The Vertex AI fully qualified identifier for the batch prediction job.
The helper function calls the job client service's `get_batch_prediction_job` method, with the following parameter:
- `name`: The Vertex AI fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex AI fully qualified identifier for your batch prediction job -- `batch_job_id`
The helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`.<jupyter_code>def get_batch_prediction_job(job_name, silent=False):
response = clients["job"].get_batch_prediction_job(name=job_name)
if silent:
return response.output_config.gcs_destination.output_uri_prefix, response.state
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try: # not all data types support explanations
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" error:", response.error)
gcs_destination = response.output_config.gcs_destination
print(" gcs_destination")
print(" output_uri_prefix:", gcs_destination.output_uri_prefix)
return gcs_destination.output_uri_prefix, response.state
predictions, state = get_batch_prediction_job(batch_job_id)<jupyter_output><empty_output><jupyter_text>### Get the predictions
When the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`.
Finally you view the predictions stored at the Cloud Storage path you set as output. The predictions will be in a JSONL format, which you indicated at the time you made the batch prediction job, under a subfolder starting with the name `prediction`, and under that folder will be a file called `prediction.results-xxxxx-of-xxxxx`.
Now display (cat) the contents. You will see multiple JSON objects, one for each prediction.
You also view the explanations stored at the Cloud Storage path you set as output. The explanations will be in JSONL format, which you indicated at the time you made the batch prediction job, under a subfolder starting with the name `prediction`, and under that folder will be a file called `explanation.results-xxxxx-of-xxxxx`.
Let's display (cat) the contents. You will see a row for each prediction -- in this case, there is just one row. The row is the softmax probability distribution for the corresponding CIFAR10 classes.<jupyter_code>def get_latest_predictions(gcs_out_dir):
""" Get the latest prediction subfolder using the timestamp in the subfolder name"""
folders = !gsutil ls $gcs_out_dir
latest = ""
for folder in folders:
subfolder = folder.split("/")[-2]
if subfolder.startswith("prediction-"):
if subfolder > latest:
latest = folder[:-1]
return latest
while True:
predictions, state = get_batch_prediction_job(batch_job_id, True)
if state != aip.JobState.JOB_STATE_SUCCEEDED:
print("The job has not completed:", state)
if state == aip.JobState.JOB_STATE_FAILED:
break
else:
folder = get_latest_predictions(predictions)
! gsutil ls $folder/explanation.results*
print("Results:")
! gsutil cat $folder/explanation.results*
print("Errors:")
! gsutil cat $folder/prediction.errors*
break
time.sleep(60)<jupyter_output><empty_output><jupyter_text># Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket<jupyter_code>delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex AI fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex AI fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex AI fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex AI fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex AI fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex AI fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME<jupyter_output><empty_output>
<jupyter_start><jupyter_text>DP-SGD has three privacy-specific hyperparameters and one existing hyperparameter that you must tune:
1. `l2_norm_clip` (float) - The maximum Euclidean (L2) norm of each gradient that is applied to update model parameters. This hyperparameter is used to bound the optimizer's sensitivity to individual training points.
2. `noise_multiplier` (float) - The amount of noise sampled and added to gradients during training. Generally, more noise results in better privacy (often, but not necessarily, at the expense of lower utility).
3. `microbatches` (int) - Each batch of data is split into smaller units called microbatches. By default, each microbatch should contain a single training example. This allows us to clip gradients on a per-example basis rather than after they have been averaged across the minibatch. This in turn decreases the (negative) effect of clipping on the signal found in the gradient and typically maximizes utility. However, computational overhead can be reduced by increasing the size of microbatches to include more than one training example. The average gradient across these multiple training examples is then clipped. The total number of examples consumed in a batch, i.e., one step of gradient descent, remains the same. The number of microbatches should evenly divide the batch size. <jupyter_code>l2_norm_clip = 0.5
# math, numpy and tensorflow are used below; imported here so the cell is self-contained.
import math

import numpy as np
import tensorflow as tf

noise_multiplier = 1
learning_rate = 1e-2
gaussian_stdev = l2_norm_clip * noise_multiplier
gaussian_noise_var = gaussian_stdev ** 2
public_var_multiplier = 1 # Multiply public variance by this multiplier
public_stdev_multipler = math.sqrt(public_var_multiplier)
num_microbatches = batch_size
if batch_size % num_microbatches != 0:
raise ValueError('Batch size should be an integer multiple of the number of microbatches')
train, test = tf.keras.datasets.cifar10.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.array(train_data, dtype=np.float32) / 255
test_data = np.array(test_data, dtype=np.float32) / 255
train_labels = np.array(train_labels, dtype=np.int32)
test_labels = np.array(test_labels, dtype=np.int32)
train_labels = tf.keras.utils.to_categorical(train_labels, num_classes=10)
test_labels = tf.keras.utils.to_categorical(test_labels, num_classes=10)
assert train_data.min() == 0.
assert train_data.max() == 1.
assert test_data.min() == 0.
assert test_data.max() == 1.
from sklearn.model_selection import train_test_split
# test_size refers to private data size
public_data, private_data, public_labels, private_labels = \
train_test_split(train_data, train_labels, test_size=199/200)
num_batches = private_data.shape[0] // batch_size
print(public_data.shape)
print(public_labels.shape)
print(private_data.shape)
print(private_labels.shape)
print(test_data.shape)
print(test_labels.shape)
from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy
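# Report the (epsilon, delta) differential-privacy guarantee implied by the
# private dataset size, batch size, noise multiplier and number of epochs.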
compute_dp_sgd_privacy.compute_dp_sgd_privacy(
n=private_labels.shape[0], batch_size=batch_size, noise_multiplier=noise_multiplier, epochs=epochs, delta=1e-5)
# CNN model
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
def cnn_model():
model = tf.keras.models.Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=train_data.shape[1:], trainable=False))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), trainable=False))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', trainable=False))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), trainable=False))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, trainable=False))
model.add(Activation('relu'))
model.add(Dense(64, trainable=False))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.load_weights('cifar10_pretrained_weights.h5')
return model
cnn_model().summary()
print([t.numpy().shape for t in cnn_model().trainable_weights])
def regress_all(arr):
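# For each column i of `arr`, fit a least-squares regression of that column on
# all the remaining columns plus a bias term, and return one coefficient vector
# per column (with a zero placed at position i and the bias coefficient dropped).
# scipy (scipy.linalg) and numpy are assumed to be imported earlier in the notebook.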
# Add 1s for bias
m, n = arr.shape
arr = np.insert(arr, arr.shape[1], 1, axis=1)
all_coeffs = []
for i in range(n):
coeff_mask = np.ones(arr.shape, dtype=bool)
coeff_mask[:, i] = False
dependant_mask = np.zeros(arr.shape, dtype=bool)
dependant_mask[:, i] = True
coeffs = scipy.linalg.lstsq(arr[coeff_mask].reshape((m, n)), arr[dependant_mask])[0]
coeffs = np.insert(coeffs, i, 0)
coeffs = np.delete(coeffs, -1) # Delete bias
all_coeffs.append(coeffs)
return all_coeffs
### IN WEIGHTS
# @jit(nopython=True, fastmath=True)
# def multiply_coeffs(coeffs, weights):
# # (10, 64, 64) @ (64, 10) -> (64, 10)
# res = np.zeros(weights.shape)
# for i in range(weights.shape[0]):
# for j in range(weights.shape[1]):
# res[i, j] = np.dot(coeffs[j, i], weights[:, j])
# return res
# EPSILON = 1e-6
# def construct_graphical_model_and_get_map(public_grads, dp_grads, normalize=False):
# public_grads_means = [np.mean(l, axis=0) for l in public_grads]
# public_grads_stdev = [np.std(l, axis=0, ddof=1) * public_stdev_multipler
# for l in public_grads]
# public_grads_stdev[0] = np.maximum(public_grads_stdev[0], EPSILON)
# public_weights = public_grads[0]
# observed_weights = dp_grads[0]
# observed_biases = dp_grads[1]
# if normalize:
# public_weights -= public_grads_means[0]
# public_weights /= public_grads_stdev[0]
# regress_coeffs = []
# for i in range(public_weights.shape[2]):
# regress_coeffs.append(regress_all(public_weights[:, :, i]))
# regress_coeffs = np.asarray(regress_coeffs, dtype=np.float64) # Scipy minimize requires float64
# def log_likelihood(X):
# X = X.reshape(observed_weights.shape, order='F') # Col-major faster for np.dot
# res = multiply_coeffs(regress_coeffs, X)
# # Prior using public mean and stdev
# if normalize:
# ll = scipy.stats.norm.logpdf(X, loc=res, scale=1)
# else:
# ll = scipy.stats.norm.logpdf(X, loc=public_grads_means[0] + res,
# scale=public_grads_stdev[0])
# # Emission probability
# if normalize:
# X *= public_grads_stdev[0]
# X += public_grads_means[0]
# ll += scipy.stats.norm.logpdf(X, loc=observed_weights, scale=gaussian_stdev)
# return -np.sum(ll)
# map_weights = optimize.minimize(log_likelihood, np.zeros(observed_weights.shape), method='L-BFGS-B').x
# map_weights = map_weights.reshape(observed_weights.shape)
# if normalize:
# map_weights *= public_grads_stdev[0]
# map_weights += public_grads_means[0]
# public_bias_var = np.square(public_grads_stdev[1])
# map_biases = ((observed_biases * public_bias_var) + (public_grads_means[1] * gaussian_noise_var)) / \
# (public_bias_var + gaussian_noise_var)
# return [map_weights, map_biases]
### OUT WEIGHTS
@jit(nopython=True, fastmath=True)
def multiply_coeffs(coeffs, weights):
# (64, 10, 10) @ (64, 10) -> (64, 10)
res = np.zeros(weights.shape)
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
res[i, j] = np.dot(coeffs[i, j], weights[i, :])
return res
EPSILON = 1e-6
def construct_graphical_model_and_get_map(public_grads, dp_grads, normalize=False):
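# Sketch of what this routine does: the per-example public gradients provide a
# Gaussian prior (their mean/stdev plus the linear structure fitted by regress_all),
# and the DP-noised gradient is treated as a Gaussian observation with stdev
# `gaussian_stdev`. The weight gradients are recovered as the MAP estimate via
# L-BFGS-B; the bias gradients have a closed-form posterior mean.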
public_grads_means = [np.mean(l, axis=0) for l in public_grads]
public_grads_stdev = [np.std(l, axis=0, ddof=1) * public_stdev_multipler
for l in public_grads]
public_grads_stdev[0] = np.maximum(public_grads_stdev[0], EPSILON)
public_weights = public_grads[0]
observed_weights = dp_grads[0]
observed_biases = dp_grads[1]
if normalize:
public_weights -= public_grads_means[0]
public_weights /= public_grads_stdev[0]
regress_coeffs = []
for i in range(public_weights.shape[1]):
regress_coeffs.append(regress_all(public_weights[:, i, :]))
regress_coeffs = np.asarray(regress_coeffs, dtype=np.float64) # Scipy minimize requires float64
def log_likelihood(X):
X = X.reshape(observed_weights.shape, order='C')
res = multiply_coeffs(regress_coeffs, X)
# Prior using public mean and stdev
if normalize:
ll = scipy.stats.norm.logpdf(X, loc=res, scale=1)
else:
ll = scipy.stats.norm.logpdf(X, loc=public_grads_means[0] + res,
scale=public_grads_stdev[0])
# Emission probability
ll += scipy.stats.norm.logpdf(X, loc=observed_weights, scale=gaussian_stdev)
return -np.sum(ll)
map_weights = optimize.minimize(log_likelihood, np.zeros(observed_weights.shape), method='L-BFGS-B').x
map_weights = map_weights.reshape(observed_weights.shape)
if normalize:
map_weights *= public_grads_stdev[0]
map_weights += public_grads_means[0]
public_bias_var = np.square(public_grads_stdev[1])
map_biases = ((observed_biases * public_bias_var) + (public_grads_means[1] * gaussian_noise_var)) / \
(public_bias_var + gaussian_noise_var)
return [map_weights, map_biases]
loss_fn = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.losses.Reduction.NONE)
def get_public_grads(public_x, public_y, loss_fn, model):
public_grads = []
# x needs to have extra dimension for number of examples,
# even if it's 1 for our case
public_x = np.expand_dims(public_x, axis=1)
for x, y in zip(public_x, public_y):
# for x, y in tqdm(zip(public_x, public_y), total=public_x.shape[0], desc='Public Dataset Iter'):
with tf.GradientTape() as tape:
loss_value = loss_fn(y, model(x))
grad = tape.gradient(loss_value, model.trainable_weights)
if not public_grads:
public_grads = [[] for _ in grad]
for i, t in enumerate(grad):
public_grads[i].append(t.numpy())
public_grads = [np.asarray(l) for l in public_grads]
return public_grads
def get_public_grads_mean_var(public_x, public_y, loss_fn, model):
# x needs to have extra dimension for number of examples,
# even if it's 1 for our case
public_x = np.expand_dims(public_x, axis=1)
# https://math.stackexchange.com/questions/20593/calculate-variance-from-a-stream-of-sample-values
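# Streaming (Welford-style) estimate: `mean_k` holds the running mean of each
# gradient tensor and `v_k` accumulates squared deviations, so the unbiased
# sample variance is v_k / (k - 1) after all public examples have been seen.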
mean_k = None
v_k = None
k = 0
for x, y in zip(public_x, public_y):
k += 1
with tf.GradientTape() as tape:
loss_value = loss_fn(y, model(x))
grad = tape.gradient(loss_value, model.trainable_weights)
numpy_grad = [t.numpy() for t in grad]
if k == 1:
mean_k = numpy_grad
v_k = [np.zeros(t.shape) for t in numpy_grad]
else:
prev_mean_k = mean_k
mean_k = [mean_k[i] + (t - mean_k[i]) / k for i, t in enumerate(numpy_grad)]
v_k = [v_k[i] + np.multiply(t - prev_mean_k[i],
t - mean_k[i])
for i, t in enumerate(numpy_grad)]
unbiased_variance = [t / (k - 1) for t in v_k]
return mean_k, unbiased_variance
def evaluate_model(model, loss_fn, x, y, batch_size=None):
pred = model.predict(x, batch_size=batch_size)
loss = np.mean(loss_fn(y, pred).numpy())
acc = np.mean(tf.keras.metrics.categorical_accuracy(y, pred).numpy())
return (loss, acc)
from tensorflow.keras.callbacks import EarlyStopping
pretrained_model = cnn_model()
pretrained_model.compile(optimizer='adam',
loss=loss_fn, metrics=['accuracy'])
baseline_history = pretrained_model.fit(public_data, public_labels,
epochs=1000,
batch_size=batch_size,
verbose=0,
callbacks=[TQDMNotebookCallback()])
evaluate_model(pretrained_model, loss_fn, test_data, test_labels)
sgd_model_pretrained = cnn_model()
sgd_model_pretrained.set_weights(pretrained_model.get_weights())
sgd_model_pretrained.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
dpsgd_model_pretrained = cnn_model()
dpsgd_model_pretrained.set_weights(pretrained_model.get_weights())
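# The DP optimizer clips each per-microbatch gradient to `l2_norm_clip` and adds
# Gaussian noise scaled by `noise_multiplier` before applying the Adam update.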
dpsgd_optimizer_pretrained = DPAdamGaussianOptimizer(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches)
dpsgd_model_pretrained.compile(optimizer=dpsgd_optimizer_pretrained, loss=loss_fn, metrics=['accuracy'])
bayesian_model_pretrained = cnn_model()
bayesian_model_pretrained.set_weights(pretrained_model.get_weights())
bayesian_optimizer_pretrained = DPAdamGaussianOptimizer(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches)
bayesian_model_pretrained.compile(optimizer=bayesian_optimizer_pretrained, loss=loss_fn, metrics=['accuracy'])
bayesian_network_model_pretrained = cnn_model()
bayesian_network_model_pretrained.set_weights(pretrained_model.get_weights())
bayesian_network_optimizer_pretrained = DPAdamGaussianOptimizer(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches)
bayesian_network_model_pretrained.compile(
optimizer=bayesian_network_optimizer_pretrained, loss=loss_fn, metrics=['accuracy'])
# Iterate over epochs.
sgd_loss_pretrained_batches = []
sgd_acc_pretrained_batches = []
dpsgd_loss_pretrained_batches = []
dpsgd_acc_pretrained_batches = []
bayesian_loss_pretrained_batches = []
bayesian_acc_pretrained_batches = []
bayesian_network_loss_pretrained_batches = []
bayesian_network_acc_pretrained_batches = []
# Used for picking a random minibatch
idx_array = np.arange(private_data.shape[0])
for epoch in tqdm(range(epochs), desc='Epoch'):
# Iterate over the batches of the dataset.
for step in tqdm(range(num_batches), desc='Batch', leave=False):
# Pick a random minibatch
random_idx = np.random.choice(idx_array, batch_size, replace=False)
x_batch_train = private_data[random_idx]
y_batch_train = private_labels[random_idx]
### Normal SGD
loss, acc = evaluate_model(sgd_model_pretrained, loss_fn, train_data, train_labels, batch_size=batch_size)
sgd_loss_pretrained_batches.append(loss)
sgd_acc_pretrained_batches.append(acc)
sgd_model_pretrained.fit(x_batch_train, y_batch_train, batch_size=batch_size, verbose=0)
### DPSGD pretrained
# Evaluate DPSGD model
loss, acc = evaluate_model(dpsgd_model_pretrained, loss_fn, train_data, train_labels, batch_size=batch_size)
dpsgd_loss_pretrained_batches.append(loss)
dpsgd_acc_pretrained_batches.append(acc)
# Open a GradientTape to record the operations run
# during the forward pass, which enables autodifferentiation.
with tf.GradientTape(persistent=True) as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = dpsgd_model_pretrained(x_batch_train) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss = lambda: loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = dpsgd_optimizer_pretrained.compute_gradients(
loss, dpsgd_model_pretrained.trainable_weights, gradient_tape=tape)
del tape
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
dpsgd_optimizer_pretrained.apply_gradients(grads)
### Our simple Bayesian DPSGD pretrained
# Evaluate custom model
loss, acc = evaluate_model(bayesian_model_pretrained, loss_fn, train_data, train_labels, batch_size=batch_size)
bayesian_loss_pretrained_batches.append(loss)
bayesian_acc_pretrained_batches.append(acc)
means, variances = get_public_grads_mean_var(public_data, public_labels, loss_fn, bayesian_model_pretrained)
variances = [layer * public_var_multiplier for layer in variances]
# Open a GradientTape to record the operations run
# during the forward pass, which enables autodifferentiation.
with tf.GradientTape(persistent=True) as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = bayesian_model_pretrained(x_batch_train) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss = lambda: loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = bayesian_optimizer_pretrained.compute_gradients(
loss, bayesian_model_pretrained.trainable_weights, gradient_tape=tape)
del tape
# X = N(means, variances)
# Y = X + N(0, gaussian_noise_var)
# MLE of X is ((variances * Y) + (gaussian_noise_var * means)) / (variances + gaussian_noise_var)
# https://www.wolframalpha.com/input/?i=differentiate+-log%28%CF%83%29+-+1%2F2+log%282+%CF%80%29+-+1%2F2+%28%28x+-+%CE%BC%29%2F%CF%83%29%5E2+-log%28%CE%A3%29+-+1%2F2+log%282+%CF%80%29+-+1%2F2+%28%28y+-+x%29%2F%CE%A3%29%5E2+wrt+x
# https://www.wolframalpha.com/input/?i=solve+%28y+-+x%29%2F%CE%A3%5E2+-+%28x+-+%CE%BC%29%2F%CF%83%5E2+for+x
Ys = [grad[0] for grad in grads]
Xs = [((Y * variances[i]) + (means[i] * gaussian_noise_var)) / (variances[i] + gaussian_noise_var)
for i, Y in enumerate(Ys)]
adjusted_grads = zip(Xs, bayesian_model_pretrained.trainable_weights)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
bayesian_optimizer_pretrained.apply_gradients(adjusted_grads)
### Our Bayesian graph DPSGD pretrained
# Evaluate custom model
loss, acc = evaluate_model(bayesian_network_model_pretrained, loss_fn, train_data, train_labels, batch_size=batch_size)
bayesian_network_loss_pretrained_batches.append(loss)
bayesian_network_acc_pretrained_batches.append(acc)
# Open a GradientTape to record the operations run
# during the forward pass, which enables autodifferentiation.
with tf.GradientTape(persistent=True) as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = bayesian_network_model_pretrained(x_batch_train) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss = lambda: loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = bayesian_network_optimizer_pretrained.compute_gradients(
loss,
bayesian_network_model_pretrained.trainable_weights,
gradient_tape=tape)
del tape
public_grads = get_public_grads(public_data, public_labels, loss_fn, bayesian_model_pretrained)
dp_grads = [g[0].numpy() for g in grads]
break
map_grads = construct_graphical_model_and_get_map(public_grads, dp_grads, normalize=True)
map_grads = [tf.convert_to_tensor(l, dtype='float32') for l in map_grads]
bayesian_network_optimizer_pretrained.apply_gradients(
zip(map_grads, bayesian_network_model_pretrained.trainable_weights))
metrics = pd.DataFrame({
# 'sgd_acc': sgd_acc_pretrained_batches,
'dpsgd_acc': dpsgd_acc_pretrained_batches,
'simple_bayesian_acc': bayesian_acc_pretrained_batches,
'bayesian_network_acc': bayesian_network_acc_pretrained_batches,
})
print(metrics)
sns.set(rc={'figure.figsize':(16, 10)})
ax = sns.lineplot(data=metrics)
ax.set(xlabel='Minibatch', ylabel='Acc',
title='Pretrained CIFAR10 Bayesian DPSGD (Norm Clip={}, Public Size={}, Variance-Mult={})'.format(
l2_norm_clip, public_data.shape[0], public_var_multiplier))
# plt.savefig('cifar_bayesian_network_all_normalized_in-weights_variance{}_dpsgd-norm{}.png'
# .format(public_var_multiplier, l2_norm_clip))<jupyter_output><empty_output>
<jupyter_start><jupyter_text>
# **Exploratory Data Analysis Lab**
Estimated time needed: **30** minutes
In this module you get to work with the cleaned dataset from the previous module.
In this assignment you will perform the task of exploratory data analysis.
You will find out the distribution of data, presence of outliers and also determine the correlation between different columns in the dataset.
## Objectives
In this lab you will perform the following:
* Identify the distribution of data in the dataset.
* Identify outliers in the dataset.
* Remove outliers from the dataset.
* Identify correlation between features in the dataset.
***
## Hands on Lab
Import the pandas module.
<jupyter_code>import pandas as pd<jupyter_output><empty_output><jupyter_text>Load the dataset into a dataframe.
<jupyter_code>df = pd.read_csv("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DA0321EN-SkillsNetwork/LargeData/m2_survey_data.csv")<jupyter_output><empty_output><jupyter_text>## Distribution
### Determine how the data is distributed
The column `ConvertedComp` contains Salary converted to annual USD salaries using the exchange rate on 2019-02-01.
This assumes 12 working months and 50 working weeks.
Plot the distribution curve for the column `ConvertedComp`.
<jupyter_code>%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.distplot(df['ConvertedComp'])
plt.show()<jupyter_output>/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
<jupyter_text>Plot the histogram for the column `ConvertedComp`.
<jupyter_code># your code goes here
hist = df['ConvertedComp'].hist()
<jupyter_output><empty_output><jupyter_text>What is the median of the column `ConvertedComp`?
<jupyter_code>df['ConvertedComp'].median()
<jupyter_output><empty_output><jupyter_text>How many responders identified themselves only as a **Man**?
<jupyter_code>
df['Gender'].value_counts()
<jupyter_output><empty_output><jupyter_text>Find out the median `ConvertedComp` of respondents who identified themselves only as a **Woman**.
<jupyter_code>df.loc[df['Gender'] == 'Woman', ['ConvertedComp']].median()
<jupyter_output><empty_output><jupyter_text>Give the five-number summary for the column `Age`.
**Double click here for hint**.
<!--
min,q1,median,q3,max of a column are its five number summary.
-->
<jupyter_code>df['Age'].describe()
<jupyter_output><empty_output><jupyter_text>Plot a histogram of the column `Age`.
<jupyter_code>age_hist = df['Age'].hist()
<jupyter_output><empty_output><jupyter_text>## Outliers
### Finding outliers
Find out if outliers exist in the column `ConvertedComp` using a box plot?
<jupyter_code>sns.boxplot(x=df['ConvertedComp'])<jupyter_output><empty_output><jupyter_text>Find out the Inter Quartile Range for the column `ConvertedComp`.
<jupyter_code>iqr = df['ConvertedComp'][df['ConvertedComp'].between(df['ConvertedComp'].quantile(.25), df['ConvertedComp'].quantile(.75), inclusive=True)]
q1 = df['ConvertedComp'].quantile(.25)
q3 = df['ConvertedComp'].quantile(.75)
mask = df['ConvertedComp'].between(q1, q3, inclusive=True)
iqr = df.loc[mask, 'ConvertedComp']
iqr_q3_q1 = q3 - q1
print(iqr)
print('The IQR for the q1 and q3:', iqr_q3_q1)<jupyter_output>0 61000.0
1 95179.0
2 90000.0
4 65277.0
5 31140.0
...
11386 47724.0
11387 39180.0
11389 35518.0
11391 68748.0
11396 80371.0
Name: ConvertedComp, Length: 5380, dtype: float64
The IQR for the q1 and q3: 73132.0
<jupyter_text>Find out the upper and lower bounds.
<jupyter_code>lower_bound = q1 -(1.5 * iqr_q3_q1)
upper_bound = q3 +(1.5 * iqr_q3_q1)
print(lower_bound)
print(upper_bound)
<jupyter_output>-82830.0
209698.0
<jupyter_text>Identify how many outliers are there in the `ConvertedComp` column.
<jupyter_code>def detect_outlier(data_1):
outliers=[]
count_outliers = 0
threshold=3
mean_1 = np.mean(data_1)
std_1 =np.std(data_1)
for y in data_1:
z_score= (y - mean_1)/std_1
if np.abs(z_score) > threshold:
outliers.append(y)
count_outliers+=1
return count_outliers
print(detect_outlier(df['ConvertedComp']))
<jupyter_output>240
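<jupyter_text>The function above counts outliers with a z-score rule; for consistency with the IQR bounds computed earlier, one could also count the values that fall outside `lower_bound` and `upper_bound` (a minimal sketch reusing those variables):<jupyter_code># count values outside the IQR-based bounds computed above
outlier_mask = (df['ConvertedComp'] < lower_bound) | (df['ConvertedComp'] > upper_bound)
print(outlier_mask.sum())<jupyter_output><empty_output>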
<jupyter_text>Create a new dataframe by removing the outliers from the `ConvertedComp` column.
<jupyter_code># keep only the rows whose ConvertedComp lies within the IQR-based bounds computed above
df_no_outliers = df[(df['ConvertedComp'] >= lower_bound) & (df['ConvertedComp'] <= upper_bound)]
<jupyter_output><empty_output><jupyter_text>## Correlation
### Finding correlation
Find the correlation between `Age` and all other numerical columns.
<jupyter_code>df.corr()<jupyter_output><empty_output>
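<jupyter_text>Since the question asks specifically for the correlation of `Age` with the other numerical columns, a small optional follow-up is to select that column from the correlation matrix:<jupyter_code># correlation of Age with every other numerical column, sorted
df.corr()['Age'].sort_values(ascending=False)<jupyter_output><empty_output>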
|
no_license
|
/M3ExploratoryDataAnalysis_lab_solution.ipynb
|
INSEAInnovationEdge/complete_formation
| 15 |
<jupyter_start><jupyter_text>Having multiple stimuli with different time courses for different nodes currently requires an extra class, see usage below<jupyter_code>class MultiStimuliRegion(patterns.StimuliRegion):
def __init__(self, *stimuli):
self.stimuli = stimuli
def configure_space(self, *args, **kwds):
[stim.configure_space(*args, **kwds) for stim in self.stimuli]
def configure_time(self, *args, **kwds):
[stim.configure_time(*args, **kwds) for stim in self.stimuli]
def __call__(self, *args, **kwds):
return np.array([stim(*args, **kwds) for stim in self.stimuli]).sum(axis=0)<jupyter_output><empty_output><jupyter_text>Now we can make several pulse trains with different temporal configurations and node weights, and combine them with above class.<jupyter_code>conn = connectivity.Connectivity(load_default=True)
nnode = conn.weights.shape[0]
def make_train(node_idx, node_weights, **params):
weighting = np.zeros(nnode)
weighting[node_idx] = node_weights
eqn_t = equations.PulseTrain()
eqn_t.parameters.update(params)
stimulus = patterns.StimuliRegion(
temporal=eqn_t,
connectivity=conn,
weight=weighting)
return stimulus
train1 = make_train([10, 20], 1.0, onset=1.5e3, T=100.0, tau=50.0)
train2 = make_train([30, 40], 2.0, onset=1.5e3, T=200.0, tau=100.0)
train3 = make_train(r_[7:74:5], 0.2, onset=5e2, T=50.0, tau=20.0)
stimulus = MultiStimuliRegion(train1, train2, train3)
stimulus.configure_space()
time = r_[1e3:2e3:10.0]
stimulus.configure_time(time)
pattern = stimulus()
imshow(pattern, interpolation='none')
xlabel('Time')
ylabel('Space')
colorbar()<jupyter_output><empty_output>
|
non_permissive
|
/tvb_documentation/demos/multiple_stimuli.ipynb
|
DARSakthi/tvb-root
| 2 |
<jupyter_start><jupyter_text>What types of crimes are most common? Where are different types of crimes most likely to occur? Does the frequency of crimes change over the day? Week? Year?<jupyter_code>import pandas as pd
from pandas import Series, DataFrame
%pylab inline
df1=pd.read_csv('crime.csv',encoding = 'unicode_escape')
df1.head()
df2=pd.read_csv('offense_codes.csv',encoding = 'unicode_escape')
df2.head()
df1['SHOOTING'].value_counts()
top_crimes=df1[df1['OFFENSE_CODE'].isin(df1['OFFENSE_CODE'].value_counts()[0:10].index.values)]
topcrimes_merge=top_crimes.merge(df2,left_on=top_crimes['OFFENSE_CODE'],right_on=df2['CODE'])
topcrimes_merge[['OFFENSE_CODE','NAME','OFFENSE_CODE_GROUP']].drop_duplicates()<jupyter_output><empty_output><jupyter_text>The most common crimes are vandalism, towed motor vehicle, investigate property, verbal dispute, assault and theft.<jupyter_code>topcrimes_merge.head()
df1.groupby(['STREET'])['OFFENSE_CODE_GROUP'].count().sort_values(ascending=False)[0:5]<jupyter_output><empty_output><jupyter_text>Different types of crimes are most likely to occur in Washington Street, Blue Hill Avenue, Boylston Street, Dorchester Avenue, and Tremont Street.<jupyter_code>df1.drop_duplicates().groupby('HOUR')['INCIDENT_NUMBER'].count().sort_index().plot(kind='line')
sorter = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
sorterIndex = dict(zip(sorter,range(len(sorter))))
sorterIndex
df_week=df1.drop_duplicates().groupby('DAY_OF_WEEK')['INCIDENT_NUMBER'].count().to_frame().reset_index()
df_week['DAY']=df_week['DAY_OF_WEEK']
df_week['DAY_OF_WEEK'] = df_week['DAY_OF_WEEK'].map(sorterIndex)
df_week.sort_values(by='DAY_OF_WEEK',inplace=True)
plot(df_week['DAY'],df_week['INCIDENT_NUMBER'])
df1.drop_duplicates().groupby('YEAR')['INCIDENT_NUMBER'].count().sort_index().plot(kind='bar')<jupyter_output><empty_output>
|
no_license
|
/Crimes in Boston.ipynb
|
nehaannajohn/Crimes-in-Boston-Kaggle
| 3 |
<jupyter_start><jupyter_text># Pseudo-random number generators
<jupyter_code>%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np<jupyter_output><empty_output><jupyter_text>## Pseudo-random number generators
A computer is a deterministic system that executes a series of instructions (program) in a completely predictable fashion. In the absence of some external source of real randomness (eg. a thermal "noise" diode) we have to settle for a *pseudo-random* number generator that uses a deterministic algorithm to produce a sequence of apparently random values. All PNRGs consist of some function $f$ that operates on some number $x_i$ to produce a new value $x_{i+1}$
$$ x_{i+1} = f(x_i;a_1,a_2,\ldots a_n) $$
where $a_n$ are internal parameters specific to each implementation.
A useful class of PRNGs are based on multiplicative linear congruential generators
$$ x_{i+1} = \left( a x_i + b \right) \;mod\, m$$
which use fast integer multiplication and modulo arithmetic to produce numbers in the range from 1 to $m-1$.
For example
$$ x_{i+1} = 6 x_i \;mod\, 13 $$
starting with a seed of $1$ produces the sequence
$$\ldots 1,6,10,8,9,2,12,7,3,5,4,11,1 \ldots$$
while starting with a seed of $2$ produces the sequence
$$\ldots 2,12,7,3,5,4,11,1,6,10,8,9,2 \ldots$$
Ideally, the result will be a circular list of some permutation of the integers between $1$ and $m-1$.
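As a quick check of the two sequences listed above, one can iterate the recurrence directly (a minimal sketch written only for this illustration; the helper below is not part of any library):<jupyter_code>def lcg_sequence(a, m, seed, n):
    # iterate the multiplicative LCG x_{i+1} = a * x_i mod m, returning the seed plus n further values
    xs = [seed]
    for _ in range(n):
        xs.append((a * xs[-1]) % m)
    return xs

print(lcg_sequence(6, 13, 1, 12))  # [1, 6, 10, 8, 9, 2, 12, 7, 3, 5, 4, 11, 1]
print(lcg_sequence(6, 13, 2, 12))  # [2, 12, 7, 3, 5, 4, 11, 1, 6, 10, 8, 9, 2]<jupyter_output><empty_output><jupyter_text>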
The choice for multiplicative factor and modulus is critical, as most values produce non-random sequences.
One good pair of parameters found by Park and Miller
$$ x_{i+1} = 7^5 x_i \;mod\, (2^{31} - 1) = 16807 x_i \;mod\, 2147483647 $$
produces a complete sequence of 31-bit random numbers.

*Note:* state-of-the-art PRNGs may be based on more sophisticated algorithms, but they all produce some kind of circular permuted list. This is equivalent to drawing cards from a shuffled deck, as the next card is unpredictable but not random.
[https://en.wikipedia.org/wiki/Mersenne_Twister]
The numpy random framework currently uses a Mersenne Twister pseudo-random number generator. It has a very long period of $2^{19937} − 1$. The internal state is complex, and can be obtained as follows.<jupyter_code>import numpy as np
seed = 123
r = np.random.RandomState(seed)
np.random.RandomState.get_state(r)<jupyter_output><empty_output><jupyter_text>## A terrible example
https://en.wikipedia.org/wiki/RANDU
RANDU is a linear congruential pseudorandom number generator of the Park–Miller type, which has been used since the 1960s. It is defined by the recurrence:
$$ V_{j+1} = 65539\cdot V_j\, \bmod\, 2^{31} $$
with the initial seed number $V_0$ as an odd number. It generates pseudorandom integers $V_{j}$ which are uniformly distributed in the interval [1, $2^{31}$ − 1], but in practical applications are often mapped into pseudorandom rationals $X_j$ in the interval (0, 1), by the formula:
$$ X_j = \frac{V_{j}}{2^{31}} $$
IBM's RANDU is widely considered to be one of the most ill-conceived random number generators ever designed.
The reason for choosing these particular values is that with a 32-bit-integer word size, the arithmetic of mod $2^{31}$ and 65539 (i.e., $2^{16}+3$) calculations could be done quickly, using special features of some computer hardware.<jupyter_code>def prng_randu( nsamples=2, seed=13, multval=65539, modval=2**31, scale=True):
sequence = np.zeros( nsamples, dtype=np.int32 )
sequence[0] = seed
for i in range(1,nsamples):
sequence[i] = multval*sequence[i-1] % modval
if scale:
sequence = sequence / np.double(modval)
return sequence
print( prng_randu(9 , scale=False))
#V = [13] ; modt=2**31 ; nn=500000
#for i in range(nn-1): V.append( 65539*V[-1] % modt )
#x = np.array(V) / 2.0**31
x = prng_randu(500000)
plt.plot(x[0:999])
display( plt.title('Does this look random?') )
# If successive numbers were not actually independent then we might
# expect to see some patterns emerging when we plot one sample as
# x and the next sample as y.
#
xx, yy = x[0:6000].reshape(-1,2).T
plt.plot( xx,yy, '.')
plt.title('2-dimensional lag plot')
#
# Humans are very good at detecting patterns, even when there
# isn't actually anything there. However, I would say that
# there is structure in this plot. A quantitative analysis
# might be done using the joint probability.<jupyter_output><empty_output><jupyter_text><jupyter_code># Tried plotting more points, but that didn't help.
#
xx, yy = x.reshape(-1,2).T
plt.plot( xx,yy, '.', ms=0.2)
plt.title('2-dimensional lag plot', alpha=0.1)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
nn = 500000
#xx, yy, zz = x[0:6000].reshape(3,-1)
xx, yy, zz = x[0:nn-3:3], x[1:nn-2:3], x[2:nn-1:3]
plt.plot( xx,yy,zz, '.', ms=1.5, alpha=0.1)
plt.title('hyperplanes')
plt.plot( xx[zz>0.9], yy[zz>0.9], '.', ms=1.5, alpha=0.1)
plt.title('slice for z>0.9')<jupyter_output><empty_output><jupyter_text>Extending this approach to three dimensions turns out to be very informative. Rather than being uniformly distributed around the volume, it turns out that all of the points occur on a small number of planes. This will drastically reduce the randomness, as knowing one value will let us make predictions about others.
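The plane structure can also be seen algebraically (a short check; the identity below is a standard observation about RANDU rather than something derived in this notebook): since $65539^2 \equiv 6\cdot 65539 - 9 \pmod{2^{31}}$, every three consecutive outputs satisfy $x_{k+2} = 6x_{k+1} - 9x_k \pmod{2^{31}}$, which confines the triples to a small family of planes.<jupyter_code># verify the three-term recurrence on raw (unscaled) RANDU integers
xs = prng_randu(1000, scale=False).astype(np.int64)
resid = (xs[2:] - 6*xs[1:-1] + 9*xs[:-2]) % 2**31
print(np.all(resid == 0))   # expected: True<jupyter_output><empty_output><jupyter_text>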
### Optional: try changing the PRNG parameters and see what happens to the hyperplanes.
## Entropy estimation<jupyter_code>#%matplotlib notebook
#import matplotlib.pyplot as plt
#import numpy as np
def symbol_entropy(datalist):
'''Estimate the Shannon entropy for a sequence of symbols.
'''
hist = {}
for item in datalist:
if item not in hist:
hist[item] = 1
else:
hist[item] += 1
counts = np.array( [hist[item] for item in hist])
prob = counts / np.sum(counts)
prob = prob[ prob != 0 ] #; print(prob)
entropy = -np.sum( prob * np.log2(prob) )
return entropy<jupyter_output><empty_output><jupyter_text>### Limited sample limitations
We can use `numpy.random.randint` to generate a sequence of $N$ random integers between 0 and $2^d$. From this we can construct a histogram, estimate the probability distribution, and calculate the entropy.
If we actually do this for $d=31$ and $N=1,000,000$ then we may be surprised to obtain an entropy of just under 20 bits rather than 31. This occurs because we are not adequately sampling the complete set of outcomes. If $N<<2^d$ then most of the histogram bins will have zero counts, approximately $N$ will have one count, and a very few will have two or more. This is effectively the same as a uniform distribution with $N$ bins
$$2^{19.93} = 1,000,000$$
which will have an entropy of almost 20 bits.
The obvious solution to this problem is to increase the number of samples until we can ensure good statistics in all bins. However, this may be difficult for very large $d$.
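The figure quoted above is simple arithmetic and can be checked directly:<jupyter_code>print(np.log2(1_000_000))   # ~19.93 bits: the ceiling imposed by having only 10**6 samples<jupyter_output><empty_output>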
<jupyter_code>V = np.random.randint(low=0, high=2**31,
size=1_000_000, dtype=np.int32)
entropy = symbol_entropy( V )
print( entropy, 2**entropy )<jupyter_output>19.931126569324213 999693.6758730386
<jupyter_text>An alternative strategy is to break large numbers down into smaller components. For example, we might have doubts about the sampling of rare words in Spamlet, but should be relatively confident in the distribution of letters.
The fundamental elements of binary numbers are bits, but we will leave them for optional questions.
Here we will explore the randomness of a 32-bit integer sequence by breaking each value down into four 8-bit "bytes". If the 32-bit sequence is completely random, then each of the four byte sequences should also be random.<jupyter_code>def int32_to_bytes( value ):
''' Break down a 32-bit integer into four 8-bit bytes'''
dt1 = np.dtype(('i4', [('bytes','u1',4)]))
return np.array(value).view(dtype=dt1)['bytes']
sequence = np.int32([0, 1, 2, 255, 256, 1024])
elements = int32_to_bytes(sequence)
for i in range(len(sequence)):
print( sequence[i], '\t', elements[i,:])<jupyter_output>0 [0 0 0 0]
1 [1 0 0 0]
2 [2 0 0 0]
255 [255 0 0 0]
256 [0 1 0 0]
1024 [0 4 0 0]
<jupyter_text>### randint is random
Test the numpy `randint` function on bytes and get the expected value of 31-bits of entropy per 31-bit "word".<jupyter_code>V = np.random.randint(
low=0, high=2**31, size=1_000_000, dtype=np.int32)
b4 = int32_to_bytes(V)
for i in [0,1,2,3]:
sent = symbol_entropy( b4[:,i] )
print(sent)
<jupyter_output>7.999814410978283
7.999814295237033
7.999796607416781
6.999908447887705
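<jupyter_text>The four byte-wise entropies printed above should add up to roughly the expected 31 bits; a quick check using the objects defined in the previous cell:<jupyter_code>byte_entropies = [symbol_entropy(b4[:, i]) for i in range(4)]
print(sum(byte_entropies))   # close to 31 bits in total<jupyter_output><empty_output>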
<jupyter_text>### randu is not
The classic `randu` function gives approximately 20-bits of entropy per 31-bit word when analyzed this way. Note: this should be considered as a best-case upper-bound. It is entirely possible that some other way of analyzing the output of `randu` might give an even lower number.<jupyter_code>V = prng_randu(1_000_000)
b4 = int32_to_bytes(V)
for i in [0,1,2,3]:
sent = symbol_entropy( b4[:,i] )
print(sent)
# q) what happens if we look at differences between sucessive samples?
#
# a) the 3rd bytes are slightly less random and
# the 4th bytes are significantly more random
#
V = prng_randu(500000)
V = V[1:] - V[0:-1]
b4 = int32_to_bytes(V)
#b4 = b4[1:,:] - b4[0:-1,:]
#b4 = b4[:,1:] - b4[:,0:-1]
for i in range(b4.shape[1]):
sent = symbol_entropy( b4[:,i] )
print(sent)<jupyter_output>4.975778208633038
4.981437431456746
4.596261054661544
5.463596406933021
<jupyter_text>## Bitwise (optional)
The paper by Park & Miller mentions a classic PRNG that was commonly used by UNIX operating systems, and claims that "the low bits of the numbers generated are not very random".
We can test this claim by defining a function as given in equation 14.<jupyter_code>def unix_rand(seed=None):
unix_rand.seed = unix_rand.seed if seed is None else seed
if unix_rand.seed is None:
unix_rand.seed = 0
multval, addval, maxval = 1103515245, 12345, 2**31
unix_rand.seed = (multval * unix_rand.seed + addval) % maxval
return unix_rand.seed
print( unix_rand(0) )
print( unix_rand() )
print( unix_rand() )<jupyter_output>12345
1406932606
654583775
<jupyter_text>## Optional: random bits?
Write a function that takes an integer sequence and determines whether a specific bit is set for each value ie.
function call:
isbitset( sequence=[0,2,1,4,7], setbit=1 )
result:
[False, True, False, False, True]
Use this function to examine the output from the unix_rand generator. Then examine the output from the numpy.random generator. Compare and discuss.
Carry out a quantitative analysis using "mutual information".<jupyter_code>import numpy as np
# generate a short sequence of pseudo-random numbers
#
z = [unix_rand(seed=0)]
for i in range(5):
z.append(unix_rand() )
# turn the list into a numpy array
#
z = np.array(z)
setbit = 4
bitmask = 2**setbit
print('setbit=', setbit, 'bitmask=', bitmask)
# look at the sequence
print('\n', z )
# "mask" the bit with a binary "and"
print('\n', z & bitmask )
# is the bit set? (True or False)
print('\n', (z & bitmask) != 0)
# is the bit set? (1 or 0)
print('\n', (z & bitmask) // bitmask)<jupyter_output>setbit= 4 bitmask= 16
[ 12345 1406932606 654583775 1449466924 229283573 1109335178]
[16 16 16 0 16 0]
[ True True True False True False]
[1 1 1 0 1 0]
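<jupyter_text>One possible sketch of the `isbitset` function requested in the optional exercise above (written here purely as an illustration; it reuses the same masking idea as the previous cell):<jupyter_code>import numpy as np

def isbitset(sequence, setbit):
    # True where bit number `setbit` is set in each value of the sequence
    return (np.asarray(sequence) & (1 << setbit)) != 0

print(isbitset([0, 2, 1, 4, 7], setbit=1))   # [False  True False False  True]<jupyter_output><empty_output>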
|
no_license
|
/Class Notes/phys481_week04c_pseudorandom-notes.ipynb
|
kennethsharman/Physics-II
| 11 |
<jupyter_start><jupyter_text># Task 1 Regression on Ames Housing Dataset<jupyter_code>x = pd.read_excel('http://www.amstat.org/publications/jse/v19n3/decock/AmesHousing.xls')<jupyter_output><empty_output><jupyter_text>Documentation for each variable is here:
http://jse.amstat.org/v19n3/decock/DataDocumentation.txt<jupyter_code>x
x.head()
x.columns
x['Lot Area']
x['Lot Area'][1]<jupyter_output><empty_output><jupyter_text>## 1.1
Visualize the univariate distribution of each continuous feature, and the distribution of the target. Do you notice anything? Is there something that might require special treatment?<jupyter_code>continuous_var = ['Lot Frontage', 'Lot Area', 'Mas Vnr Area', 'BsmtFin SF 1', 'BsmtFin SF 2', 'Bsmt Unf SF',
'Total Bsmt SF', '1st Flr SF', '2nd Flr SF', 'Low Qual Fin SF', 'Gr Liv Area', 'Garage Area',
'Wood Deck SF', 'Open Porch SF', 'Enclosed Porch', '3Ssn Porch', 'Screen Porch', 'Pool Area',
'Misc Val', 'SalePrice']<jupyter_output><empty_output><jupyter_text>Let's plot each individual variable:<jupyter_code>fig, axs = plt.subplots(nrows=4, ncols=5, constrained_layout=False, figsize=(30,20))
for j, ax in enumerate(axs.flat):
ax.plot(x[continuous_var[j]])
ax.set_title(continuous_var[j], fontsize = 25)<jupyter_output><empty_output><jupyter_text>## 1.2
Visualize the dependency of the target on each continuous feature (2d scatter plot). Next, we will plot each continuous variable against the target variable `SalePrice`:<jupyter_code>fig, axs = plt.subplots(nrows=4, ncols=5, constrained_layout=False, figsize=(30,20))
for j, ax in enumerate(axs.flat):
ax.scatter(x[continuous_var[j]], x[continuous_var[-1]])
    ax.set_xlabel(continuous_var[j], fontsize = 25)
    ax.set_ylabel(continuous_var[-1], fontsize = 25)
fig.tight_layout()
plt.show()
x[continuous_var[-1]]<jupyter_output><empty_output><jupyter_text>## 1.3
Split the data into a training and a test set. Do not use the test set except for a final evaluation in 1.6. For each categorical variable, cross-validate a Linear Regression model using just this variable (one-hot-encoded). Visualize the relationship of the categorical variables that provide the best R^2 value with the target.
Splitting the data in training and test set:<jupyter_code>from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x[continuous_var[:-1]], x[continuous_var[-1]], random_state=0)
from sklearn.linear_model import Ridge, LinearRegression, Lasso, RidgeCV, LassoCV
from sklearn.model_selection import cross_val_score
y_train.shape
cross_val_score(LinearRegression(), X_train, y_train, cv=5)<jupyter_output>/Users/Mitevski1/opt/anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_validation.py:530: FutureWarning: From version 0.22, errors during fit will result in a cross validation score of NaN by default. Use error_score='raise' if you want an exception raised or error_score=np.nan to adopt the behavior from version 0.22.
FutureWarning)
<jupyter_text>We need to get rid of the `NaN` values. Here we are replacing the missing data with the mean of that column: <jupyter_code>X_train.fillna(X_train.mean(), inplace=True)
y_train.fillna(y_train.mean(), inplace=True)
X_test.fillna(X_test.mean(), inplace=True)
y_test.fillna(y_test.mean(), inplace=True)
np.mean(cross_val_score(LinearRegression(), X_train, y_train, cv=5))
lr = LinearRegression().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))<jupyter_output>Training set score: 0.73
Test set score: 0.65
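<jupyter_text>Task 1.3 above also asks for cross-validating a Linear Regression on each one-hot-encoded categorical variable; the cell below is a minimal sketch for a single column (the column name `Neighborhood` is used purely as an assumed example and any categorical column of the Ames data could be substituted):<jupyter_code>from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import pandas as pd

# one-hot encode a single categorical column and score a Linear Regression on it (R^2 is the default scorer)
col = 'Neighborhood'   # assumed example column
X_cat = pd.get_dummies(x[col], prefix=col)
scores = cross_val_score(LinearRegression(), X_cat, x['SalePrice'], cv=5)
print(col, scores.mean())<jupyter_output><empty_output>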
|
permissive
|
/homeworks_solutions/hw_1_sol_ivan.ipynb
|
edjz2019/COMS4995_apam
| 7 |
<jupyter_start><jupyter_text>### Data Set ###
#### we'll use the TensorFlow flowers dataset ####<jupyter_code>data_root = tf.keras.utils.get_file(
'flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
type(data_root)
len(data_root)
#data_root # it shows the image folder path e.g. 'C:\\Users\\<usernaem>\\.keras\\datasets\\flower_photos'
# load this data into our model is using tf.keras.preprocessing.image.ImageDataGenerator
# It generate batches of tensor image data with real-time data augmentation
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
image_data = image_generator.flow_from_directory(str(data_root))
type(image_data)
image_data
# The resulting object is an iterator that returns image_batch, label_batch pairs.
for image_batch,label_batch in image_data:
print("Image batch shape: ", image_batch.shape)
print("Label batch shape: ", label_batch.shape)
break
# each batch - 32 images; image size - 256 x 256 x 3 (RGB); 5 classes<jupyter_output><empty_output><jupyter_text>### Transfer Learning ###<jupyter_code># We are downloading an available image classifier from TensorFlow Hub
# https://tfhub.dev/s?module-type=image-classification
classifier_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/3"
# older version: "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/2"
IMAGE_SIZE = hub.get_expected_image_size(hub.Module(classifier_url))
IMAGE_SIZE # this is expected height, width for this classifier.
# # A batch of images with shape [batch_size, height, width, 3] should be fed to it
def classifier(x):
classifier_module = hub.Module(classifier_url) # we are using tensorflow_hub
return classifier_module(x)<jupyter_output><empty_output><jupyter_text>https://www.tensorflow.org/hub/api_docs/python/hub/Module
Class Module
Part of a TensorFlow model that can be transferred between models.
A Module represents a part of a TensorFlow graph that can be exported to disk (based on the SavedModel format) and later re-loaded. A Module has a defined interface that allows it to be used in a replaceable way, with little or no knowledge of its internals and its serialization format. <jupyter_code>classifier
classifier_layer = layers.Lambda(classifier, input_shape = IMAGE_SIZE+[3])
classifier_model = tf.keras.Sequential([classifier_layer])
classifier_model.summary()
# so it is as if we are taking the available classifier, define appropriate input shape and create the model
IMAGE_SIZE+[3]<jupyter_output><empty_output><jupyter_text>#### Rebuild the data generator ####<jupyter_code>image_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SIZE)
for image_batch,label_batch in image_data:
print("Image batch shape: ", image_batch.shape)
print("Labe batch shape: ", label_batch.shape)
break
# NOW each batch - 32 images; image size - 224 x 224 x 3 (RGB); 5 classes
# When using Keras, TFHub modules need to be manually initialized.
import tensorflow.keras.backend as K
sess = K.get_session()
init = tf.global_variables_initializer()
sess.run(init)
# download a single image and try the model
import numpy as np
import PIL.Image as Image
grace_hopper = tf.keras.utils.get_file('image.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg')
grace_hopper = Image.open(grace_hopper).resize(IMAGE_SIZE)
grace_hopper
grace_hopper = np.array(grace_hopper)/255.0
grace_hopper.shape
# Add a batch dimension, and pass the image to the model.
grace_hopper[np.newaxis, ...].shape<jupyter_output><empty_output><jupyter_text>np.newaxis
`np.newaxis` is used to increase the dimensionality of an existing array by one each time it is used. Thus,
1D array will become 2D array
2D array will become 3D array
3D array will become 4D array
4D array will become 5D array<jupyter_code>result = classifier_model.predict(grace_hopper[np.newaxis, ...])
result.shape
# The result is a 1001 element vector of logits, rating the probability of each class for the image.
# So the top class ID can be found with argmax:
predicted_class = np.argmax(result[0], axis=-1)
predicted_class
# Decode the predictions
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
predicted_class_name = imagenet_labels[predicted_class]
predicted_class_name
plt.imshow(grace_hopper)
plt.axis('off')
_ = plt.title("Prediction: " + predicted_class_name)<jupyter_output><empty_output><jupyter_text>### Run on batch of images ###<jupyter_code>image_batch.shape
result_batch = classifier_model.predict(image_batch)
result_batch.shape
labels_batch = imagenet_labels[np.argmax(result_batch, axis=-1)]
labels_batch
plt.figure(figsize=(10,9))
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
plt.title(labels_batch[n])
plt.axis('off')
_ = plt.suptitle("ImageNet predictions")
# SO we downloaded a model and used it<jupyter_output><empty_output><jupyter_text>### Simple Transfer Learning ###
TensorFlow Hub also distributes models without the top classification layer. These can be used to easily do transfer learning.<jupyter_code>feature_extractor_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/3"
# its a different URL from previous section
# older: "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2"
def feature_extractor(x):
feature_extractor_module = hub.Module(feature_extractor_url)
return feature_extractor_module(x)
IMAGE_SIZE = hub.get_expected_image_size(hub.Module(feature_extractor_url))
IMAGE_SIZE
image_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SIZE)
for image_batch,label_batch in image_data:
print("Image batch shape: ", image_batch.shape)
print("Labe batch shape: ", label_batch.shape)
break
features_extractor_layer = layers.Lambda(feature_extractor, input_shape=IMAGE_SIZE+[3])
features_extractor_layer.trainable = False # we'll train only our own last layer
# define our model
model = tf.keras.Sequential([
features_extractor_layer,
layers.Dense(image_data.num_classes, activation='softmax') # basically we are adding a Dense softmax layer
])
model.summary()
# Initialize the TFHub module.
init = tf.global_variables_initializer()
sess.run(init)
# Test run a single batch, to see that the result comes back with the expected shape.
result = model.predict(image_batch)
result.shape
result # we have not trained the last layer yet - so this result is not useful<jupyter_output><empty_output><jupyter_text>### Train the Model ###<jupyter_code>model.compile(
optimizer=tf.train.AdamOptimizer(),
loss='categorical_crossentropy',
metrics=['accuracy'])
class CollectBatchStats(tf.keras.callbacks.Callback):
def __init__(self):
self.batch_losses = []
self.batch_acc = []
def on_batch_end(self, batch, logs=None):
self.batch_losses.append(logs['loss'])
self.batch_acc.append(logs['acc'])
steps_per_epoch = image_data.samples//image_data.batch_size
batch_stats = CollectBatchStats()
model.fit((item for item in image_data), epochs=1,
steps_per_epoch=steps_per_epoch,
callbacks = [batch_stats])
plt.figure()
plt.ylabel("Loss")
plt.xlabel("Training Steps")
plt.ylim([0,2])
plt.plot(batch_stats.batch_losses)
plt.figure()
plt.ylabel("Accuracy")
plt.xlabel("Training Steps")
plt.ylim([0,1])
plt.plot(batch_stats.batch_acc)<jupyter_output><empty_output><jupyter_text>### Prediction ###<jupyter_code>image_data.class_indices
# https://keras.io/preprocessing/image/
# The dictionary containing the mapping from class names to class indices can be obtained via the attribute class_indices
image_data.class_indices.items()
label_names = sorted(image_data.class_indices.items(), key=lambda pair:pair[1])
label_names = np.array([key.title() for key, value in label_names])
label_names
result_batch = model.predict(image_batch)
labels_batch = label_names[np.argmax(result_batch, axis=-1)]
labels_batch
plt.figure(figsize=(10,9))
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
plt.title(labels_batch[n])
plt.axis('off')
_ = plt.suptitle("Model predictions")<jupyter_output><empty_output>
|
no_license
|
/TransferLearningImageRecognition.ipynb
|
i-chatterjee/AIMLExperiments
| 9 |
<jupyter_start><jupyter_text>### Task 1.
Read the feature table from the file features.csv using the code given above. Remove the features related to the match outcome (they are marked in the data description as absent from the test set).<jupyter_code>import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import time
import datetime
%matplotlib inline
df = pd.read_csv("features.csv", index_col='match_id')
df.head()<jupyter_output><empty_output><jupyter_text>#### Description of the features in the table
- `match_id`: identifier of the match in the dataset
- `start_time`: match start time (unixtime)
- `lobby_type`: type of lobby in which the players gather (decoded in `dictionaries/lobbies.csv`)
- Sets of features for each player (Radiant players have the prefix `rN`, Dire players `dN`):
    - `r1_hero`: the player's hero (decoded in dictionaries/heroes.csv)
    - `r1_level`: maximum hero level reached (within the first 5 minutes of the game)
    - `r1_xp`: maximum experience gained
    - `r1_gold`: hero net worth reached
    - `r1_lh`: number of units killed
    - `r1_kills`: number of players killed
    - `r1_deaths`: number of hero deaths
    - `r1_items`: number of items bought
- Features of the "first blood" event. If first blood did not occur within the first 5 minutes, these features take a missing value
    - `first_blood_time`: in-game time of first blood
    - `first_blood_team`: team that drew first blood (0 — Radiant, 1 — Dire)
    - `first_blood_player1`: player involved in the event
    - `first_blood_player2`: second player involved in the event
- Features for each team (prefixes `radiant_` and `dire_`)
    - `radiant_bottle_time`: time the team first acquired a "bottle" item
    - `radiant_courier_time`: time a "courier" item was acquired
    - `radiant_flying_courier_time`: time a "flying_courier" item was acquired
    - `radiant_tpscroll_count`: number of "tpscroll" items in the first 5 minutes
    - `radiant_boots_count`: number of "boots" items
    - `radiant_ward_observer_count`: number of "ward_observer" items
    - `radiant_ward_sentry_count`: number of "ward_sentry" items
    - `radiant_first_ward_time`: time the team placed its first "observer" ward, i.e. an item that reveals part of the map
- Match outcome (these fields are absent from the test set, since they contain information that goes beyond the first 5 minutes of the match)
    - `duration`: match duration
    - `radiant_win`: 1 if team Radiant won, 0 otherwise
    - Status of towers and barracks at the end of the match (see the description of the dataset fields)
    - `tower_status_radiant`
    - `tower_status_dire`
    - `barracks_status_radiant`
    - `barracks_status_dire`<jupyter_code>features_to_delete = ["duration", "radiant_win", "tower_status_radiant", "tower_status_dire",
"barracks_status_radiant", "barracks_status_dire"]
features = df.drop(features_to_delete, axis=1)
y = df["radiant_win"]
features[:5]<jupyter_output><empty_output><jupyter_text>### Task 2.
Check the data for missing values using the count() function, which shows the number of filled-in values for each column. Are there many missing values in the data? Write down the names of the features that have missing values and, for any two of them, try to explain why their values might be missing.<jupyter_code>features.shape
print "Number of features with missed vales", sum(features.count()<97230)
features[features.columns[features.count()<97230]].count()<jupyter_output>Number of features with missed vales 12
<jupyter_text>#### Answer 2.
There are 12 features with missing values in total:
- `first_blood_time`
- `first_blood_team`
- `first_blood_player1`
- `first_blood_player2`
- `radiant_bottle_time`
- `radiant_courier_time`
- `radiant_flying_courier_time`
- `radiant_first_ward_time`
- `dire_bottle_time`
- `dire_courier_time`
- `dire_flying_courier_time`
- `dire_first_ward_time`
The first three features (`first_blood_time`, `first_blood_team`, `first_blood_player1`) describe the parameters of first blood in the match and, as stated in the description, "If the first blood event did not happen within the first 5 minutes, these features take a missing value"; that is, in 77677 out of 97230 games in the training set this event happened later or did not happen at all.
It is also clear that the "bottle" item was not bought in every game (the `radiant_bottle_time` and `dire_bottle_time` metrics).
### Task 3.
Replace the missing values with zeros using the fillna() function. This is actually the preferable approach for logistic regression, since it lets a missing value contribute nothing to the prediction. For trees, the best option is often to replace the missing value with a very large or very small number — in that case, when a node split is built, objects with missing values can be sent to a separate branch of the tree. There are other approaches as well — for example, replacing the missing value with the mean of the feature. We do not require this in the assignment, but if you wish, try different ways of handling missing values and compare them.<jupyter_code>X = features.fillna(0)
sum(X.count() < 97230)
features_avg = features
for f in features.columns[features.count()<97230]:
    features_avg[f] = features[f].fillna(features[f].median()) # replace missing values of the feature with its median
sum(features_avg.count()<97230)<jupyter_output><empty_output><jupyter_text>### Task 4.
Which column contains the target variable? Write down its name.
#### Answer 4.
The `radiant_win` column, now stored in the variable y.
### Task 5.
Let us forget for a moment that there are categorical features in the data and try to train gradient boosting over trees on the existing object-feature matrix. Fix the split generator for cross-validation with 5 folds (KFold) and do not forget to shuffle the data (shuffle=True), since the rows in the table are sorted by time and without shuffling you may run into undesirable effects when estimating quality. Evaluate the quality of gradient boosting (GradientBoostingClassifier) using this cross-validation, trying different numbers of trees (at a minimum, test the following values for the number of trees: 10, 20, 30). Did the classifiers take long to tune? Has the optimum been reached at the tested values of the n_estimators parameter, or will the quality most likely keep growing as it is increased further?<jupyter_code>df_shuffle = pd.concat([X,y], axis=1).reindex(np.random.permutation(pd.concat([X,y], axis=1).index))
X_shuffled = df_shuffle.drop("radiant_win", axis=1)
y_shuffled = df_shuffle["radiant_win"]
X_part, y_part = X_shuffled[:len(y_shuffled)/3], y_shuffled[:len(y_shuffled)/3]
for n_estimators in [10, 20, 30, 40, 50, 60, 70, 80]:
start_time = datetime.datetime.now()
clf = GradientBoostingClassifier(n_estimators=n_estimators, random_state=42)
kf = KFold(X_part.shape[0], random_state=42, shuffle=True, n_folds=5)
print n_estimators, cross_val_score(clf, X_part, y_part, cv=kf, scoring='roc_auc').mean()
print 'Time elapsed:', datetime.datetime.now() - start_time
# There is a trade-off between learning_rate and n_estimators. Let's try different learning_rate values:
for learning_rate in [1, 0.5, 0.3, 0.2, 0.1]:
clf = GradientBoostingClassifier(n_estimators=30, random_state=42, learning_rate=learning_rate)
kf = KFold(X_part.shape[0], random_state=42, shuffle=True, n_folds=5)
print learning_rate, cross_val_score(clf, X_part, y_part, cv=kf, scoring='roc_auc').mean()
clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.3)
kf = KFold(X.shape[0], random_state=42, shuffle=True, n_folds=5)
cross_val_score(clf, X, y, cv=kf, scoring='roc_auc').mean()<jupyter_output><empty_output><jupyter_text>#### Answer 5.
Since tuning the algorithms took too much time, I first tried the computations on a third of the sample. As can be seen, the time to fit the classifier with 10 to 80 trees varies from about 20 seconds to 2.5 minutes. Remembering the homework on gradient boosting, I expect a similar situation: after decreasing for a while, the test-set error will level off at a constant.
### Task 2.1.
Evaluate the quality of logistic regression (sklearn.linear_model.LogisticRegression with L2 regularization) using cross-validation with the same scheme that was used for gradient boosting. Choose the best regularization parameter (C). What is the best quality you obtained? How does it compare with the quality of gradient boosting? How can you explain the difference? Does logistic regression run faster than gradient boosting?<jupyter_code>from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
X_scal = StandardScaler().fit_transform(X)
for C in [10, 5, 1, 0.5, 0.2, 0.1, 0.05, 0.01]:
start_time = datetime.datetime.now()
clf = LogisticRegression(penalty='l2', C=C, random_state=42)
kf = KFold(X_scal.shape[0], random_state=42, shuffle=True, n_folds=5)
print C, cross_val_score(clf, X_scal, y, cv=kf, scoring='roc_auc').mean()
print 'Time elapsed:', datetime.datetime.now() - start_time<jupyter_output>10 0.716522287837
Time elapsed: 0:00:16.217781
5<jupyter_text>#### Answer 2.1
The best score obtained is ~0.716, and it is practically independent of the parameter C. This value is better than the one obtained with gradient boosting (0.70).
At the same time, logistic regression runs considerably faster than gradient boosting: up to 20 seconds.
### Task 2.2.
Among the features in the data there are categorical ones that we have been using as numeric, which is hardly a good idea. There are eleven categorical features in this problem: lobby_type and r1_hero, r2_hero, ..., r5_hero, d1_hero, d2_hero, ..., d5_hero. Remove them from the data and run cross-validation for logistic regression on the new data, again choosing the best regularization parameter. Did the quality change? How can you explain this?<jupyter_code>params_to_drop = ["lobby_type", "r1_hero", "r2_hero", "r3_hero", "r4_hero", "r5_hero",
"d1_hero", "d2_hero", "d3_hero", "d4_hero", "d5_hero"]
X_wo_features = X.drop(params_to_drop, axis=1)
X_cutted = StandardScaler().fit_transform(X_wo_features)
for C in [10, 5, 1, 0.5, 0.2, 0.1, 0.05, 0.01]:
clf = LogisticRegression(penalty='l2', C=C, random_state=42)
kf = KFold(X_cutted.shape[0], random_state=42, shuffle=True, n_folds=5)
print C, cross_val_score(clf, X_cutted, y, cv=kf, scoring='roc_auc').mean()<jupyter_output> 10 0.716529971503
5 0.716530134606
1<jupyter_text>#### Answer 2.2.
After removing the parameters listed above, the improvement in model quality is barely noticeable, which may indicate that in this form (as numeric features) these parameters do not play an important role in building the model. For example, hero number 1 looks very similar to hero #2 and very different from hero #50, which may not correspond to reality: the heroes are not ranked in this list.
### Task 2.3.
In the previous step we removed the features rM_hero and dM_hero from the data, which indicate exactly which heroes played for each team. These are important features — heroes have different characteristics, and some of them win more often than others. Find out from the data how many distinct hero identifiers exist in this game (the unique or value_counts function may come in handy).<jupyter_code>len(pd.Series(X[[i for i in params_to_drop[1:]]].values.ravel()).unique())<jupyter_output><empty_output><jupyter_text>#### Answer 2.3.
There are 108 distinct heroes.
### Task 2.4.
Let us use the "bag of words" approach to encode the hero information. Suppose there are N distinct heroes in the game. We will create N features, where the i-th equals zero if the i-th hero did not take part in the match; one if the i-th hero played for team Radiant; and minus one if the i-th hero played for team Dire. Below you can find code that performs this transformation. Add the resulting features to the numeric ones you used in the second item of this stage.<jupyter_code>N = 108
data = X[params_to_drop[1:]]
X_pick = np.zeros((data.shape[0], N+4))
for i, match_id in enumerate(data.index):
for p in xrange(5):
X_pick[i, data.ix[match_id, 'r%d_hero' % (p+1)]-1] = 1.0
X_pick[i, data.ix[match_id, 'd%d_hero' % (p+1)]-1] = -1.0
X_pick_df = pd.DataFrame(X_pick, columns=["hero_"+str(i) for i in range(X_pick.shape[1])], index=X_wo_features.index)
X_categor = pd.concat([X_pick_df, X_wo_features], axis=1)
X_categor[:4]<jupyter_output><empty_output><jupyter_text>### Task 2.5.
Run cross-validation for logistic regression on the new data, choosing the best regularization parameter. What quality did you get? Did it improve? How can you explain this?<jupyter_code>X_categor_scaled = StandardScaler().fit_transform(X_categor)
for C in [10, 5, 1, 0.5, 0.2, 0.1, 0.05, 0.01]:
clf = LogisticRegression(penalty='l2', C=C, random_state=42)
kf = KFold(X_categor_scaled.shape[0], random_state=42, shuffle=True, n_folds=5)
print C, cross_val_score(clf, X_categor_scaled, y, cv=kf, scoring='roc_auc').mean()
# try sklearn.ensemble.BaggingClassifier
from sklearn.ensemble import BaggingClassifier
clf = LogisticRegression(penalty='l2', C=0.05, random_state=42)
for n in [5,7,10,15]:
clf_bagging = BaggingClassifier(base_estimator=clf, n_estimators=n)
kf = KFold(X_categor_scaled.shape[0], random_state=42, shuffle=True, n_folds=5)
print n, cross_val_score(clf, X_categor_scaled, y, cv=kf, scoring='roc_auc').mean()<jupyter_output>5 0.751936273132
7 0.751936273132
10 0.751936273132
15 0.751936273132
<jupyter_text>#### Answer 2.5.
The quality is better — 0.752. Using dummy variables made it possible to represent the "chosen heroes" features as categorical.
### Task 2.6.
Build predictions of the probability that team Radiant wins for the test set using the best of the models studied (best in terms of AUC-ROC on cross-validation). Make sure the predicted probabilities are sensible — they lie in the interval [0, 1] and are not all identical (i.e. the model did not turn out to be constant).<jupyter_code>test = pd.read_csv("features_test.csv", index_col='match_id')
test[:5]
clf = LogisticRegression(penalty='l2', C=0.05, random_state=42)
clf.fit(X_categor_scaled, y)
test = test.fillna(0)
data_test = test[params_to_drop[1:]]
X_pick_test = np.zeros((data_test.shape[0], N+4))
for i, match_id in enumerate(data_test.index):
for p in xrange(5):
X_pick_test[i, data_test.ix[match_id, 'r%d_hero' % (p+1)]-1] = 1.0
X_pick_test[i, data_test.ix[match_id, 'd%d_hero' % (p+1)]-1] = -1.0
X_pick_df_test = pd.DataFrame(X_pick_test, columns=["hero_player"+str(i) for i in range(X_pick_test.shape[1])],
index=test.index)
X_categor_test = pd.concat([X_pick_df_test, test.drop(params_to_drop, axis=1)], axis=1)
X_categor_scaled_test = StandardScaler().fit_transform(X_categor_test)
clf.predict_proba(X_categor_scaled_test)[:,1]
#test_out = pd.concat([pd.DataFrame(X_categor_test.index), pd.DataFrame(clf.predict_proba(X_categor_scaled_test)[:,1])], axis=1)
test_out = pd.DataFrame(clf.predict_proba(X_categor_scaled_test)[:,1], index=X_categor_test.index, columns=['radiant_win'])
test_out
test_out.to_csv('submission.csv')
col_num = X_categor_scaled_test.shape[1]
test_out = features_test.drop(features_test.columns[[range(0, col_num - 1)]], axis=1)
test_out.to_csv('submission.csv') # Kaggle score: 0.75529<jupyter_output><empty_output>
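<jupyter_text>As a small explicit check of the requirements from Task 2.6 (reusing the objects already defined above):<jupyter_code>preds = clf.predict_proba(X_categor_scaled_test)[:, 1]
print(preds.min(), preds.max())   # should lie inside [0, 1]
print(preds.std())                # should be clearly non-zero, i.e. the model is not constant<jupyter_output><empty_output>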
|
no_license
|
/Param_param.ipynb
|
Kivakka/courses
| 11 |
<jupyter_start><jupyter_text># model with two types of Ailing
Estimates suggest that 10–40% of those infected may show no symptoms of the disease.
It is assumed that asymptomatic people behave carelessly and do not isolate.
Differences from the usual SIR model:
the coefficient p_I — the share of the asymptomatic/non-isolating;
two beta_I_I coefficients — one for the asymptomatic/non-isolating and one for the symptomatic/isolating;
two gamma coefficients — similarly, they show how long these two groups remain infectious: gamma1 = 1/5, i.e. in about 5
days such a person recognises the symptoms and isolates or is hospitalised, and gamma2 = 1/12, i.e. such people take about 12 days to recover.
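For reference, the discrete-time update implemented in the function F() below can be written out as follows (this is only a restatement of the code, using the parameter names defined later in the notebook):

$$S_{t} = S_{t-1} - \frac{\left(\beta_I I_{t-1} + \beta_A A_{t-1} + \beta_D D_{t-1}\right) S_{t-1}}{N}$$

$$I_{t} = \left(1 - \delta_I - \alpha - \gamma_I\right) I_{t-1} + \frac{\left(\beta_I I_{t-1} + \beta_A A_{t-1} + \beta_D D_{t-1}\right) S_{t-1}}{N}$$

$$A_{t} = \left(1 - \delta_A - \gamma_A\right) A_{t-1} + \alpha I_{t-1}$$

$$D_{t} = \left(1 - \gamma_D\right) D_{t-1} + \delta_I I_{t-1} + \delta_A A_{t-1}$$

$$R_{t} = R_{t-1} + \gamma_I I_{t-1} + \gamma_A A_{t-1} + \gamma_D D_{t-1}$$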
<jupyter_code>import os
import math
import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
import random
import numpy as np
import pandas as pd
import requests
import csv
import datetime as dt
import time
session_date = dt.datetime.now().date()
session_name = 'session_'+str(session_date)
session_mode = input('type of session: prod/test? _ ')
session_name = session_name+'_'+session_mode+'/'
try:
os.mkdir(session_name)
except:
pass
os.chdir(session_name)
response = requests.get('https://raw.githubusercontent.com/alexei-kouprianov/COVID.2019.ru/master/data/momentary.txt')
raw_data = response.text
if response.status_code == 250:
file_name = 'covid ' + session_name + '.txt'
with open('covid_stable.txt','w') as f:
f.write(raw_data)
else:
file_name = 'covid_stable.txt'
with open(file_name,'w') as f:
f.write(raw_data)
with open(file_name) as f:
covid_data = pd.read_csv(f,delimiter='\t')
#covid_data[0:4]
os.getcwd()
detected = covid_data['EVENT'] == 'detected'
recovered = covid_data['EVENT'] == 'recovered'
deceased = covid_data['EVENT'] == 'deceased'
moscow = covid_data['LOCUS'] == 'Moscow'
detected_moscow = covid_data[detected & moscow][['TIMESTAMP','NUMBER']]
recovered_moscow = covid_data[recovered & moscow][['TIMESTAMP','NUMBER']]
deceased_moscow = covid_data[deceased & moscow][['TIMESTAMP','NUMBER']]
detected_moscow.columns = ['DATE','DETECTED']
recovered_moscow.columns = ['DATE','RECOVERED']
deceased_moscow.columns = ['DATE','DECEASED']
detected_moscow['DATE'] = detected_moscow['DATE'].map(pd.to_datetime).map(dt.datetime.date)
detected_moscow.set_index('DATE',inplace=True)
recovered_moscow['DATE'] = recovered_moscow['DATE'].map(pd.to_datetime).map(dt.datetime.date)
recovered_moscow.set_index('DATE',inplace=True)
print(deceased_moscow[0:6])
# manually removed a bad date; an automatic check should be added
deceased_moscow.at[315,'DATE'] = dt.datetime(2020,3,30)
#
deceased_moscow['DATE'] = deceased_moscow['DATE'].map(pd.to_datetime).map(dt.datetime.date)
deceased_moscow.set_index('DATE',inplace=True)
date_cur = max(detected_moscow.index)
def F(SIADR,t,N=0,**params):
day = dt.timedelta(days=1)
beta_D = params['beta_D']
gamma_I = params['gamma_I']
gamma_A = params['gamma_A']
gamma_D = params['gamma_D']
alpha = params['alpha']
delta_I = params['delta_I']
delta_A = params['delta_A']
beta_A = params['beta_A']
beta_I = params['beta_I']
S = SIADR.loc[t-day,'S'] - (beta_I*SIADR.loc[t-day,'I'] +
beta_D*SIADR.loc[t-day,'D'] + beta_A*SIADR.loc[t-day,'A'])*SIADR.loc[t-day,'S']/N
I = SIADR.loc[t-day,'I']*(1 - delta_I - alpha - gamma_I) + (beta_I*SIADR.loc[t-day,'I'] + beta_D*SIADR.loc[t-day,'D'] + beta_A*SIADR.loc[t-day,'A'])*SIADR.loc[t-day,'S']/N
A = SIADR.loc[t-day,'A']*(1 - delta_A - gamma_A) + alpha*SIADR.loc[t-day,'I']
D = SIADR.loc[t-day,'D']*(1 - gamma_D) + delta_I*SIADR.loc[t-day,'I'] + delta_A*SIADR.loc[t-day,'A']
R = SIADR.loc[t-day,'R'] + gamma_D*SIADR.loc[t-day,'D'] + gamma_A*SIADR.loc[t-day,'A'] + gamma_I*SIADR.loc[t-day,'I']
SIADR.loc[t] = [S,I,A,D,R]
def dF(dSIADR,SIADR,t,N=0,**params):
day = dt.timedelta(days=1)
beta_D = params['beta_D']
gamma_I = params['gamma_I']
gamma_A = params['gamma_A']
gamma_D = params['gamma_D']
alpha = params['alpha']
delta_I = params['delta_I']
delta_A = params['delta_A']
beta_A = params['beta_A']
beta_I = params['beta_I']
dI = (beta_I*SIADR.loc[t-day,'I'] + beta_D*SIADR.loc[t-day,'D'] + beta_A*SIADR.loc[t-day,'A'])*SIADR.loc[t-day,'S']/N
dS = SIADR.loc[t-day,'S'] - dI
dA = alpha*SIADR.loc[t-day,'I']
dD = delta_I*SIADR.loc[t-day,'I'] + delta_A*SIADR.loc[t-day,'A']
dR = gamma_D*SIADR.loc[t-day,'D'] + gamma_A*SIADR.loc[t-day,'A'] + gamma_I*SIADR.loc[t-day,'I']
dSIADR.loc[t] = [dS,dI,dA,dD,dR]
def statistics(dSIADR, acc_):
maxs=dSIADR.idxmax()
print('max dD = {} at {}'.format(dSIADR.loc[maxs['dD']]['dD'], maxs['dD']))
print('max dA = {} at {}'.format(dSIADR.loc[maxs['dA']]['dA'], maxs['dA']))
print('max dI = {} at {}'.format(dSIADR.loc[maxs['dI']]['dI'], maxs['dI']))
print("total I = {}".format(acc_['dI'][-1]),
"total I/N = {}".format(acc_['dI'][-1]/N),
"total D = {}".format(acc_['dD'][-1]),
"total D/I = {}".format(acc_['dD'][-1]/acc_['dI'][-1]))
def process_calculation(N,T,n, inits, t_int, dyn_params):
'''initialization'''
SIADRs = []
dSIADRs = []
date0 = t_int[0]
for i in range(n):
SIADR = pd.DataFrame({'S':inits[i]['S'],'I':inits[i]['I'],'A':inits[i]['A'],
'D':inits[i]['D'],'R':inits[i]['R']},index = [date0])
dSIADR = pd.DataFrame({'dS':N-inits[i]['S'],'dI':inits[i]['I'],'dA':inits[i]['A'],
'dD':inits[i]['D'],'dR':inits[i]['R']},index = [date0])
SIADRs.append(SIADR)
dSIADRs.append(dSIADR)
'''iterations'''
for t in t_int[1::]:
for i in range(n):
F(SIADRs[i],t,N,**dyn_params[i].loc[t])
dF(dSIADRs[i],SIADRs[i],t,N,**dyn_params[i].loc[t])
return(SIADRs, dSIADRs)
''' initializing parameters of the model '''
#first COVID case - 3.03
#R,I,D : 1763, 24.4k, 176 (47th day)
# according to published estimates, asymptomatic cases may make up 20 to 50% of all cases
N = 13000000
beta_I = 0.2 # infection rate (I-I)
beta_A = 0.6 # infection rate (C-I)
beta_D = 0.1 # infection rate (D-I)
#beta_I_I2 = 0.046 # infection rate for those who isolate
delta_I = 0 # testing rate of the asymptomatic, I-S
delta_A = 0.1 # testing rate of the symptomatic, C-S
alpha = (2/3)*1/5 # rate of developing symptoms, I-S (5 = days until symptoms appear, 2/3 because only ~66% are symptomatic)
gamma_D = 1/15 # recovery rate D-R
gamma_A = 1/20 # recovery rate C-R
gamma_I = 1/20 # recovery rate I-R
delta_I1 = delta_I
delta_A1 = delta_A
beta_A1 = beta_A
beta_I1 = beta_I
# R0_1 = 0.5
# R0_2 = 2.3
T = 350
I = 0
R = 0
A = 2
D = 0
S = N-I-A-R-D
date0 = dt.datetime(2020,2,22).date() # date when the first infected (and not yet detected) person appeared
t_int = [date0 + dt.timedelta(days=i) for i in range(T)]
''' initializing variables '''
SIADR = pd.DataFrame({'S':S,'I':I,'A':A,'D':D,'R':R},index = [date0])
dSIADR = pd.DataFrame({'dS':N-S,'dI':I,'dA':A,'dD':D,'dR':R},index = [date0])
SIADR_1 = pd.DataFrame({'S':S,'I':I,'A':A,'D':D,'R':R},index = [date0])
dSIADR_1 = pd.DataFrame({'dS':N-S,'dI':I,'dA':A,'dD':D,'dR':R},index = [date0])
SIADR_2 = pd.DataFrame({'S':S,'I':I,'A':A,'D':D,'R':R},index = [date0])
dSIADR_2 = pd.DataFrame({'dS':N-S,'dI':I,'dA':A,'dD':D,'dR':R},index = [date0])
SIADR_3 = pd.DataFrame({'S':S,'I':I,'A':A,'D':D,'R':R},index = [date0])
dSIADR_3 = pd.DataFrame({'dS':N-S,'dI':I,'dA':A,'dD':D,'dR':R},index = [date0])
# modl, dmodl = process_calculation(N,T,1,)
dyn_params = []
beta_Is = pd.Series(index = t_int, data = [beta_I]*len(t_int))
beta_As = pd.Series(index = t_int, data = [beta_A]*len(t_int))
beta_Ds = pd.Series(index = t_int, data = [beta_D]*len(t_int))
delta_Is = pd.Series(index = t_int, data = [delta_I]*len(t_int))
delta_As = pd.Series(index = t_int, data = [delta_A]*len(t_int))
gamma_Is = pd.Series(index = t_int, data = [gamma_I]*len(t_int))
gamma_As = pd.Series(index = t_int, data = [gamma_A]*len(t_int))
gamma_Ds = pd.Series(index = t_int, data = [gamma_D]*len(t_int))
alphas = pd.Series(index = t_int, data = [alpha]*len(t_int))
setts = [pd.DataFrame({'beta_I':beta_Is,'beta_A':beta_As,'beta_D':beta_Ds,
'delta_I':delta_Is,'delta_A':delta_As,'alpha':alphas,
'gamma_I':gamma_Is, 'gamma_A':gamma_As, 'gamma_D':gamma_Ds,})]
inits = [{'S':S,'I':I,'D':D,'A':A,'R':R}]
mdl,dmdl = process_calculation(N,T,1,inits,t_int,setts)
''' calculating model day-by-day '''
# dates of significant events
date_iso = dt.datetime(2020,3,31).date()
date_test_start = dt.datetime(2020,3,31).date()
date_iso_strict = dt.datetime(2020,4,13).date()
date_test_double = dt.datetime(2020,4,30).date()
date_start_prognose = dt.datetime(2020,5,8).date()
# delay related to the illness period - 5 days
day = dt.timedelta(days=1)
# covid_period = 5*day
# date_iso += covid_period
# date_iso_strict += covid_period
params={'beta_D':beta_D,'beta_A':beta_A,'beta_I':beta_I,
'gamma_I':gamma_I,'gamma_A':gamma_A,'gamma_D':gamma_D,
'alpha':alpha,'delta_I':delta_I,'delta_A':delta_A}
params1=params.copy()
params2=params.copy()
params3=params.copy()
start_time = time.time()
for t in t_int[1::]:
if t == date_iso:
beta_I = 0.1
delta_A = 0.175
beta_A = 0.2
delta_I = 0.04
        #### for a uniform step size
d1 = (date_test_double-date_iso).days
d2 = (date_start_prognose-date_test_double).days
d3 = d1+d2
ddA = ((0.8-delta_A)/(d1+2*d2),2*(0.8-delta_A)/(d1+2*d2))
ddI = ((0.5-delta_I)/(d1+2*d2),2*(0.5-delta_I)/(d1+2*d2))
####
params.update({'delta_I':delta_I,'delta_A':delta_A,'beta_A':beta_A,'beta_I':beta_I})
params2=params.copy()
params3=params.copy()
        # params3['delta_A'] = 1 # ENABLE FOR COMPARISON
if t> date_iso:
if t < date_test_double:
params['delta_I'] += 0.005
params['delta_A'] += 0.005
params2=params.copy()
# params3=params.copy()
params3['delta_I'] += 0.005
params3['delta_A'] += 0.005
        elif t <= date_start_prognose: # need to make sure this does not go beyond reasonable values
params['delta_I'] += 0.01
params['delta_A'] += 0.01
params2=params.copy()
params3=params.copy()
else:
            #params2['beta_D'] = 0 # ENABLE FOR COMPARISON
            params2['beta_D'] = 0.05 # ENABLE FOR COMPARISON
            params3['beta_D'] = 0.05 # ENABLE FOR REPEATED COMPARISON
            params3['delta_A'] = 0.7 # ENABLE FOR COMPARISON
            params3['delta_I'] = 0.5 # ENABLE FOR COMPARISON
            # params3['delta_A'] = 1 # ENABLE FOR COMPARISON
            # params3['delta_I'] = 1 # ENABLE FOR COMPARISON
F(SIADR,t,N,**params)
dF(dSIADR,SIADR,t,N,**params)
F(SIADR_1,t,N,**params1)
dF(dSIADR_1,SIADR_1,t,N,**params1)
F(SIADR_2,t,N,**params2)
dF(dSIADR_2,SIADR_2,t,N,**params2)
F(SIADR_3,t,N,**params3)
dF(dSIADR_3,SIADR_3,t,N,**params3)
end_time = time.time()
print(end_time-start_time)
'''cumulative sums'''
acc_ = dSIADR.cumsum()
acc_1 = dSIADR_1.cumsum()
acc_2 = dSIADR_2.cumsum()
acc_3 = dSIADR_3.cumsum()
SIADR.plot(title=f'Dynamics of group sizes',figsize=[16,10])
plt.savefig('SIADR.png')
'''Plots of the best model and comparison with the data'''
disp_dD = dSIADR_3['dD'][:date_cur]#dD3[[x<=date_cur for x in t_int]]
disp_dD.plot.bar(title=f'number of new registered cases',figsize=[16,15])
#mdates.date2num(date_test_start), mdates.date2num(date_cur)
# x1 = (date_test_start- date0).days
# y1 = disp_dD[x1]
# dx1 = 0.1*len(disp_dD.index)
# dy1 = 0.1*max(disp_dD)
# plt.arrow(x1,y1,dx1,dy1)
# d2 = date_test_double - date0
# plt.axvspan(d1,d1, color='red')
fig3, (ax3, ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,15],sharey=True)
ax3.set_title(f'number of newly registered cases')
ax3.set_xlabel('date')
ax3.set_ylabel('number of new cases')
ax3.bar(disp_dD.index, disp_dD,label='prediction')
ax3.legend()
#dIR = [x-y for (x,y) in zip(dI,dR)]
# = detected_moscow.index.tolist()
ax4.bar(detected_moscow.index, detected_moscow['DETECTED'],label='actual',color='orange')
ax4.legend()
fig4, ax5 = plt.subplots(nrows=1,ncols=1,figsize=[16,10],sharey=True)
detected_moscow_ = pd.Series(data = detected_moscow['DETECTED'])
ax5.set_title(f'Number of newly registered cases: (model) - (data)')
ax5.set_xlabel('date')
ax5.set_ylabel('number of new cases')
errs = disp_dD.subtract(detected_moscow_,fill_value=0)
ax5.bar(errs.index, errs)
fig3.savefig('SIADR_vs_data.png')
fig4.savefig('SIADR_vs_data_diff.png')
print('difference = {}'.format(errs.sum()))
SIADR[['I','D','A']].plot(title='Group sizes',figsize=[16,10])
dSIADR[['dI','dA','dD','dR']].plot(title='daily increase in each group', figsize = [16,10])
#fig.savefig('SIADR_daily_growth.png')
s_w = [ i for i in t_int if date0 <i and i <= date_cur+20*day]
statistics(dSIADR,acc_)
dSIADR[['dI','dA','dD','dR']].loc[s_w].plot(title='daily increase in each group - jumps caused by the measures', figsize = [16,10])
fig.savefig('SIADR_closeup.png')
dIs = pd.DataFrame({'dI':dSIADR['dI'],'dI2':dSIADR_2['dI'],'dI3':dSIADR_3['dI']})
dIs.plot(title='Increase in infections for the different models',figsize=[16,10])
plt.legend(['baseline model',
            'if from {} the infectiousness of diagnosed cases is halved'.format(date_start_prognose),
            'if from {} all cases are diagnosed'.format(date_start_prognose)])
plt.savefig('SIADR-three-models-dI.png')
acc_[['dI','dD','dA','dR']].plot(figsize=[16,10],title='Total number of cases in each group')
plt.legend(['Infected','Detected','Ailing','Recovered'])
plt.savefig('SIADR-test1-acc.png')
statistics(dSIADR_2,acc_2)
acc_2[['dI','dD','dA','dR']].plot(figsize=[16,10],title='Total number of cases in each group')
plt.legend(['Infected','Detected','Ailing','Recovered'])
plt.savefig('SIADR-test2-acc.png')
dSIADR_2[['dI','dA','dD']].loc[s_w].plot(title='daily increase in each group - jumps caused by the measures', figsize = [16,10])
plt.savefig('SIADR-test1-closeup.png')
statistics(dSIADR_3,acc_3)
acc_3[['dI','dD','dA','dR']].plot(figsize=[16,10],title='Total number of cases in each group')
plt.legend(['Infected','Detected','Ailing','Recovered'])
plt.savefig('SIADR-test3-acc.png')
dSIADR_3[['dI','dA','dD']].loc[s_w].plot(title='daily increase in each group - jumps caused by the measures', figsize = [16,10])
plt.savefig('SIADR-test3-closeup.png')<jupyter_output><empty_output>
|
no_license
|
/.ipynb_checkpoints/SIADR-checkpoint.ipynb
|
pvt-Joker/SIADR-model
| 1 |
<jupyter_start><jupyter_text>
[Home Page](../Start_Here.ipynb)
[Previous Notebook](Manipulation_of_Image_Data_and_Category_Determination_using_Text_Data.ipynb)
     
     
     
     
[1](The_Problem_Statement.ipynb)
[2](Approach_to_the_Problem_&_Inspecting_and_Cleaning_the_Required_Data.ipynb)
[3](Manipulation_of_Image_Data_and_Category_Determination_using_Text_Data.ipynb)
[4]
[5](Competition.ipynb)
     
     
     
     
[Next Notebook](Competition.ipynb)# Tropical Cyclone Intensity Estimation using a Deep Convolutional Neural Network - Part 3
**Contents of this notebook:**
- [Understand the drawbacks of existing solution](#Understanding-the-drawbacks)
- [Working out the solution](#Working-out-the-solution)
- [Data Augmentation](#Data-Augmentation)
- [Training the model](#Training-the-Model-with-Data-Augmentation)
**By the end of this notebook the participant will:**
- Learn about improving the previous model.
- Apply data augmentation.
- Tweak hyperparameters.# Understanding the drawbacks
```python3
Simply put, a machine learning model is only as good as the data it is fed with
```
We have achieved an accuracy of nearly 85% running with 4 epochs. Now we will try to increase the accuracy by taking a closer look at the dataset and images. We can observe the following from our previous notebook : <jupyter_code>NC :441
TD :4033
TC :7948
H1 :5340
H2 :3150
H3 :2441
H4 :2114
H5 :390<jupyter_output><empty_output><jupyter_text>The first thing we notice from the category counts is that the number of images per category is very non-uniform, with a TC : H5 ratio **greater than 1:20**. This imbalance can bias our CNN model, because predicting wrongly on a minority class barely affects the model when that class contributes less than 5% of the dataset.
The same can also be seen in the heatmap we obtained in the previous notebook: notice that most of the classes with more data were predicted correctly, while the minority classes were mis-predicted more often than the others.

Let us see now how we solve that problem using data augmentation : ## Working out the solution
## Data Augmentation
To decrease the non-uniformity, we will flip and rotate images to compensate for the lack of data in the classes with fewer samples:

We will be using OpenCV for Flipping and Image Rotations.
``` python
cv2.flip(img,0)
cv2.flip(img,1)
cv2.warpAffine(img, cv2.getRotationMatrix2D(center, 90, 1.0), (h, w))
cv2.warpAffine(img, cv2.getRotationMatrix2D(center, 180, 1.0), (w, h))
cv2.warpAffine(img, cv2.getRotationMatrix2D(center, 270, 1.0), (h, w))
```
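As a side note: besides generating extra images, the imbalance can also be countered by weighting the loss per class. The snippet below is a hedged illustration only — the per-class counts are copied from the table above, and the `model.fit(...)` call is assumed to refer to the Keras model and `tf.data` pipeline built later in this notebook.
```python
# Hedged sketch: per-class weights that are inversely proportional to class frequency.
counts = {0: 441, 1: 4033, 2: 7948, 3: 5340, 4: 3150, 5: 2441, 6: 2114, 7: 390}
total = sum(counts.values())
class_weight = {c: total / (len(counts) * n) for c, n in counts.items()}

# Assumed usage (commented out) with the model and datasets defined later:
# model.fit(train, epochs=epochs, validation_data=val, class_weight=class_weight)
```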
There are other ways to counter data imbalance, such as class weightage (sketched above), oversampling, and SMOTE.# Training the Model with Data Augmentation
We create a new function called `augmentation(name,category,filenames,labels,i)`, where we add more samples to the categories that are under-represented. <jupyter_code>import sys
sys.path.append('/workspace/python/source_code')
# Import Utility functions
from utils import *
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# Define the Augmentation Function
def augmentation(name,category,filenames,labels,i):
# Important Constants
file_path = "Dataset/Aug/"
images = []
(h, w) = (232,232)
center = (w / 2, h / 2)
angle90 = 90
angle180 = 180
angle270 = 270
scale = 1.0
img = load_image(name , interpolation = cv2.INTER_LINEAR)
if category == 0 :
images.append(cv2.flip(img,0))
elif category == 1 :
pass
elif category == 2 :
pass
elif category == 3 :
pass
elif category == 4 :
pass
elif category == 5 :
pass
elif category == 6 :
pass
elif category == 7 :
images.append(cv2.flip(img,0))
for j in range(len(images)):
cv2.imwrite(file_path+str(i+j)+'.jpeg',images[j])
filenames.append(file_path+str(i+j)+'.jpeg')
labels.append(category)
i = i + len(images)
return i<jupyter_output><empty_output><jupyter_text>##### We pass this function to our `load_dataset()` function to generate these augmentations.
Kindly wait for a couple of minutes while it augments the images.<jupyter_code>filenames,labels = load_dataset(augment_fn = augmentation)
# Set the Size of the Validation set
val_filenames , val_labels = make_test_set(filenames,labels,val=0.1)
#Make train test set
test = 0.1
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(filenames, labels, test_size=test, random_state=1)
import tensorflow as tf
y_train = tf.one_hot(y_train,depth=8)
y_test = tf.one_hot(y_test,depth=8)
val_labels = tf.one_hot(val_labels,depth=8)
# Make Dataset compatible with Tensorflow Data Pipelining.
train,test,val = make_dataset((x_train,y_train,128),(x_test,y_test,32),(val_filenames,val_labels,32))<jupyter_output><empty_output><jupyter_text># The model described in the paper :
Now we will be using the model described in the paper to evaluate its accuracy on the new dataset.
<jupyter_code>import numpy as np
np.random.seed(1337)
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten ,Dropout, MaxPooling2D
from tensorflow.keras import backend as K
#Reset Graphs and Create Sequential model
K.clear_session()
model = Sequential()
#Convolution Layers
model.add(Conv2D(64, kernel_size=10,strides=3, activation='relu', input_shape=(232,232,3)))
model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
model.add(Conv2D(256, kernel_size=5,strides=1,activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
model.add(Conv2D(288, kernel_size=3,strides=1,padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2),strides=1))
model.add(Conv2D(272, kernel_size=3,strides=1,padding='same',activation='relu'))
model.add(Conv2D(256, kernel_size=3,strides=1,activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
model.add(Dropout(0.5))
model.add(Flatten())
#Linear Layers
model.add(Dense(3584,activation='relu'))
model.add(Dense(2048,activation='relu'))
model.add(Dense(8, activation='softmax'))
# Print Model Summary
model.summary()
import functools
# Include Top-2 Accuracy Metrics
top2_acc = functools.partial(tensorflow.keras.metrics.top_k_categorical_accuracy, k=2)
top2_acc.__name__ = 'top2_acc'
#Define Number of Epochs
epochs = 4
#But Training our model from scratch will take a long time
#So we will load a partially trained model to speedup the process
K.clear_session()
model = tf.keras.models.load_model("trained_16.h5",custom_objects={'top2_acc': top2_acc})
# Optimizer
sgd = tensorflow.keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9)
#Compile Model with Loss Function , Optimizer and Metrics
model.compile(loss=tensorflow.keras.losses.categorical_crossentropy,
optimizer=sgd,
metrics=['accuracy',top2_acc])
# Train the Model
trained_model = model.fit(train,
epochs=epochs,
verbose=1,
validation_data=val)
# Test Model Against Validation Set
score = model.evaluate(test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
<jupyter_output><empty_output><jupyter_text>### Visualisations<jupyter_code>import matplotlib.pyplot as plt
f = plt.figure(figsize=(15,5))
ax = f.add_subplot(121)
ax.plot(trained_model.history['accuracy'])
ax.plot(trained_model.history['val_accuracy'])
ax.set_title('Model Accuracy')
ax.set_ylabel('Accuracy')
ax.set_xlabel('Epoch')
ax.legend(['Train', 'Val'])
ax2 = f.add_subplot(122)
ax2.plot(trained_model.history['loss'])
ax2.plot(trained_model.history['val_loss'])
ax2.set_title('Model Loss')
ax2.set_ylabel('Loss')
ax2.set_xlabel('Epoch')
ax2.legend(['Train', 'Val'],loc= 'upper left')
plt.show()
import seaborn as sn
from sklearn.metrics import confusion_matrix
import pandas as pd
#Plotting a heatmap using the confusion matrix
pred = model.predict(val)
p = np.argmax(pred, axis=1)
y_valid = np.argmax(val_labels, axis=1, out=None)
results = confusion_matrix(y_valid, p)
classes=['NC','TD','TC','H1','H2','H3','H4','H5']
df_cm = pd.DataFrame(results, index = [i for i in classes], columns = [i for i in classes])
plt.figure(figsize = (15,15))
sn.heatmap(df_cm, annot=True, cmap="Blues")<jupyter_output><empty_output><jupyter_text>Let us now save our Model and the trained weights for future usage :<jupyter_code>#Save Our Model
model.save('cyc_pred.h5')<jupyter_output><empty_output>
|
non_permissive
|
/hpc_ai/ai_science_climate/English/python/jupyter_notebook/Tropical_Cyclone_Intensity_Estimation/Countering_Data_Imbalance.ipynb
|
rugvedpund/gpubootcamp
| 6 |
<jupyter_start><jupyter_text>To select variables we either make our selection in terms of new variables as follows.<jupyter_code>good_new_variables = score_frame.variable[score_frame.recommended].unique()
good_new_variables<jupyter_output><empty_output><jupyter_text>Or in terms of original variables as follows.<jupyter_code>good_original_variables = score_frame.orig_variable[score_frame.recommended].unique()
good_original_variables<jupyter_output><empty_output><jupyter_text>Notice, in each case we must call unique as each variable (derived or original) is potentially qualified against each possible outcome.The cross frame and score frame look like the following.<jupyter_code>cross_frame.head()
treatment.score_frame_<jupyter_output><empty_output>
|
permissive
|
/Examples/Multinomial/MultinomialExample1.ipynb
|
sthagen/pyvtreat
| 3 |
<jupyter_start><jupyter_text>### Tensors
To get to know PyTorch, we first look at the data type it is built around: tensors.
Tensors are similar to NumPy's ndarrays, but they additionally support GPU computation (the most important difference).
The following sections walk through some basic tensor operations in PyTorch:
* initialization
* basic arithmetic
* reshape
* conversion to and from NumPy
* moving tensors to CUDA
Once these basic operations are familiar, we can move on to what follows.<jupyter_code>import torch<jupyter_output><empty_output><jupyter_text>#### Initializing tensors<jupyter_code># create an uninitialized matrix
x = torch.empty(5, 3)
print(x)
## create a randomly initialized matrix
x = torch.rand(5, 3)
print(x)
# create a zero-filled matrix and specify the dtype as long
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
# create a tensor initialized from existing data
x = torch.tensor([5.5, 3])
print(x)
# use randn_like to create a new tensor based on an existing one; the dtype can be changed and the values are regenerated
x = x.new_ones(5, 3, dtype=torch.double)      # new_* methods create a tensor based on an existing object
print(x)
x = torch.randn_like(x, dtype=torch.float)    # override dtype!
print(x)                                      # the size is the same, only the values and dtype have changed
# get size information with x.size()
print(x.size())
# tensors with the same shape can be added directly with +
y = torch.rand(5, 3)
print(x + y)
# torch.add(x, y) also performs addition
print(torch.add(x, y))
# torch.add(x, y, out=z) writes the result into z
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
# y.add_(x) adds in place
# adds x to y
y.add_(x)
print(y)<jupyter_output>tensor([[-0.6111, 3.1165, -1.2318],
[ 0.4679, 1.8432, 1.3623],
[-0.6212, 0.0605, -0.7033],
[ 0.8364, 0.3263, -0.2188],
[-0.8649, 2.0848, 0.1780]])
<jupyter_text>Any operation ending with ``_`` replaces the original variable in place. For example ``x.copy_(y)`` and ``x.t_()`` both change ``x``.<jupyter_code>print(x[:, 1])
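# (Added illustration, hedged) any method ending in "_" mutates its tensor in place:
x.add_(1)      # modifies x directly instead of returning a new tensor
print(x)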
# torch.view reshapes a tensor
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # size -1 is inferred from the other dimensions
print(x.size(), y.size(), z.size())
# for a one-element tensor, x.item() returns the value as a plain Python number
x = torch.randn(1)
print(x)
print(x.item())
# x.numpy() converts a tensor to a NumPy array
# the two share the same underlying memory
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
a.add_(1)
print(a)
print(b)<jupyter_output>tensor([2., 2., 2., 2., 2.])
[2. 2. 2. 2. 2.]
<jupyter_text>The Torch Tensor and the NumPy array share their underlying memory, so modifying one changes the other<jupyter_code># torch.from_numpy(x) converts a NumPy array into a Torch tensor
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
# All tensors live on the CPU by default, so to run on the GPU we need x.to(torch.device("cuda"))
# to move the tensor onto the corresponding GPU
# is_available() checks whether CUDA can be used
# ``torch.device`` moves tensors to the specified device
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # create a tensor directly on the GPU
    x = x.to(device)                       # or simply use ``.to("cuda")`` to move a tensor to CUDA
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change the dtype<jupyter_output>tensor([0.9292], device='cuda:0')
tensor([0.9292], dtype=torch.float64)
<jupyter_text>### Autograd
This is the core of PyTorch!
A neural network needs a forward pass (computing the loss) plus a backward pass (updating the weights).
The key to updating the weights is computing gradients, and gradients are generally hard to derive by hand.
(Put differently: we approximate something non-linear with locally linear updates.)
For gradients of vector-valued functions we can use the Jacobian matrix, and PyTorch uses a computation graph to help carry out this Jacobian computation.<jupyter_code>import torch
x = torch.ones(2, 2, requires_grad=True)
print(x)
y = x + 2
print(y)
print(y.grad_fn)
z = y * y * 3
out = z.mean()
print(z, out)
a = torch.randn(2, 2)
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
# backward pass: because out is a scalar, out.backward() is equivalent to out.backward(torch.tensor(1))
out.backward()
print(x.grad)
# vector-Jacobian product
x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:
    y = y * 2
print(y)
# y is no longer a scalar. torch.autograd cannot compute the full Jacobian directly, but if we only want
# the vector-Jacobian product we just pass the vector to backward as an argument
gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(gradients)
print(x.grad)
# if .requires_grad=True but you do not want autograd to track the computation,
# wrap the code in a with torch.no_grad() block:
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
print((x ** 2).requires_grad)<jupyter_output>True
True
False
<jupyter_text>### Defining a network model
Use the torch.nn package to build neural networks.
An nn.Module contains the layers and a forward(input) method that returns the output. A typical training procedure for a neural network is:
* define a network model with some learnable parameters (weights);
* iterate over the dataset;
* process the input through the network;
* compute the loss (how far the output is from the correct values);
* propagate the gradients back into the network's parameters;
* update the parameters, typically with the simple rule: weight = weight - learning_rate * gradient<jupyter_code># The model must define a forward function; the backward function (which computes gradients) is created automatically by autograd
# Any tensor operation can be used inside forward.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 5x5 square convolution
# kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
# net.parameters() returns a list of the learnable parameters (weights) and their values
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
net.zero_grad()
out.backward(torch.randn(1, 10))<jupyter_output><empty_output><jupyter_text>``torch.nn`` only supports mini-batches. The entire ``torch.nn`` package only supports inputs that are a mini-batch of samples, not a single sample. For example, ``nn.Conv2d`` takes a 4-D tensor of ``nSamples x nChannels x Height x Width``. If you have a single sample, just use ``input.unsqueeze(0)`` to add a fake batch dimension.#### Computing the loss
#### Updating the network weights<jupyter_code>output = net(input)
target = torch.randn(10)  # dummy target, for example
target = target.view(1, -1)  # make target the same shape as output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)<jupyter_output>tensor(0.7140, grad_fn=<MseLossBackward>)
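To make the mini-batch note above concrete, here is a small hedged illustration (not part of the original tutorial) of adding a fake batch dimension with `unsqueeze(0)` before feeding a single sample to the `Net` defined earlier:
```python
single = torch.randn(1, 32, 32)   # one sample: (channels, height, width)
batched = single.unsqueeze(0)     # add a batch dimension: (1, 1, 32, 32)
print(batched.shape)
out_single = net(batched)         # Net expects a 4-D mini-batch, so this now works
```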
<jupyter_text>Now, if you follow loss backwards through the graph using its .grad_fn attribute, you will see a computation graph like this:
input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
-> view -> linear -> relu -> linear -> relu -> linear
-> MSELoss
-> loss
So, when we call loss.backward(), the whole graph is differentiated with respect to the loss, and every tensor in the graph with requires_grad=True will have its .grad tensor accumulated with the gradient.<jupyter_code>print(loss.grad_fn)  # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
# call loss.backward() and look at the conv1 layer's bias gradients before and after the backward pass.
net.zero_grad()     # zero the gradient buffers
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
import torch.optim as optim
# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# in your training loop:
optimizer.zero_grad() # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()    # Does the update<jupyter_output><empty_output><jupyter_text>### Training a classifier
Preparing the data before training the network:
Generally, when dealing with image, text, audio or video data, you can use standard Python packages to load the data into a numpy array and then convert that array into a torch.*Tensor.
Images can be handled with Pillow or OpenCV
Audio with scipy and librosa
Text with plain Python or Cython, or with NLTK or SpaCy
Specifically for vision tasks there is a package called torchvision, which includes loaders for common image datasets such as Imagenet, CIFAR10 and MNIST. Besides data loading, torchvision also provides image transforms, torchvision.datasets and torch.utils.data.DataLoader.
The torchvision package is a huge convenience and avoids repetitive boilerplate code.#### Training an image classifier
The usual steps are:
1. Load and normalize the CIFAR10 training and test sets using torchvision [load the dataset]
2. Define a convolutional neural network
3. Define a loss function
4. Train the network on the training data
5. Test the network on the test data<jupyter_code>import torch
import torchvision
import torchvision.transforms as transforms
# load the dataset
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
                   'deer', 'dog', 'frog', 'horse', 'ship', 'truck')<jupyter_output>0it [00:00, ?it/s]<jupyter_text>After loading the dataset, it is common to show a few samples to make sure the data was loaded correctly.<jupyter_code># visualize the dataset
import matplotlib.pyplot as plt
import numpy as np
# helper function to show an image
def imshow(img):
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
# get some random training data
dataiter = iter(trainloader)
images, labels = dataiter.next()
# show the images
imshow(torchvision.utils.make_grid(images))
# print the image labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# define the network
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
# define a loss function and an optimizer
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# train the network
# loop over the data iterator, feed the inputs to the network and optimize
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward pass, backward pass, optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
# evaluate on the test set
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))<jupyter_output><empty_output><jupyter_text>上述仅是使用了CPU在训练,我们希望网络可以在GPU上训练 <jupyter_code>device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# confirm that the machine supports CUDA, then print the CUDA device info:
print(device)<jupyter_output><empty_output>
|
no_license
|
/60mins_for_pytorch.ipynb
|
zuhaoya/Learn_For_Pytorch
| 11 |
<jupyter_start><jupyter_text># Dataset info
**Metadata**
- Day
- Year
- event ID
----------------------
### Predictor variables
- number of stations **Quantitative**
- number of pairs **Quantitative**
- mean elevation **Quantitative**
- lowest elevation **Quantitative**
- biggest elevation **Quantitative**
** Double frequency phase_delay signal information **
- Phase Delay mean **for each station** **Quantitative**
- Phase Delay standard deviation **Quantitative**
- Phase Delay max **Quantitative**
- Phase Delay min **Quantitative**
- Phase Delay std **Quantitative**
- Phase Delay kurtosis **Quantitative**
- Code Delay mean **Quantitative**
- Mean squared error between code and phase **Quantitative**
**Gradient signal information**
- Kurtosis mean **Quantitative**
- Gradient mean **Quantitative**
- Gradient max **Quantitative**
- Gradient min **Quantitative**
- **Outliers detected**
- **Segments descriptor ? **
**Space weather data**
- Kp index (Every 3 hours) **Quantitative**
- A index (Daily) **Quantitative**
- Sunspot number **Quantitative**
- Sunspot area **Quantitative**
- Radio Flux Penticton 10.7 cm **Quantitative**
- X-Ray flux flares C and S **Quantitative**
-------------------------------
### Original Classes
**True**
- Gradients with few peaks (conus). **(1)**
- Gradients for scintillation (regme) vary a lot. **(2)**
**False**
- Short arcs that create high variations in the subtraction.**(3)**
- Outliers in one station (faulty for some reason) creating false gradients with all of its neighbours.**(4)**
- Outliers in all stations at some time.**(5)**
There are very few elements in classes 4 and 5, so we can add them to class 3.<jupyter_code>#colors = plt.cm.GnBu(np.linspace(0, 1, 10))
colors = plt.cm.PuBuGn(np.linspace(0, 1, 10))<jupyter_output><empty_output><jupyter_text>### Original data<jupyter_code>data_iono = pd.read_csv("/home/wanda/master_project/Data/events_morefeatures.csv",index_col=0)
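# (Hedged addition, not in the original notebook) The rare classes 4 and 5 described above
# could be folded into class 3; stored in a new column so the original LABEL stays untouched.
data_iono['LABEL_345'] = data_iono['LABEL'].replace({4: 3, 5: 3})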
print data_iono.shape
data_iono.columns
data_iono1 = data_iono[data_iono.BIN_LABEL==1]
data_iono0 = data_iono[data_iono.BIN_LABEL==0]<jupyter_output><empty_output><jupyter_text>## Geomagnetic indices A and Kp<jupyter_code>plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.title("Class 0")
data_iono0.A_index.plot('hist')
plt.subplot(1,2,2)
plt.title("Class 1")
data_iono1.A_index.plot('hist')
print ("A index")
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.title("Class 0")
data_iono0.K_index.plot('hist')
plt.subplot(1,2,2)
plt.title("Class 1")
data_iono1.K_index.plot('hist')
print ("K index")
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.title("Class 0")
data_iono0.kurtosis_grad.plot('hist')
plt.subplot(1,2,2)
plt.title("Class 1")
data_iono1.kurtosis_grad.plot('hist')
print ("Kurtosis gradient(event)")
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.title("Class 0")
data_iono0.sunsplot_number.plot('hist')
plt.subplot(1,2,2)
plt.title("Class 1")
data_iono1.sunsplot_number.plot('hist')
print ("Sunspot number")
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.title("Class 0")
data_iono0.mse.plot('hist')
plt.subplot(1,2,2)
plt.title("Class 1")
data_iono1.mse.plot('hist')
print ("Mean square error") #much smaller on class 1!
purp = "#b366ff"
blue = "#9999ff"
cool_grey = "#a3a3c2"
other_gray = "#808080"
plt.figure(figsize=(5,5))
plt.figtext(.5,.9,"Classes Proportion", fontsize=15, ha='center')
data_iono.BIN_LABEL.value_counts().plot(kind='pie',autopct='%1.1f%%', fontsize=15, colors=[other_gray,blue]) <jupyter_output><empty_output><jupyter_text>In our study the data is split into four parts. Three folds, or 75% of the data, are used to train the algorithms and will be oversampled, since data of class "1" or "True ionospheric event" is less common. The other part is kept in its original form for testing (a rough sketch of the oversampling step follows the next code cell).<jupyter_code>print "Each fold has ",data_iono.shape[0]/4.," elements."
print "Elements of class 1 or True Event: ",data_iono[data_iono.BIN_LABEL==1].shape[0]/4.
print "Elements of class 0 or False Event: ",data_iono[data_iono.BIN_LABEL==0].shape[0]/4.
metadata = ['prn','network','STATIONS','PAIRS', 'ID','YEAR', 'DAY','TIME' ]
labels = ['BIN_LABEL', 'LABEL']
X = data_iono[['A_index', 'C_flare', 'K_index', 'N_PAIRS', 'N_STATIONS',
'kurtosis_delay', 'kurtosis_grad', 'max_elevation', 'max_grad',
'max_phase_delay', 'mean_code_delay', 'mean_elevation', 'mean_grad',
'mean_phase_delay', 'min_elevation', 'min_grad', 'min_phase_delay',
'mse', 'new_regions', 'radio_flux', 'std_grad', 'std_phase',
'sunsplot_number', 'sunspot_area', 'bucket_grad',
'bucket_grad_mean', 'k_index_bucket', 'elev_min_bucket',
'elev_mean_bucket', 'elev_max_bucket', 'grad_elev']]
print "Dataset shape: ",X.shape
y_bin, y_classes = data_iono.BIN_LABEL, data_iono.LABEL
#https://stats.stackexchange.com/questions/95797/how-to-split-the-dataset-for-cross-validation-learning-curve-and-final-evaluat<jupyter_output><empty_output>
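As referenced above, the oversampling applied to the three training folds could look roughly like the following hedged sketch; it uses `sklearn.utils.resample` and the `X`, `y_bin` objects defined above, and the exact procedure used in the study may differ:
```python
import pandas as pd
from sklearn.utils import resample

full = pd.concat([X, y_bin], axis=1)
train = full.sample(frac=0.75, random_state=0)   # roughly 3 of the 4 folds
test = full.drop(train.index)                    # held-out part, kept as-is

majority = train[train.BIN_LABEL == 0]
minority = train[train.BIN_LABEL == 1]
minority_up = resample(minority, replace=True, n_samples=len(majority), random_state=0)
train_balanced = pd.concat([majority, minority_up])
```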
|
no_license
|
/Exploration/Exploration.ipynb
|
wandarinca/ML_IONO
| 4 |
<jupyter_start><jupyter_text># NLP - Count VerbsBy [Leonardo Tozo](https://www.linkedin.com/in/leotozo/)****************************
Hello,
This is part of my personal portfolio, my intention with this series of notebooks is to keep practicing and improving my A.I & Machine Learning skills.
*Leonardo Tozo Bisinoto*
*MBA in Artificial Intelligence & Machine Learning*
*LinkedIn: https://www.linkedin.com/in/leotozo/*
*Github: https://github.com/leotozo*
**************************** This data analysis uses the IMDB reviews dataset. I will perform a basic NLP techniques.<jupyter_code>import pandas as pd<jupyter_output><empty_output><jupyter_text># Reading the IMDB dataset.<jupyter_code>df = pd.read_csv(
'./movies.csv',encoding='utf-8'
).sample(10000)
<jupyter_output><empty_output><jupyter_text># Describing the IMDB dataset.<jupyter_code>df.describe()<jupyter_output><empty_output><jupyter_text># Displaying the first 5 rows of the dataset.<jupyter_code>df.head()<jupyter_output><empty_output><jupyter_text># Removing the missing values.<jupyter_code>df.dropna(inplace=True)
<jupyter_output><empty_output><jupyter_text># Displaying the dataset shape (# of rows, # of columns)
<jupyter_code>df.shape<jupyter_output><empty_output><jupyter_text># Creating a new colounnm name + description<jupyter_code>df["reviews"] = df['review'] + " " + df['sentiment']
df.reviews[0]<jupyter_output><empty_output><jupyter_text>## How many unigrams are there before and after removing stopwords
<jupyter_code>import nltk
nltk.download('stopwords')<jupyter_output>[nltk_data] Downloading package stopwords to
[nltk_data] C:\Users\LEONARDOTOZOBISINOTO\AppData\Roaming\nltk_dat
[nltk_data] a...
[nltk_data] Package stopwords is already up-to-date!
<jupyter_text>## Unigrams count before and after removing the stopwords<jupyter_code>from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(ngram_range=(1,1))
vect.fit(df.texto)
text_vect = vect.transform(df.texto)
print('UNIGRAMS with the STOPWORDS', text_vect.shape[1])
from sklearn.feature_extraction.text import CountVectorizer
stopwords = nltk.corpus.stopwords.words('portuguese')
vect = CountVectorizer(ngram_range=(1,1), stop_words=stopwords)
vect.fit(df.texto)
text_vect = vect.transform(df.texto)
print('UNIGRAMS without the STOPWORDS', text_vect.shape[1])<jupyter_output>UNIGRAMS without the STOPWORDS 35310
<jupyter_text>## Bigrams count before and after removing the stopwords
<jupyter_code>from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(ngram_range=(2,2))
vect.fit(df.texto)
text_vect = vect.transform(df.texto)
print('BIGRAMS with the STOPWORDS', text_vect.shape[1])
from sklearn.feature_extraction.text import CountVectorizer
stopwords = nltk.corpus.stopwords.words('portuguese')
vect = CountVectorizer(ngram_range=(2,2), stop_words=stopwords)
vect.fit(df.texto)
text_vect = vect.transform(df.texto)
print('BIGRAMS without the STOPWORDS', text_vect.shape[1])<jupyter_output>BIGRAMAS sem STOPWORDS 145409
<jupyter_text>## Trigrams count before and after removing the stopwords<jupyter_code>from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(ngram_range=(3,3))
vect.fit(df.texto)
text_vect = vect.transform(df.texto)
print('TRIGRAMS with the STOPWORDS', text_vect.shape[1])
from sklearn.feature_extraction.text import CountVectorizer
stopwords = nltk.corpus.stopwords.words('portuguese')
vect = CountVectorizer(ngram_range=(3,3), stop_words=stopwords)
vect.fit(df.texto)
text_vect = vect.transform(df.texto)
print('TRIGRAMS without the STOPWORDS', text_vect.shape[1])<jupyter_output>TRIGRAMS without STOPWORDS 177869
<jupyter_text>## Verbs and Adverbs count<jupyter_code>nltk.download('rslp')
nltk.download('punkt')
nltk.download('universal_tagset')
nltk.download('averaged_perceptron_tagger')
from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
word_tokenize('O Hobbit - 7ª Ed. 2013 Produto NovoBilbo Bolseiro é um hobbit que')
df['tokens'] = df.texto.apply(word_tokenize)
df.tokens.head()
df['tags'] = df.tokens.apply(pos_tag, tagset='universal')
df.tags.head(11)
df.tags[0]
from collections import Counter
counter = Counter()
counter['a'] += 2
counter
from collections import Counter
counter = Counter()
for tags in df.tags:
for _, tag in tags:
counter[tag] += 1
print('Verbs', counter.get('VERB'))
print('Adjectives', counter.get('ADJ'))
counter.get('NOUN')
df.tags[0]<jupyter_output><empty_output><jupyter_text>## Applying Stemmer in a sentence<jupyter_code>from nltk.stem import PorterStemmer
from nltk.stem.rslp import RSLPStemmer
tokens = df.tokens[0]
ps = PorterStemmer()
rslp = RSLPStemmer()
for tok in tokens:
print('PorterStemmer: %s \t\t RSLPStemmer: %s' % (ps.stem(tok), rslp.stem(tok)))
<jupyter_output>PorterStemmer: O RSLPStemmer: o
PorterStemmer: hobbit RSLPStemmer: hobbit
PorterStemmer: - RSLPStemmer: -
PorterStemmer: 7ª RSLPStemmer: 7ª
PorterStemmer: Ed RSLPStemmer: ed
PorterStemmer: . RSLPStemmer: .
PorterStemmer: 2013 RSLPStemmer: 2013
PorterStemmer: produto RSLPStemmer: produt
PorterStemmer: novobilbo RSLPStemmer: novobilb
PorterStemmer: bolseiro RSLPStemmer: bols
PorterStemmer: é RSLPStemmer: é
PorterStemmer: um RSLPStemmer: um
PorterStemmer: hobbit RSLPStemmer: hobbit
PorterStemmer: que RSLPStemmer: que
PorterStemmer: leva RSLPStemmer: lev
PorterStemmer: uma RSLPStemmer: uma
PorterStemmer: vida RSLPStemmer: vid
PorterStemmer: confortável RSLPStemmer: confort
PorterStemmer: e RSLPStemmer: e
PorterStemmer: sem RSLPStemmer: sem
PorterStemmer: ambiçõ RSLPStemmer: amb
PorterStemmer: . RSLPStemmer: .
PorterStemmer: ma RSLPStemmer: mas
PorterStemmer: seu RSLPStemmer: seu
PorterStemmer: contentamento RSLPStem[...]<jupyter_text>## Unigrams count after the Stemmer applying<jupyter_code>from nltk.stem.rslp import RSLPStemmer
from sklearn.feature_extraction.text import CountVectorizer
rslp = RSLPStemmer()
def stem_pandas(line):
return ' '.join([rslp.stem(token) for token in line])
df['stemmer'] = df.tokens.apply(stem_pandas)
df.stemmer.head()
stopwords = nltk.corpus.stopwords.words('portuguese')
vect = CountVectorizer(ngram_range=(1,1), stop_words=stopwords)
vect.fit(df.stemmer)
text_vect = vect.transform(df.stemmer)
print('UNIGRAMS without the STOPWORDS', text_vect.shape[1])<jupyter_output>UNIGRAMS without the STOPWORDS 26528
|
no_license
|
/NLP002-Verbs-Counting.ipynb
|
leotozo/II.Natural-Language-Processing
| 14 |
<jupyter_start><jupyter_text># Data cleaning
I first read in the data set about seattle crime incidents in summer 2014<jupyter_code># read in the data file
df_st = pd.read_csv('seattle_incidents_summer_2014.csv' )<jupyter_output>/Users/huikuanchao/anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py:2902: DtypeWarning: Columns (9) have mixed types. Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
<jupyter_text>In this step, the dataframe df_st is pivoted to obtain new columns carrying time information (date and hour) and categorical information about each incident's crime type. <jupyter_code># convert the Date Reported column from string type to datetime type
date_report_ind = pd.DatetimeIndex(df_st['Date Reported'])
df_st['rp_date'] = date_report_ind.date.astype('datetime64')
df_st['rp_hour'] = date_report_ind.hour
df_st['Is_night'] = (date_report_ind.hour > 20) | (date_report_ind.hour < 6)
area_ind = df_st['District/Sector'].loc[df_st['District/Sector'].notnull()].unique()
df_st['ofns_tp_rdc'] = map(lambda x : x.split('-')[0], df_st['Offense Type'])
offens_ind = df_st['ofns_tp_rdc'].unique()
<jupyter_output><empty_output><jupyter_text># Data Analysis
I first looked at how the mean number of crime incidents, across all crime types, changes as a function of the hour of the day.<jupyter_code>hourly = df_st.pivot_table('RMS CDW ID', aggfunc='count',index=['rp_date'], columns=['rp_hour'])
%matplotlib inline
fmt = plt.FuncFormatter(lambda x, *args: '{0}:00'.format(int(x)))
fig, ax = plt.subplots(1,1, figsize=(12, 8), sharex=True, sharey=True)
#plt.plot(hourly.mean(0))
ax.set_xlim(0, 23)
ax.xaxis.set_major_locator(plt.MultipleLocator(4))
ax.plot(range(24),hourly.mean(0), color='Red', lw=3)
ax.xaxis.set_major_formatter(fmt)
ax.set_xlabel('Time of Day',size=16)
ax.set_ylabel('Averaged # of Incidents',size=16)<jupyter_output><empty_output><jupyter_text>The results shown above exhibit a highly nonlinear pattern: the number of incidents drops significantly in the morning after sunrise, shows a sharp increase at around 12:00pm, and then stays at that level until night.
## The effect of day and night
In the following section, I further study how the incidents are distributed across the crime types. First, I look at the incidents that happened during daytime.<jupyter_code>type_nt_day = df_st.pivot_table('RMS CDW ID', aggfunc='count',index=['ofns_tp_rdc'], columns=['Is_night'])
#plot # of incidents vs. incidents type for both day and night periods
fig, ax = plt.subplots(1,1, figsize=(20, 8))
day_incid_type = np.nan_to_num(type_nt_day[False].values.T)
ax.bar(range(day_incid_type.size), day_incid_type, color='r', alpha=0.8,align='center')
ax.set_xticks(range(type_nt_day.index.size))
ax.set_xticklabels(type_nt_day.index,ha="right",rotation=60)
ax.set_ylabel('# of Incidents in daytime',size=16)
#for c, y in zip(['r', 'b'], [False, True]):
# xs = np.arange(20)
#    ax.bar(offens_ind, y, type_nt_day[y].values.T, zdir='y', color=c, alpha=0.8)<jupyter_output><empty_output><jupyter_text>As the figure above shows, the incidents are distributed across the crime types in a highly non-uniform manner. Specifically, we observe that theft and burglary are the two most frequent crime types. <jupyter_code>fig, ax = plt.subplots(1,1, figsize=(20, 8))
night_incid_type = np.nan_to_num(type_nt_day[True].values.T)
ax.bar(range(night_incid_type.size), night_incid_type, color='b', alpha=0.8,align='center')
ax.set_xticks(range(type_nt_day.index.size))
ax.set_xticklabels(type_nt_day.index,ha="right",rotation=60)
ax.set_ylabel('# of Incidents at night',size=16)
<jupyter_output><empty_output><jupyter_text>However, at night the figure above shows that although theft is still the most frequent crime type, assault now becomes the second most frequent. At the same time, the number of burglary incidents is only marginally smaller than the number of assault incidents.
## Types of crime in different districts
Next, I'd like to explore how the types of incidents differ across districts in Seattle. <jupyter_code>df_corrt_dist = df_st.loc[df_st['District/Sector'].notnull()]
type_dist = df_corrt_dist.pivot_table('RMS CDW ID', aggfunc='count',index=['District/Sector'], columns=['ofns_tp_rdc'])
fig, ax = plt.subplots(1,1, figsize=(20, 8))
cent_type = np.nan_to_num(type_dist['THEFT'].values.T)
ax.bar(range(cent_type.size), cent_type, color='r', alpha=0.8,align='center')
ax.set_xticks(range(type_dist.index.size))
ax.set_xticklabels(type_dist.index,ha="right",rotation=60)
ax.set_ylabel('# of Theft Incidents',size=16)
ax.set_xlabel('Districts of Seattle',size=16)
<jupyter_output><empty_output><jupyter_text>We investigate the distribution of theft incidents. The figure above suggests that the center city (district M) shows a much higher number of theft incidents compared with the other districts, for example the districts in the south area (districts O, W and F) and the north area (district L).<jupyter_code>fig, ax = plt.subplots(1,1, figsize=(20, 8))
cent_type = np.nan_to_num(type_dist['BURGLARY'].values.T)
ax.bar(range(cent_type.size), cent_type, color='B', alpha=0.8,align='center')
ax.set_xticks(range(type_dist.index.size))
ax.set_xticklabels(type_dist.index,ha="right",rotation=60)
ax.set_ylabel('# of Burglary Incidents',size=16)
ax.set_xlabel('Districts of Seattle',size=16)
<jupyter_output><empty_output><jupyter_text>Nevertheless, the situation is quite different if we look at the same distribution for burglary incidents. The figure above shows that the number of burglaries reaches its minimum in the center city (M) while reaching its highest value in the north area (L).
## Crime incidents during the summer
Finally, I looked at how the crime incidents change over the course of the summer. <jupyter_code>df_st['rp_month'] = date_report_ind.month
monthly = df_st.pivot_table('RMS CDW ID', aggfunc='count',index=['rp_month'], columns=['ofns_tp_rdc'])
%matplotlib inline
fmt = plt.FuncFormatter(lambda x, *args: '{0}:00'.format(int(x)))
fig, ax = plt.subplots(1,1, figsize=(12, 8), sharex=True, sharey=True)
#plt.plot(hourly.mean(0))
ax.set_xlim(5, 9)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.plot(monthly.index, monthly.mean(1), color='Red', lw=3)
#ax.xaxis.set_major_formatter(fmt)
ax.set_xlabel('Month',size=16)
ax.set_ylabel('Averaged # of Incidents',size=16)<jupyter_output><empty_output><jupyter_text>The above results on the mean number of incidents across the different crime types as a function of month show that July and August have the highest numbers of incidents, while the numbers drop at both the beginning and the end of the summer.<jupyter_code>%matplotlib inline
fmt = plt.FuncFormatter(lambda x, *args: '{0}:00'.format(int(x)))
Theft_month = np.nan_to_num(monthly['THEFT'].values.T)
fig, ax = plt.subplots(1,1, figsize=(12, 8), sharex=True, sharey=True)
#plt.plot(hourly.mean(0))
ax.set_xlim(5, 9)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.plot(Theft_month, color='Red', lw=3)
#ax.xaxis.set_major_formatter(fmt)
ax.set_xlabel('Month',size=16)
ax.set_ylabel('# of Theft Incidents',size=16)<jupyter_output><empty_output><jupyter_text>However, if we choose to look at a specific type of crime, the situation can be slightly different. For example, the above figure shows the same curve for theft, which exhibits high values at both the beginning and the middle of the summer. <jupyter_code>byday = df_st.pivot_table('RMS CDW ID', aggfunc='count',index=['rp_date'], columns=['ofns_tp_rdc'])
%matplotlib inline
fmt = plt.FuncFormatter(lambda x, *args: '{0}:00'.format(int(x)))
fig, ax = plt.subplots(1,1, figsize=(12, 8), sharex=True, sharey=True)
#plt.plot(hourly.mean(0))
#ax.set_xlim(5, 9)
#ax.xaxis.set_major_locator(plt.MultipleLocator(100))
ax.plot( byday.mean(1), color='B', lw=3)
#ax.xaxis.set_major_formatter(fmt)
ax.set_xlabel('Date',size=16)
ax.set_ylabel('Averaged # of Incidents',size=16)<jupyter_output><empty_output>
|
non_permissive
|
/Seattle_crime_report-Copy1.ipynb
|
huikuanchao/seattle_crime_study
| 10 |
<jupyter_start><jupyter_text>### Project - Ensemble Techniques - Term Deposit Subscription Prediction
<jupyter_code>#Student Name : Makarand More
#Task#1 Exploratory Data Analysis
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
#Load given Data
df_Concrete=pd.read_csv("concrete.csv")
#Task#1 Exploratory Data Analysis
df_Concrete.head()
df_Concrete.tail()
print(df_Concrete.info())
print()
print(df_Concrete.shape)
print()
print(df_Concrete.isnull().sum())
df_Concrete.isnull().sum().sum() #missing value count in dataframe for both row and column
#Note: Total data count is 1030 and, fortunately, there are no missing (null) values. All of the independent variables are numeric, non-categorical.
#All features except age and strength have the same units (kg per m3 of mixture) but different scales, so we might need to scale the data to avoid bias in the algorithms.
df_Concrete.describe().transpose()
n = df_Concrete.nunique(axis=0)
print("No.of.unique values in each column :\n",
n)
#Notes: All of the data in the dataset is numerical
#Age data appears to have outliers because max value is very large as compared to 3rd IQR value
df_Concrete.skew()
#Notes: Among the variable distributions, age is highly skewed. Binning could be done if the target variable were categorical, but not in this case, as the target variable is numeric.
#The two variables Coarse Aggregate (coarseagg) and Fine Aggregate (fineagg) are negatively skewed.
#For the Age column the median value is about 25 and there are a few outliers.
fig = plt.figure(figsize=(12, 6))
plt.axvline(df_Concrete.strength.mean(),linestyle="dashed",label="mean", color='blue')
sns.distplot(df_Concrete.strength);
sns.boxplot(df_Concrete['age'])
plt.show()
plt.subplots(figsize=(12, 6))
ax = sns.boxplot(data=df_Concrete)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45);
#Notes : The Age column appears to have the largest number of outliers; slag, water, superplastic and fineagg also have some outliers
columns = list(df_Concrete)[:]
df_Concrete[columns].hist(stacked=False, bins=20, figsize=(14,10), color='#E14906');
sns.distplot(df_Concrete['coarseagg'])
plt.show()
sns.distplot(df_Concrete['fineagg'])
plt.show()
sns.distplot(df_Concrete['age'])
plt.show()
sns.heatmap(df_Concrete.corr() , annot=True, cmap='Reds')
#Notes# High positive correlation between Cement and Strength, which means more cement gives higher concrete strength. Age and Super Plasticizer are the other two factors influencing compressive strength.
#High negative correlation between Super Plasticizer and Water. Also positive correlations between Super Plasticizer and Fly Ash, Fine Aggregate.
sns.pairplot(df_Concrete)
sns.scatterplot(y="strength", x="cement", hue="water",size="age", data=df_Concrete,sizes=(20, 200))
#Notes: Compressive strength increases with the amount of cement and with age. Older cement requires more water.
#Strength can improve when less water is used during preparation.
# Separating independent and dependent variables
X = df_Concrete.iloc[:,:-1] # Features - All columns but last
y = df_Concrete.iloc[:,-1] # Target - Last Column
##Split into training and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30,random_state=7)
#Note : Standardizing the data i.e. to rescale the features to have a mean of zero and standard deviation of 1.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
# Let us explore the coefficients for each of the independent attributes
#regression_model.coef_
pd.DataFrame(regression_model.coef_,
X.columns,
columns=['coef'])\
.sort_values(by='coef', ascending=False)
#Notes: The value of coef indicate some what same (as scatter plot) when increase Cement as X variable then increase Compressive strength
regression_model.score(X_train, y_train)
#out of sample score (R^2)
regression_model.score(X_test, y_test)
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
poly = PolynomialFeatures(degree=4, interaction_only=True) #Note : a higher degree helps here to get a better score
X_train2 = poly.fit_transform(X_train)
X_test2 = poly.fit_transform(X_test)
poly_clf = linear_model.LinearRegression()
poly_clf.fit(X_train2, y_train)
y_pred = poly_clf.predict(X_test2)
#In sample (training) R^2 will always improve with the number of variables!
print(poly_clf.score(X_train2, y_train))
#Out off sample (testing) R^2 is our measure of sucess
print(poly_clf.score(X_test2, y_test))
#Analysis based on DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
dtr = DecisionTreeRegressor()
dtr.fit(X_train, y_train)
y_pred_dtr = dtr.predict(X_test)
print("Model\t\t\t\t RMSE \t\t MSE \t\t MAE \t\t R2")
print("""Decision Tree Regressor \t {:.2f} \t\t {:.2f} \t\t{:.2f} \t\t{:.2f}""".format(
np.sqrt(mean_squared_error(y_test, y_pred_dtr)),mean_squared_error(y_test, y_pred_dtr),
mean_absolute_error(y_test, y_pred_dtr), r2_score(y_test, y_pred_dtr)))
plt.scatter(y_test, y_pred_dtr)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Decision Tree Regressor")
plt.show()
print('Training accuracy on selected features: %.3f' % dtr.score(X_train, y_train))
print('Testing accuracy on selected features: %.3f' % dtr.score(X_test, y_test))
#Analysis based on RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(n_estimators=100)
rfr.fit(X_train, y_train)
y_pred_rfr = rfr.predict(X_test)
print("Model\t\t\t\t RMSE \t\t MSE \t\t MAE \t\t R2")
print("""Random Forest Regressor \t {:.2f} \t\t {:.2f} \t\t{:.2f} \t\t{:.2f}""".format(
np.sqrt(mean_squared_error(y_test, y_pred_rfr)),mean_squared_error(y_test, y_pred_rfr),
mean_absolute_error(y_test, y_pred_rfr), r2_score(y_test, y_pred_rfr)))
plt.scatter(y_test, y_pred_rfr)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Random Forest Regressor")
plt.show()
print('Training accuracy on selected features: %.3f' % rfr.score(X_train, y_train))
print('Testing accuracy on selected features: %.3f' % rfr.score(X_test, y_test))
# View a list of the features and their importance scores
importances = rfr.feature_importances_
indices = np.argsort(importances)
a = df_Concrete.columns[:]
features= a
#plot it
plt.figure(figsize=(10,10))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), features[indices])
plt.xlabel('Relative Importance')
plt.show()
#Analysis based on GradientBoostingRegressor
from sklearn.ensemble import GradientBoostingRegressor ## inherently a regression algorithm; the target y should be numeric
gbcl = GradientBoostingRegressor(n_estimators = 50,random_state=1, max_depth=4)
gbcl = gbcl.fit(X_train, y_train)
y_pred_gbcl = gbcl.predict(X_test)
print("Model\t\t\t\t RMSE \t\t MSE \t\t MAE \t\t R2")
print("""Gradient Boosting Regressor \t {:.2f} \t\t {:.2f} \t\t{:.2f} \t\t{:.2f}""".format(
np.sqrt(mean_squared_error(y_test, y_pred_gbcl)),mean_squared_error(y_test, y_pred_gbcl),
mean_absolute_error(y_test, y_pred_gbcl), r2_score(y_test, y_pred_gbcl)))
plt.scatter(y_test, y_pred_gbcl)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Gradient Boosting Regressor")
plt.show()
print('Training accuracy on selected features: %.3f' % gbcl.score(X_train, y_train))
print('Testing accuracy on selected features: %.3f' % gbcl.score(X_test, y_test))
from sklearn.ensemble import AdaBoostRegressor ## inherently a regression algorithm; the target y should be numeric
abr = AdaBoostRegressor(n_estimators = 50,random_state=1)
abr = abr.fit(X_train, y_train)
y_pred_abr = abr.predict(X_test)
print("Model\t\t\t\t RMSE \t\t MSE \t\t MAE \t\t R2")
print("""Random Forest Regressor \t {:.2f} \t\t {:.2f} \t\t{:.2f} \t\t{:.2f}""".format(
np.sqrt(mean_squared_error(y_test, y_pred_abr)),mean_squared_error(y_test, y_pred_abr),
mean_absolute_error(y_test, y_pred_abr), r2_score(y_test, y_pred_abr)))
plt.scatter(y_test, y_pred_abr)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Decision Tree Regressor")
plt.show()
print('Training accuracy on selected features: %.3f' % abr.score(X_train, y_train))
print('Testing accuracy on selected features: %.3f' % abr.score(X_test, y_test))
#As a 2nd iteration, going to do some data cleaning by working on outliers and negatively skewed variables.
df_Concrete1=df_Concrete
df_Concrete1.shape
df_Concrete1.describe().transpose()
Q1= df_Concrete1.quantile(0.25)
Q3= df_Concrete1.quantile(0.75)
IQR = Q3-Q1
outliers = pd.DataFrame(((df_Concrete1 > (Q3+1.5*IQR)) | (df_Concrete1 < (Q1-IQR*1.5))).sum(axis=0), columns=['No. of outliers'])
outliers['Percentage of outliers'] = round(outliers['No. of outliers']*100/len(df_Concrete1), 2)
outliers
df_Concrete1 = df_Concrete1[~((df_Concrete1 < (Q1 - 1.5 * IQR)) |(df_Concrete1 > (Q3 + 1.5 * IQR))).any(axis=1)]
df_Concrete1.shape
#Notes : That means we removed all 89 outliers from the data set. Instead of removing all of them, we could also work only on the outlier variables
#that have a higher percentage of outliers, for example Age and Superplastic, and replace the outliers with the mean or median instead of removing them (a sketch follows below).
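# (Hedged sketch, an alternative not applied above) Instead of dropping rows, outliers in the
# columns with the largest outlier share (age, superplastic) could be replaced by the median,
# reusing the IQR bounds computed earlier; done on a copy so the results above are unchanged.
df_alt = df_Concrete.copy()
for col in ['age', 'superplastic']:
    low, high = Q1[col] - 1.5 * IQR[col], Q3[col] + 1.5 * IQR[col]
    outlier_mask = (df_alt[col] < low) | (df_alt[col] > high)
    df_alt.loc[outlier_mask, col] = df_alt[col].median()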
df_Concrete1=df_Concrete1.drop(['coarseagg','fineagg'], axis=1)
df_Concrete1.shape
#For 2nd iteration doing again - Separating independent and dependent variables, split and StandardScaler
X1 = df_Concrete1.iloc[:,:-1] # Features - All columns but last
y1 = df_Concrete1.iloc[:,-1] # Target - Last Column
from sklearn.model_selection import train_test_split
X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.30,random_state=7)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X1_train = sc.fit_transform(X1_train)
X1_test = sc.transform(X1_test)
print(y1_train)
#Analysis based on RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
rfr1 = RandomForestRegressor(n_estimators=100)
rfr1.fit(X1_train, y1_train)
y1_pred_rfr = rfr1.predict(X1_test)
print("Model\t\t\t\t RMSE \t\t MSE \t\t MAE \t\t R2")
print("""Random Forest Regressor \t {:.2f} \t\t {:.2f} \t\t{:.2f} \t\t{:.2f}""".format(
np.sqrt(mean_squared_error(y1_test, y1_pred_rfr)),mean_squared_error(y1_test, y1_pred_rfr),
mean_absolute_error(y1_test, y1_pred_rfr), r2_score(y1_test, y1_pred_rfr)))
plt.scatter(y1_test, y1_pred_rfr)
plt.plot([y1_test.min(), y1_test.max()], [y1_test.min(), y1_test.max()], 'k--', lw=2)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Random Forest Regressor")
plt.show()
print('Training accuracy on selected features: %.3f' % rfr1.score(X1_train, y1_train))
print('Testing accuracy on selected features: %.3f' % rfr1.score(X1_test, y1_test))
#Analysis based on GradientBoostingRegressor
from sklearn.ensemble import GradientBoostingRegressor ## inherient Regression Alog, Y good to have a numeric
gbcl1 = GradientBoostingRegressor(n_estimators = 50,random_state=1, max_depth=4)
gbcl1 = gbcl1.fit(X1_train, y1_train)
y1_pred_gbcl = gbcl1.predict(X1_test)
print("Model\t\t\t\t RMSE \t\t MSE \t\t MAE \t\t R2")
print("""Gradient Boosting Regressor \t {:.2f} \t\t {:.2f} \t\t{:.2f} \t\t{:.2f}""".format(
np.sqrt(mean_squared_error(y1_test, y1_pred_gbcl)),mean_squared_error(y1_test, y1_pred_gbcl),
mean_absolute_error(y1_test, y1_pred_gbcl), r2_score(y1_test, y1_pred_gbcl)))
plt.scatter(y1_test, y1_pred_gbcl)
plt.plot([y1_test.min(), y1_test.max()], [y1_test.min(), y1_test.max()], 'k--', lw=2)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Gradient Boosting Regressor")
plt.show()
print('Training accuracy on selected features: %.3f' % gbcl1.score(X1_train, y1_train))
print('Testing accuracy on selected features: %.3f' % gbcl1.score(X1_test, y1_test))
#3rd iteration using Kfold Cross-Validation to evaluate model performance
df_Concrete2=df_Concrete
df_Concrete2.shape
#For 3rd iteration doing again - Separating independent and dependent variables, split
#Plus include Kfold
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
X = df_Concrete2.iloc[:,:-1] # Features - All columns but last
y = df_Concrete2.iloc[:,-1] # Target - Last Column
#print(y)
num_folds = 100 #Note: checked with multiple fold values; 100 gave better accuracy than 50
seed = 7
kfold = KFold(n_splits=num_folds, random_state=seed)
results = cross_val_score(gbcl, X, y, scoring='neg_mean_squared_error', cv=kfold)
print("Mean Square Error: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0))
results1 = cross_val_score(gbcl, X, y, cv=kfold)
print("Accuracy: %.3f%% (%.3f%%)" % (results1.mean()*100.0, results1.std()*100.0))
print('Training accuracy on selected features: %.3f' % gbcl.score(X_train, y_train))
print('Testing accuracy on selected features: %.3f' % gbcl.score(X_test, y_test))
result_Cross_Val = pd.DataFrame(columns=['model', 'Mean Square Error', 'Accuracy', 'Training Score', 'Testing Score'])
#result_Cross_Val=result_Cross_Val.append({'model':'ABC','Mean Square Error':11,'Accuracy':10,'Training Score':15,'Testing Score':15},ignore_index=True)
result_Cross_Val
for clf in (dtr, rfr,gbcl,abr):
result_mean_squared= cross_val_score(clf, X, y, scoring='neg_mean_squared_error', cv=kfold)
result_mean_squared=result_mean_squared.mean()*100,result_mean_squared.std()*100
#print("Mean Square Error: %.3f%% (%.3f%%)" % (result_mean_squared.mean()*100.0, result_mean_squared.std()*100.0))
results1 = cross_val_score(clf, X, y, cv=kfold)
result_Accuracy=results1.mean()*100.0, results1.std()*100.0
result_Training=clf.score(X_train, y_train)
result_Testing=clf.score(X_test, y_test)
result_Cross_Val=result_Cross_Val.append({'model':clf.__class__.__name__,'Mean Square Error':result_mean_squared,'Accuracy':result_Accuracy,'Training Score':result_Training,'Testing Score':result_Testing},ignore_index=True)
result_Cross_Val
# Grid Search or Random Search and hyperparameter tuning
df_Concrete3=df_Concrete
df_Concrete3.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_Concrete3.drop(['ash', 'coarseagg', 'fineagg', 'strength'], axis=1),
df_Concrete3['strength'],
test_size = 0.2,
random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# Prepare parameter grid #hyperparameter tuning
param_grid = {
'criterion': ['mse', 'mae', 'friedman_mse'],
'learning_rate': [0.05, 0.1, 0.15, 0.2],
'max_depth': [2, 3, 4, 5],
'max_features': ['sqrt', None],
'max_leaf_nodes': list(range(2, 10)),
'n_estimators': list(range(50, 500, 50)),
'subsample': [0.8, 0.9, 1.0]
}
# Perform hyperparameter tuning using RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV
rs = RandomizedSearchCV(estimator=GradientBoostingRegressor(random_state=42), param_distributions=param_grid,
return_train_score= True, n_jobs=-1, verbose=2, cv = 10, n_iter=500)
rs.fit(X_train, y_train)
mean = rs.best_score_
std = rs.cv_results_['mean_test_score'].std()
print(f"Mean training score: {rs.cv_results_['mean_train_score'].mean()}")
print(f"Mean validation score: {mean}")
print(f"Validation standard deviation: {std}")
print(f"95% confidence interval: {str(round(mean-(1.96*std),3)) + ' <-> ' + str(round(mean+(1.96*std),3))}")
print(f"Best parameters: {rs.best_params_}")
print(f"Test score: {rs.score(X_test, y_test)}")
df_Concrete4=df_Concrete
df_Concrete4.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_Concrete4.drop(['ash', 'coarseagg', 'fineagg', 'strength'], axis=1),
df_Concrete4['strength'],
test_size = 0.2,
random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
# Define model
model = RandomForestRegressor(n_estimators=100, random_state=0)
# Bundle preprocessing and modeling code in a pipeline
clf = Pipeline(steps=[('model', model)])
# Preprocessing of training data, fit model
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
print('MAE:', mean_absolute_error(y_test, preds))
param_grid = {
'bootstrap': [True],
'max_depth': [80, 90, 100, 110],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12]
}
grid_search = GridSearchCV(model, param_grid = param_grid,cv = 3 ) #n_jobs = -1, verbose = 2
grid_search.fit( X_train, y_train)
mean = grid_search.best_score_
std = grid_search.cv_results_['mean_test_score'].std()
print(f"Mean validation score: {mean}")
print(f"Validation standard deviation: {std}")
print(f"95% confidence interval: {str(round(mean-(1.96*std),3)) + ' <-> ' + str(round(mean+(1.96*std),3))}")
print(f"Best parameters: {grid_search.best_params_}")
print(f"Test score: {grid_search.score(X_test, y_test)}")
result_HyperParm = pd.DataFrame(columns=['Model', 'Mean validation score', 'Standard deviation', '95% confidence', 'Testing Score'])
result_HyperParm
for clf2 in (rs, grid_search):
Result_mean=clf2.best_score_
Result_std = clf2.cv_results_['mean_test_score'].std()
Result_95Conf=str(round(Result_mean-(1.96*Result_std),3)) + ' <-> ' + str(round(Result_mean+(1.96*Result_std),3))  # plain string instead of a one-element set
Result_TScore=clf2.score(X_test, y_test)
result_HyperParm=result_HyperParm.append({'Model':clf2.estimator.__class__.__name__,'Mean validation score':Result_mean,'Standard deviation':Result_std,'95% confidence':Result_95Conf,'Testing Score':Result_TScore},ignore_index=True)
result_HyperParm
#Overall Summary for predicting the strength of concrete
#Outlier removal does not bring a noticeable overall performance improvement on any of the three models; dropping the excluded features also does not affect the models.
#The main features driving strength are cement, slag, water, superplastic and age.
#The best model is Gradient Boosting, followed by the Random Forest Regressor.
#Using the above model, we can predict the strength with an accuracy between 83% and 100% at 95% confidence.
#Student Name : Makarand More<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Graph showing brand/team value ## Imports<jupyter_code>import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import matplotlib.image as mpimg
from PIL import Image
from highlight_text import fig_text<jupyter_output><empty_output><jupyter_text>## Retrieve data<jupyter_code>data = pd.read_csv("../../data/marketvalue/brand_value.csv")
data<jupyter_output><empty_output><jupyter_text>## Create the plot<jupyter_code>barWidth = 0.2
fig,ax = plt.subplots(figsize = (11,7))
#Add grid to the plot
ax.grid(ls="dotted", lw="0.5", color="w", zorder=1)
hfont = {'fontname':'Impact'}
#Change the background color
fig.set_facecolor("#08143D")
ax.patch.set_facecolor("#08143D")
#Change the ticks color
ax.tick_params(colors="w", length=15)
plt.xticks(data['Year'])
#Change the axes color
ax.spines['bottom'].set_color("w")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color("w")
# Axes Label
plt.xlabel("Year", color="w", fontsize=15 , **hfont)
plt.ylabel("Millions U.S. dollars", color="w", fontsize=15 , **hfont)
# Title and credits
fig_text(0.12,0.97, s="Liverpool <brand/team value> from 2011 to 2019", highlight_textprops=[{"color": '#2bb6e0'}], fontsize = 16, fontweight = "bold", color = "w", **hfont)
fig.text(0.12,0.9, "(in million U.S. dollars)", fontsize = 12, fontweight="light", color = "w", **hfont)
fig.text(0.05, 0, "Created by Jacky KUOCH",fontstyle="italic",fontsize=9, color="w")
# Plot the bars with value on top
#Brand Finance
plt.bar(data['Year']-0.2, data['BrandFinance'], width=barWidth, color="#2bb6e0", label="Brand Finance (Brand value)", edgecolor = 'w')
for i in range(0,7):
plt.text(x = data['Year'][i]-0.4 , y = data['BrandFinance'][i]+15, s = data['BrandFinance'][i], size = 10, color="w")
for i in range(7,9):
plt.text(x = data['Year'][i]-0.5 , y = data['BrandFinance'][i]+15, s = data['BrandFinance'][i], size = 10, color="w")
#Forbes
plt.bar(data['Year'], data['Forbes'], width=barWidth, color="#8A0F77", label="Forbes (Team value)", edgecolor = 'w')
for i in range(0,5):
plt.text(x = data['Year'][i]-0.13, y = data['Forbes'][i]+25, s = data['Forbes'][i], size = 10, color="w")
for i in range(5,7):
plt.text(x = data['Year'][i]-0.17, y = data['Forbes'][i]+25, s = data['Forbes'][i], size = 10, color="w")
for i in range(7,9):
plt.text(x = data['Year'][i]-0.31, y = data['Forbes'][i]+25, s = data['Forbes'][i], size = 10, color="w")
#KPMG
plt.bar(data['Year']+0.2, data['KPMG'], width=barWidth, color="#0660A6", label="KPMG (Enterprise Value)", edgecolor = 'w')
for i in range(6,8):
plt.text(x = data['Year'][i]+0.15, y = data['KPMG'][i]+10, s = data['KPMG'][i], size = 10, color="w")
plt.text(x = data['Year'][8]+0.05, y = data['KPMG'][8]+20, s = data['KPMG'][8], size = 10, color="w")
plt.legend()
plt.show()
plt.tight_layout()
<jupyter_output><empty_output>
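As a side note on the labelling technique used above: the per-bar `plt.text` offsets are easy to break when the data changes. On matplotlib 3.4 or newer, `Axes.bar_label` attaches the same value labels automatically. A minimal self-contained sketch (with placeholder values, not the real brand-value data):

```python
import numpy as np
import matplotlib.pyplot as plt

years = np.arange(2011, 2020)
values = np.random.randint(300, 2000, size=len(years))  # placeholder values for illustration only

fig, ax = plt.subplots(figsize=(11, 7))
bars = ax.bar(years, values, width=0.4, color="#2bb6e0", edgecolor="w")
ax.bar_label(bars, padding=3, color="w")  # one call instead of manually positioned plt.text labels
ax.set_xlabel("Year")
ax.set_ylabel("Millions U.S. dollars")
plt.show()
```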
<jupyter_start><jupyter_text>From above we can see there are two prominent features for the finance data, exercised_stock_options and other. <jupyter_code>import matplotlib.pyplot as plt
poi_features = np.array([])
non_poi_features = np.array([])
poi_color = "b"
non_poi_color = "r"
data = featureFormat(data_dict, ['poi', 'exercised_stock_options', 'other'], sort_keys = True)
for person in data:
if person[0] == 1: #poi
plt.scatter( person[1], person[2], color=poi_color)
else:
plt.scatter( person[1], person[2], color=non_poi_color)
plt.xlabel('other')
plt.ylabel('exercised_stock_options')
plt.legend()
plt.show()
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.preprocessing import MinMaxScaler
features_list = ['poi'] + numerical_email_features
data = featureFormat(data_dict, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
scaled_features = MinMaxScaler().fit_transform(features)
print "Created labels"
# Create the RFE object and compute a cross-validated score.
svc = DecisionTreeClassifier(min_samples_split=5)
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(3))
print "About to fit data"
rfecv.fit(scaled_features, labels)
print "Done fitting data"
print("Optimal number of features : %d" % rfecv.n_features_)
print rfecv.ranking_
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
features_list = ['poi'] + numerical_email_features + financial_features
data = featureFormat(data_dict, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
features = MinMaxScaler().fit_transform(features)
from sklearn.decomposition import PCA
clf = PCA(n_components=5)
clf.fit(features)
def score(features_train, labels_train, features_test, labels_test):
from sklearn.ensemble import AdaBoostClassifier
clf = AdaBoostClassifier()
clf.fit(features_train, labels_train)
return clf.score(features_test, labels_test)
def pca_result(features, labels, n_components):
from sklearn.decomposition import PCA
clf = PCA(n_components=n_components)
clf.fit(features)
return clf
def graph_pca_score(features, labels):
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=42)
scores = []
for i in xrange(1, len(features[0])):
pca = pca_result(X_train, y_train, i)
new_x_train = pca.transform(X_train)
new_x_test = pca.transform(X_test)
s = score(new_x_train, y_train, new_x_test, y_test)
scores.append(s)
plt.figure()
plt.xlabel("n_components")
plt.ylabel("adaboost_score")
plt.plot(range(1, len(features[0])), scores)
plt.show()
graph_pca_score(features, labels)<jupyter_output><empty_output>
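A possible alternative to the manual component-count loop above is to let scikit-learn search over `n_components` inside a `Pipeline`. This is only a sketch of the idea on synthetic data, written for Python 3 (the notebook above targets Python 2), not a drop-in replacement:

```python
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

# Synthetic stand-in for the scaled Enron features
X_demo, y_demo = make_classification(n_samples=200, n_features=15, n_informative=5, random_state=42)

pipe = Pipeline([('pca', PCA()), ('clf', AdaBoostClassifier(random_state=42))])
search = GridSearchCV(pipe, param_grid={'pca__n_components': list(range(1, 11))}, cv=3)
search.fit(X_demo, y_demo)
print(search.best_params_, round(search.best_score_, 3))
```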
<jupyter_start><jupyter_text>### Reading dataset<jupyter_code>house_price = pd.read_csv('/home/hasan/DATA SET/Bengaluru_House_Data.csv')<jupyter_output><empty_output><jupyter_text>### Simple Introduction of dataset<jupyter_code>house_price.head()
house_price.shape
house_price.dtypes<jupyter_output><empty_output><jupyter_text>### Dropping Unnecessary column<jupyter_code>house_price.drop('availability', axis=1, inplace=True)
house_price.head()<jupyter_output><empty_output><jupyter_text>### Counting null data<jupyter_code>house_price.isnull().sum()<jupyter_output><empty_output><jupyter_text>### Replacing null data<jupyter_code># replacing null data of location, size, bath and balcony column
location_mode = house_price['location'].mode()[0]
size_mode = house_price['size'].mode()[0]
society_mode = house_price['society'].mode()[0]
bath_mean = int(house_price['bath'].mean())
balcony_mean = int(house_price['balcony'].mean())
house_price['location'].fillna(location_mode, inplace=True)
house_price['size'].fillna(size_mode, inplace=True)
house_price['society'].fillna(society_mode, inplace=True)
house_price['bath'].fillna(bath_mean, inplace=True)
house_price['balcony'].fillna(balcony_mean, inplace=True)
house_price.isnull().sum()
house_price.head(10)<jupyter_output><empty_output><jupyter_text>### Checking Different Categorical Data of Column<jupyter_code>#different area in area_type column
house_price['area_type'].unique()
#Total unique location
house_price['location'].nunique()
#Total different size
house_price['size'].unique()
house_price['society'].nunique()<jupyter_output><empty_output><jupyter_text>### preprocessing of size column<jupyter_code>house_price['Num_bed_room'] = house_price['size'].apply(lambda x: int(x.split(' ')[0]))
house_price.drop('size', axis=1, inplace=True)
house_price.head()<jupyter_output><empty_output><jupyter_text>### preprocessing of total_sqft column<jupyter_code>#unique data of total_sqft column
house_price['total_sqft'].unique()
#checking without float data
def is_float(data):
try:
float(data)
except:
return False
return True
house_price[~house_price['total_sqft'].apply(is_float)].head(40)
house_price['total_sqft'] = house_price['total_sqft'].str.replace('Sq. Meter', '')
house_price['total_sqft'] = house_price['total_sqft'].str.replace('Acres', '')
house_price['total_sqft'] = house_price['total_sqft'].str.replace('Sq. Yards', '')
house_price['total_sqft'] = house_price['total_sqft'].str.replace('Perch', '')
# changing data type to float
def data_type_to_float(x):
token = x.split('-')
if len(token)==2:
return (float(token[0]) + float(token[1]))/2
try:
return float(x)
except:
None
house_price['total_sqft'] = house_price['total_sqft'].apply(data_type_to_float)
house_price.head()
#multiply price column with 100000
house_price['price'] = house_price['price'] * 100000
house_price.head()<jupyter_output><empty_output><jupyter_text>### Changing Data types<jupyter_code>house_price['bath'] = house_price['bath'].astype('int')
house_price['balcony'] = house_price['balcony'].astype('int')
house_price['Num_bed_room'] = house_price['Num_bed_room'].astype('int')
house_price.dtypes<jupyter_output><empty_output><jupyter_text>### Exploratory Data Analysis<jupyter_code>different_area = house_price.groupby('area_type')['area_type'].count()
different_area.plot(kind='bar')<jupyter_output><empty_output><jupyter_text>We are seeling that based on area_type column our data is totally unlevel. Super built-up Area is more than any other area around more than 8000. Carpet Area is lowest number compared to other area.<jupyter_code>#total location in every area
total_location_in_area = house_price.groupby('area_type').agg({'location': 'count'})
total_location_in_area
# minimum, maximum and average price of house in every area
house_price.groupby('area_type')['price'].agg(['min', 'mean', 'max'])<jupyter_output><empty_output><jupyter_text>### Removing Outlier##### area_type column<jupyter_code>#counting different area
area_count = house_price.groupby('area_type')['area_type'].agg('count')
area_count
#unique area
house_price['area_type'].unique()
# index of the 'Carpet Area'
carpet_area_index = house_price[house_price.area_type=='Carpet Area'].index
carpet_area_index
# dropping some rows those carrry 'Carpet Area'
house_price.drop(carpet_area_index, axis=0, inplace=True)<jupyter_output><empty_output><jupyter_text>##### location column<jupyter_code># total unique locaion
house_price['location'].nunique()
#count every location
location_count = house_price.groupby('location')['location'].agg('count').sort_values(ascending=False)
location_count
#location that appear less than 10
less_ten = location_count[location_count<10]
len(less_ten)
#changing those location with are appear less than 10
house_price['location'] = house_price['location'].apply(lambda x:'other_location' if x in less_ten else x)
house_price['location'].nunique()
<jupyter_output><empty_output><jupyter_text>##### society column<jupyter_code>house_price['society'].nunique()
#count every location
society_count = house_price.groupby('society')['society'].agg('count').sort_values(ascending=False)
society_count
#society that appear less than 10
less_ten = society_count[society_count<5]
len(less_ten)
#replacing those societies which appear less than 5 times
house_price['society'] = house_price['society'].apply(lambda x:'other_society' if x in less_ten else x)
house_price['society'].nunique()
house_price.shape<jupyter_output><empty_output><jupyter_text>##### bath column<jupyter_code>#unique bath
house_price['bath'].unique()
#outlier compared to bath and Num_bed_room
bath_bed = house_price[house_price.bath>house_price.Num_bed_room+2]
bath_bed
#dropping rows
house_price.drop(bath_bed.index, axis=0, inplace=True)
house_price.shape<jupyter_output><empty_output><jupyter_text>### Creating dummies column of categorical column<jupyter_code>house_price = pd.get_dummies(house_price)
house_price.head()
house_price.dtypes
house_price.dropna(axis=0, inplace=True)<jupyter_output><empty_output><jupyter_text>### Selecting Feature and Label data<jupyter_code>X = house_price.drop('price', axis=1)
y = house_price['price']
<jupyter_output><empty_output><jupyter_text>### Dividing dataset for train and test<jupyter_code>Xtrain,xtest, Ytrain,ytest = train_test_split(X,y, test_size=.25, random_state=42)<jupyter_output><empty_output><jupyter_text>### Model with Hyperparameter Tuning<jupyter_code>RFR = RandomForestRegressor(n_estimators=100)
# hyperparameter
n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(start=5, stop=30, num=6)]
min_samples_split = [2,5,10,15,100]
min_samples_leaf = [1,2,5,10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
print(random_grid)
# using RandomizedSearchCV
RSC = RandomizedSearchCV(estimator = RFR, param_distributions=random_grid, scoring='neg_mean_squared_error', n_iter=10, cv=5, verbose=2, random_state=42, n_jobs=1)
RSC.fit(Xtrain, Ytrain)
predict_with_test_data = RSC.predict(xtest)
predict_with_test_data
# checking predicted accuracy
sns.distplot(ytest-predict_with_test_data)<jupyter_output><empty_output><jupyter_text>Figure have positive kurtosis.
From the figure we can see that most of the differences are close to zero, so we can say the model performs well.<jupyter_code># checking predicted accuracy using scatter plot
plt.scatter(ytest,predict_with_test_data)<jupyter_output><empty_output><jupyter_text>### Model Saving<jupyter_code># save the model to disk
filename = 'finalized_model.sav'
pickle.dump(RSC, open(filename, 'wb'))
<jupyter_output><empty_output><jupyter_text>### Model Loading<jupyter_code># load the model from disk
loaded_model = pickle.load(open(filename, 'rb'))
<jupyter_output><empty_output><jupyter_text>### Predicting with Totally New Data<jupyter_code>def predict_price(area, location, society, total_sqft, bath, balcony, Num_bed_room):
loc_area = np.where(X.columns==area)[0]
loc_location = np.where(X.columns==location)[0]
loc_society = np.where(X.columns==society)[0]
x = np.zeros(3015)
x[0] = total_sqft
x[1] = bath
x[2] = balcony
x[3] = Num_bed_room
if (loc_area >=0) and (loc_location>=0) and (loc_society>=0):
x[loc_area] = 1
x[loc_location] = 1
x[loc_society] = 1
return loaded_model.predict([x])[0]
predict_price('Super built-up Area', 'Uttarahalli', 'Theanmp', 1521,3,2,3)<jupyter_output>/home/hasan/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:14: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.
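The DeprecationWarning above comes from truth-testing the arrays returned by `np.where`. A possible rework (a sketch only; `predict_price_safe` and its `model`/`columns` arguments are our own names, meant to be called with the fitted `loaded_model` and `X.columns`, and it assumes the numeric columns keep the names `total_sqft`, `bath`, `balcony`, `Num_bed_room` after `get_dummies`):

```python
import numpy as np
import pandas as pd

def predict_price_safe(model, columns, area, location, society, total_sqft, bath, balcony, num_bed_room):
    # Single-row frame with the training column order, so no hard-coded vector length is needed
    row = pd.DataFrame(np.zeros((1, len(columns))), columns=columns)
    row.loc[0, ['total_sqft', 'bath', 'balcony', 'Num_bed_room']] = [total_sqft, bath, balcony, num_bed_room]
    for dummy_col in (area, location, society):
        if dummy_col in columns:  # explicit membership test instead of truth-testing np.where output
            row.loc[0, dummy_col] = 1
    return model.predict(row)[0]

# Example call, mirroring the cell above:
# predict_price_safe(loaded_model, X.columns, 'Super built-up Area', 'Uttarahalli', 'Theanmp', 1521, 3, 2, 3)
```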
<jupyter_start><jupyter_text># New Section<jupyter_code>!wget https://www.techsimplus.com/media/Mask_noMask.zip
!unzip Mask_noMask.zip
ls
from tensorflow.keras.layers import Dense, Dropout, Flatten, Input
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.applications import VGG19
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
import numpy as np
import os
imagePath = []
for i in os.listdir("./dataset"):
for image in os.listdir("./dataset/"+i):
path = "./dataset/" + i + "/" + image
imagePath.append(path)
data = []
labels = []
for i in imagePath:
label = i.split('/')[-2]
# loading the image and resizing the image (224, 224)
image = load_img(i, target_size = (224, 224))
image = img_to_array(image)
image = preprocess_input(image)
data.append(image)
labels.append(label)
data = np.array(data)
labels = np.array(labels)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size = 0.2, random_state = 42)
aug = ImageDataGenerator(rotation_range = 25, zoom_range = 0.1, width_shift_range = 0.2, height_shift_range = 0.2, horizontal_flip = True, fill_mode = 'nearest')
baseModel = VGG19(include_top = False, input_tensor = Input(shape = (224, 224, 3)))
headModel = baseModel.output
headModel = Flatten(name = 'flatten')(headModel)
headModel = Dense(128, activation = 'relu')(headModel)
headModel = Dropout(0.5)(headModel)  # fixed variable name so the dropout layer is actually part of the head
headModel = Dense(2, activation = 'softmax')(headModel)
model = Model(inputs = baseModel.input, outputs = headModel)
model.summary()
for layer in baseModel.layers:
layer.trainable = False
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
H = model.fit(aug.flow(x_train, y_train, batch_size = 35), validation_data = (x_test, y_test), epochs = 5)
!wget https://image.shutterstock.com/image-photo/portrait-mature-businessman-wearing-glasses-260nw-738242395.jpg ###Sample testing with a random image
import cv2
face = cv2.imread('portrait-mature-businessman-wearing-glasses-260nw-738242395.jpg')
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = np.expand_dims(face, axis = 0)
face = preprocess_input(face)
model.predict(face)[0]
<jupyter_output><empty_output>
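The raw output above is just the two-element softmax vector. To turn it into a readable label, the classes stored on the `LabelBinarizer` can be reused; note also that training images were loaded in RGB by `load_img`, while `cv2.imread` returns BGR, so converting first keeps preprocessing consistent. A short sketch assuming the `model`, `lb`, `img_to_array` and `preprocess_input` objects from the cells above:

```python
import cv2
import numpy as np

face = cv2.imread('portrait-mature-businessman-wearing-glasses-260nw-738242395.jpg')
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)   # match the RGB channel order used during training
face = cv2.resize(face, (224, 224))
face = preprocess_input(np.expand_dims(img_to_array(face), axis=0))

probs = model.predict(face)[0]
predicted_label = lb.classes_[int(np.argmax(probs))]  # lb was fit on the dataset folder names
print(predicted_label, probs)
```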
<jupyter_start><jupyter_text># **Assignment For Numpy**Difficulty Level **Beginner**1. Import the numpy package under the name np<jupyter_code>import numpy as np<jupyter_output><empty_output><jupyter_text>2. Create a null vector of size 10 <jupyter_code>x= np.zeros(10)
print(x)<jupyter_output>[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
<jupyter_text>3. Create a vector with values ranging from 10 to 49<jupyter_code>array_values = np.arange(10,50)
print(array_values)<jupyter_output>[10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49]
<jupyter_text>4. Find the shape of previous array in question 3<jupyter_code>np.shape(array_values)<jupyter_output><empty_output><jupyter_text>5. Print the type of the previous array in question 3<jupyter_code>type(array_values)<jupyter_output><empty_output><jupyter_text>6. Print the numpy version and the configuration
<jupyter_code>print(np.__version__)
print(np.show_config)<jupyter_output>1.19.4
<function show at 0x7fe019d70620>
<jupyter_text>7. Print the dimension of the array in question 3
<jupyter_code>array_values.ndim<jupyter_output><empty_output><jupyter_text>8. Create a boolean array with all the True values<jupyter_code>bool_arr = np.array([3,3], dtype=bool)<jupyter_output><empty_output><jupyter_text>9. Create a two dimensional array
<jupyter_code>dd_array = np.array([[1,2,3],[1,2,3]])
print(dd_array)
print("This Dimensions of Array:")
dd_array.ndim<jupyter_output>[[1 2 3]
[1 2 3]]
This Dimensions of Array:
<jupyter_text>10. Create a three dimensional array
<jupyter_code>ddd_array = np.array([[[1,2,3],[1,2,3],[1,2,3]]])
print(ddd_array)
print("This Dimensions of Array:")
ddd_array.ndim<jupyter_output>[[[1 2 3]
[1 2 3]
[1 2 3]]]
This Dimensions of Array:
<jupyter_text>Difficulty Level **Easy**11. Reverse a vector (first element becomes last)<jupyter_code>np.arange(20, 0 , -1)<jupyter_output><empty_output><jupyter_text>12. Create a null vector of size 10 but the fifth value which is 1 <jupyter_code>y = np.zeros(10)
print(y)
y[5] = 1
print(y)<jupyter_output>[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
<jupyter_text>13. Create a 3x3 identity matrix<jupyter_code>ddd_array = np.array([[[1,2,3],[1,2,3],[1,2,3]]])
ddd_array<jupyter_output><empty_output><jupyter_text>14. arr = np.array([1, 2, 3, 4, 5])
---
Convert the data type of the given array from int to float <jupyter_code>arr = np.array([1, 2, 3, 4, 5])
float_arr = arr.astype(np.float64)
print(float_arr)<jupyter_output>[1. 2. 3. 4. 5.]
<jupyter_text>15. arr1 = np.array([[1., 2., 3.],
[4., 5., 6.]])
arr2 = np.array([[0., 4., 1.],
[7., 2., 12.]])
---
Multiply arr1 with arr2
<jupyter_code>arr1 = np.array([[1., 2., 3.],[4., 5., 6.]])
arr2 = np.array([[0., 4., 1.],[7., 2., 12.]])
arr1 * arr2<jupyter_output><empty_output><jupyter_text>16. arr1 = np.array([[1., 2., 3.],
[4., 5., 6.]])
arr2 = np.array([[0., 4., 1.],
[7., 2., 12.]])
---
Make an array by comparing both the arrays provided above<jupyter_code>arr1 = np.array([[1., 2., 3.], [4., 5., 6.]])
arr2 = np.array([[0., 4., 1.],[7., 2., 12.]])
compare = arr1 == arr2
new_array = compare.all()
print(new_array)<jupyter_output>False
<jupyter_text>17. Extract all odd numbers from arr with values(0-9)<jupyter_code>a = np.arange(0,9)
a[a % 2 == 1]<jupyter_output><empty_output><jupyter_text>18. Replace all odd numbers to -1 from previous array<jupyter_code>a = a-1
a<jupyter_output><empty_output><jupyter_text>19. arr = np.arange(10)
---
Replace the values of indexes 5,6,7 and 8 to **12**<jupyter_code>arr = np.arange(10)
arr
arr[5:9] = 12
arr<jupyter_output><empty_output><jupyter_text>20. Create a 2d array with 1 on the border and 0 inside<jupyter_code>a = np.ones([5,5])
print(a)
print("\n 1 on the border and 0 inside: \n")
a[1:-1, 1:-1] = 0
print(a)<jupyter_output>[[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]]
1 on the border and 0 inside:
[[1. 1. 1. 1. 1.]
[1. 0. 0. 0. 1.]
[1. 0. 0. 0. 1.]
[1. 0. 0. 0. 1.]
[1. 1. 1. 1. 1.]]
<jupyter_text>Difficulty Level **Medium**21. arr2d = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
---
Replace the value 5 to 12<jupyter_code>arr2d = np.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
raveled_arr = arr2d.ravel()
raveled_arr[4]= 12
arr2d<jupyter_output><empty_output><jupyter_text>22. arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
---
Convert all the values of 1st array to 64
<jupyter_code>arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
arr3d[0] = 64
arr3d<jupyter_output><empty_output><jupyter_text>23. Make a 2-Dimensional array with values 0-9 and slice out the first 1st 1-D array from it<jupyter_code>a = np.arange(9).reshape((3,3))
a[0]<jupyter_output><empty_output><jupyter_text>24. Make a 2-Dimensional array with values 0-9 and slice out the 2nd value from 2nd 1-D array from it<jupyter_code>b = np.arange(9).reshape(3,3)
b[1:2,1:2]<jupyter_output><empty_output><jupyter_text>25. Make a 2-Dimensional array with values 0-9 and slice out the third column but only the first two rows<jupyter_code>c = np.arange(9).reshape(3,3)
print(c)
c[0:2,2]<jupyter_output>[[0 1 2]
[3 4 5]
[6 7 8]]
<jupyter_text>26. Create a 10x10 array with random values and find the minimum and maximum values<jupyter_code><jupyter_output><empty_output><jupyter_text>27. a = np.array([1,2,3,2,3,4,3,4,5,6]) b = np.array([7,2,10,2,7,4,9,4,9,8])
---
Find the common items between a and b
<jupyter_code><jupyter_output><empty_output><jupyter_text>28. a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
---
Find the positions where elements of a and b match
<jupyter_code><jupyter_output><empty_output><jupyter_text>29. names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe']) data = np.random.randn(7, 4)
---
Find all the values from array **data** where the values from array **names** are not equal to **Will**
<jupyter_code><jupyter_output><empty_output><jupyter_text>30. names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe']) data = np.random.randn(7, 4)
---
Find all the values from array **data** where the values from array **names** are not equal to **Will** and **Joe**
<jupyter_code><jupyter_output><empty_output><jupyter_text>Difficulty Level **Hard**31. Create a 2D array of shape 5x3 to contain decimal numbers between 1 and 15.<jupyter_code><jupyter_output><empty_output><jupyter_text>32. Create an array of shape (2, 2, 4) with decimal numbers between 1 to 16.<jupyter_code><jupyter_output><empty_output><jupyter_text>33. Swap axes of the array you created in Question 32<jupyter_code><jupyter_output><empty_output><jupyter_text>34. Create an array of size 10, and find the square root of every element in the array, if the values less than 0.5, replace them with 0<jupyter_code><jupyter_output><empty_output><jupyter_text>35. Create two random arrays of range 12 and make an array with the maximum values between each element of the two arrays<jupyter_code><jupyter_output><empty_output><jupyter_text>36. names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
---
Find the unique names and sort them out!
<jupyter_code><jupyter_output><empty_output><jupyter_text>37. a = np.array([1,2,3,4,5])
b = np.array([5,6,7,8,9])
---
From array a remove all items present in array b
<jupyter_code><jupyter_output><empty_output><jupyter_text>38. Following is the input NumPy array delete column two and insert following new column in its place.
---
sampleArray = numpy.array([[34,43,73],[82,22,12],[53,94,66]])
---
newColumn = numpy.array([[10,10,10]])
<jupyter_code><jupyter_output><empty_output><jupyter_text>39. x = np.array([[1., 2., 3.], [4., 5., 6.]]) y = np.array([[6., 23.], [-1, 7], [8, 9]])
---
Find the dot product of the above two matrix
<jupyter_code><jupyter_output><empty_output><jupyter_text>40. Generate a matrix of 20 random values and find its cumulative sum<jupyter_code><jupyter_output><empty_output>
<jupyter_start><jupyter_text># QUANTIFYING MODEL RISK OF RISK MODELS
Paris 1 Panthéon-Sorbonne#### Our dataset consists of the daily returns over the last two years (we could not import three years) of the share price of Veolia, a French multinational specialised in the management, optimisation and recovery of water and energy resources.
#### The variables are: Ref = quote reference (Veolia), the date (daily), the share price, the share return and the volume
<jupyter_code>import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
from tabulate import tabulate
from scipy.stats import norm
import random
import scipy
from stochastic.continuous import FractionalBrownianMotion
import pywt
from scipy.stats import gaussian_kde
from scipy import stats
url="C:/Users/louis/Downloads/"
db = pd.read_csv(url+"DG.csv", sep=";")
print(db.head())
db["Date"]=pd.to_datetime(db["Date"])
db["Rendements"]=db["Rendements"].replace(",",".",regex=True)
db["Rendements"]=db["Rendements"].replace("%","",regex=True)
db["Rendements"]=db["Rendements"].astype(float)
db["Prix_Action"]=db["Prix_Action"].replace(",",".",regex=True)
db["Prix_Action"]=db["Prix_Action"].astype(float)
db.head()
#db["Rendements"] = db["Rendements"] *100
plt.figure(figsize=(20,10))
(mu, sigma) = norm.fit(db['Rendements'])
n, bins, patches = plt.hist(db['Rendements'], normed=1,color = 'blue',edgecolor = 'black',bins = int(220/5))
y = norm.pdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution des rendements")
plt.ylabel("effectfs")
plt.xlabel("Valeur des rendements en %")<jupyter_output>C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
This is separate from the ipykernel package so we can avoid doing imports until
<jupyter_text>On remarque que la distribution des rendements de notre actif peut s'apparenter à une loi normale de moyenne nulle
## 1. About various calculation methods for the VaR#### 1.1 Determine your VaR at 99% at a one-day horizon using alternatively the following methods:. empirical quantile,. parametric distribution,. nonparametric distribution.La VaR est un outil fort utile qui nous permet de mesurer la quantité mimale de perte à laquelle on est confronté pour un intervalle de confiance donné. Mais la limite est que l'on ne connaît pas la tendance des distributions de pertes en dessous de son niveau###### Empirical quantile<jupyter_code>tri=db.sort_values(by=["Rendements"])
tri.head()
NR=0.01
min=int(round(db.shape[0]*NR,0))
print(tri.iloc[min,3])<jupyter_output>-3.16
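For reference, the same 1% historical quantile can be read off directly with NumPy instead of sorting and indexing by hand. A small self-contained illustration on simulated daily returns (not the Veolia data):

```python
import numpy as np

rng = np.random.default_rng(0)
simulated_returns = rng.normal(loc=0.0, scale=1.1, size=520)  # stand-in daily returns, in %

var_99_hist = np.quantile(simulated_returns, 0.01)  # 1% left-tail quantile = empirical VaR at 99%
print(round(var_99_hist, 2))
```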
<jupyter_text>D'après nos données empiriques, nous avons 1% de chance d'avoir une perte supérieure à -3.15% . Pour une si faible probabilité, cette perte est relativement faible, et devrait être supportable par Véolia.###### Parametric distribution<jupyter_code>a=db.groupby("Rendements")["Rendements"].agg({"count"}).reset_index()
plt.bar(a["Rendements"],a["count"],width=0.3)
plt.title("Distribution des rendements")
VaR = np.mean(db["Rendements"]) - 2.32 * np.std(db["Rendements"])
print(round(VaR,2))<jupyter_output>-2.49
<jupyter_text>La VaR paramètrique est plus généreuse que la VaR empire. On aurait 99% de chances de faire un gain supérieur à -2,5%<jupyter_code>b=deepcopy(a)
b["Rendements"]=round(b["Rendements"],1)
b=b.groupby("Rendements")["count"].agg({"sum"}).reset_index()
plt.plot(b["Rendements"],b["sum"])<jupyter_output><empty_output><jupyter_text>###### nonparametric distribution<jupyter_code>valeur = 1000000
simulation = np.random.normal(np.mean(db["Rendements"]), np.std(db["Rendements"]), valeur)
Mte_carlo_VaR = np.percentile(simulation, 1)
Mte_carlo_VaR<jupyter_output><empty_output><jupyter_text>Les résultats de la VaR non paramétriques sont assez similaires à ceux de la loi paramétrique<jupyter_code>print("Resulat VaR historique : {}" .format(str(round(tri.iloc[min,3],2))))
print("Resulat VaR paramétrique : {}" .format(str(round(VaR,2))))
print("Resulat VaR non paramétrique : {}" .format(str(round(Mte_carlo_VaR,2))))<jupyter_output>Resulat VaR historique : -3.16
Resulat VaR paramétrique : -2.49
Resulat VaR non paramétrique : -2.5
<jupyter_text>La VaR 99% est la perte maximale possible avec une probabilité de 1% de survenance pour un horizon donné (1 jours énoncé).
Pour la calculer, on peut utiliser les données historiques. Cela impose comme limite de supposer que l'évolution de nos rendements n'ont pas de trend au cours du temps. (Une perte de 3% en 1980 est-elle équivalente à une perte de 3% en 2020?).
On peut également supposer la distribution de nos rendements comme normale. Au vu de la représentation graphique de nos rendements, cette hypothèse ne semble pas farfelue. Dans ce cas précis, nous la préfèrons. Si l'on sait que la distribution ne suit aucune loi, des approches non paramétriques sont à privilégier. Nous allons ci-dessous calculer l'estimateur de Pickands.
###### 1.2 Using the estimator of Pickands, determine the parameter of the GEV function of losses. Comment its value.L'estimateur de Pickands se détermine de la façon suivante :
\begin{align}
\ Pickand Estimator :\xi_{P}^{k,n} = \frac{1}{ln2}*ln(\frac{X_{k,n} - X_{2k,n}}{X_{2k,n} - X_{4k,n}})
\end{align}
L'estimateur de pickand est un estimateur de l'indice de queue. Plus cet indice sera élevé en valeur absolu, plus les valeurs extrêmes seront importantes.Celui-ci se calcul avec un paramètre k qui représente un décalage. Ce décalage fixe sera le plus important que k sera fort.
En effet, nous avons créer pour le calculer 3 indices,
si k=2 :
alors l'indice 1 prendra la poistion 509, l'indice 2 prendra la place 507 et l'indice 4 prendra la place 495.
Les données étant triées dans un ordre décroissant, alors l'estimateur de pickands sera calculé sur les rendements négatifs les plus élevés.
A contrario, si k=40 :
Indice 1 : 471 position / Indice 2 :431 / Indice 3 : 351
Ainsi, plus k sera élevé, plus l'estimateur de Pickand sera faible.l'estimateur de Pickand nous servira, à la question suivante, à calucler la VaR grâce à l'EVT<jupyter_code>def pickands(data,k):
temp=data.sort_values(by=["Rendements"],ascending=False).reset_index()
ordered_data=temp.Rendements
n= len(ordered_data)
indice_k= ordered_data[n-k+1]
indices_2k = ordered_data[n-2*k+1]
indices_4k = ordered_data[n-4*k+1]
estimateur_pickands = (1./np.log(2)) * np.log((indice_k - indices_2k) / (indices_2k - indices_4k))
print("l'estimateur de pickands prend la valeurs de: " + str(round(estimateur_pickands,2)))
return estimateur_pickands
pickands(db,5)
pick = []
valeur0=[]
kvalues =[]
n= len(db)
for k in range(2,128):
temp=db.sort_values(by=["Rendements"],ascending=False).reset_index()
ordered_data=temp.Rendements
indice_k= ordered_data[n-k+1]
indices_2k = ordered_data[n-2*k+1]
indices_4k = ordered_data[n-4*k+1]
estimateur_pickands = (1./np.log(2)) * np.log((indice_k - indices_2k) / (indices_2k - indices_4k))
valeur= 0
pick.append(estimateur_pickands)
valeur0.append(valeur)
kvalues.append(k)
plt.figure(figsize=(15,10))
plt.plot(kvalues,pick,c='red')
plt.plot(kvalues, valeur0,c='green')
plt.xlabel('Value of k')
plt.ylabel('Pickand')
plt.title("Evolution de la valeur de l'estimateur de Pickand en fonction de k")
plt.show()
<jupyter_output><empty_output><jupyter_text>Evolution de la valuer de l'estimation du paramètre de Pickands par rapport à la valeur k. On voit ici que l'estimateur décroit lorsque k tend vers l'infini.<jupyter_code>Pickands=pd.DataFrame(list(zip(pick,kvalues)), columns=['Estimateur_Pickland','kvalues'])
Pickands["Estimateur_Pickland"]= round(Pickands["Estimateur_Pickland"],3)
Pickands<jupyter_output><empty_output><jupyter_text>Si estimateur_pickands < 0, Weibull peut en être une bonne approximation.
Si au dessus, Frechet en sera une meilleure.L'avantage de l'estimateur de Pickand dans la GEV est le fait que celui-ci soit valable quelle que soit la distribution des extrêmes, que ça soit Gumbel, Weibull ou Fréchet.##### 1.3 Determine the VaR at 99% at a one-day horizon using the EVT approach.Le but est ici de déterminer la Value at risk d’une nouvelle manière non paramétrique.
Cette VaR serait déterminée à l’aide de l’EVT (Extreme Theory Value), cependant pour l’utiliser, nous avons besoin de l’estimateur de Pickands.
La théorie des valuers extrêmes permet, sous des conditions de régularité très générales, de modéliser les probas extrêmes par 3 lois.
On peut voir ci-dessous la distribution de ces 3 lois:
On voit dans notre cas, que nous sommes dans un cas de Weibull pour déterminer la VaR à 99 %.De cette courbe ne Weibull, nous pouvons déterminer la VaR. Cette VaR sera calculée par l'estimateur.
Cependant il est important de noter que l'estimateur dans le cas de Weibull doit être strictement néhgatif. Nous appliquons donc une petite boucle if pour régler ce problème étant donné le peut de valeur positive qu'on ait pour Pickand.The VaR using the Pickand estimator:
\begin{align}
\ VaR(p) = \frac{(\frac{k}{n(1-p)})^{\xi^{P}}-1}{1-2^{-\xi^{P}}}(X_{n-k+1:n} - X_{n-2k+1:n}) + X_{n-k+1:n}
\end{align}
where the Pickand estimateur is the GEV one.<jupyter_code>def EVT(data,k,p):
temp=data.sort_values(by=["Rendements"],ascending=False).reset_index()
ordered_data=temp.Rendements
estimateur = pickands(data,k)
n=len(ordered_data)
if estimateur > 0 :
val = (estimateur)
else :
val = (-estimateur)
VaRpickand = ((((k/(n*(1-p)))**(val))-1) / (1-2**(-val)) ) *(ordered_data[n-k+1] - ordered_data[n-2*k+1]) + ordered_data[n-k+1]
return VaRpickand
VaRpick = EVT(db,3,0.99)
print('La VAR à 99% est de {}'.format(str(round(VaRpick, 2))+"%"))<jupyter_output>l'estimateur de pickands prend la valeurs de: -2.86
La VAR à 99% est de -3.19%
<jupyter_text>Ainsi, en utilisant la formule de la VaR ci-dessus, en fixant le paramètre k à 3 nous obtenons une VaR à 99% de -3.19 %.Nous décidons ici de modéliser la VaR associée aux differents paramètres de k.
Nous savons que l'estimateur de Pickand est particulièrement peu stable, et que son paramètre k est compliqué à fixer. Par conséquent nous avons voulu observer cette volatilité de la VaR par la variation des paramètres k.<jupyter_code>k_value = []
VaRpickand_est = []
for k in range(2,127) :
VaRpickand= EVT(db,k,0.99)
k_value.append(k)
VaRpickand_est.append(VaRpickand)
visu=pd.DataFrame(list(zip(k_value,VaRpickand_est)), columns=['k_value','VaRpickand_est'])
visu["VaRpickand_est"]= round(visu["VaRpickand_est"],3)
plt.plot(visu["k_value"],visu["VaRpickand_est"], color= "orange")
plt.xlim(0,100)
plt.ylim(-10,0)
plt.xlabel("k_values")
plt.ylabel("VaR_99%")
plt.title("Evolution des VaR_99% en fonction du paramètre k")<jupyter_output><empty_output><jupyter_text>On constate bien ici que la VAR calculée à 99% dépend bien du k choisi. En effet, plus celui-ci sera élevé moins il paraitra plausible #### 1.4 What is Leadbetter's extremal index for the dataset? Comment its value and explain how you should change your VaR to take this into account (only provide an explanation, with equations, do not calculate a new VaR).l'extremal index est un paramètre qui mesure le nombre de clusters dépassant un certain seuil de pertes fixées dans un processus stationnaire. Ce paramètre varie entre 0 et 1. Plus il s'approche de 1, moins il y a effet de cluster dans nos données extrèmes. Le paramètre seuil doit être négatif, on cherche à comptabiliser le nombre de cluster enregistrant plus de pertes que ce seuil. Le b est la taille du cluster <jupyter_code>def Leadbetter(data,b,seuil):
n=len(data)
k = int(n/b)
value = data.Rendements.values
somme_value = sum((value < seuil)) #on prend inférieur car on s'intéresse à nos pertes (inférieur négativement)
somme_cluster = []
for i in np.arange(1,k+1):
a= (i-1)*b
z= i * b
n= value[a-1:z]
if len(n) != 0:
somme_cluster.append(max(value[a-1:z]) > seuil)
Leadbetter_index = sum(somme_cluster)/somme_value
return print(" la valeur de l'indice extrêmal de Leadbetter est de: {} ".format(str(round(Leadbetter_index,3))))
Leadbetter(db,8,-0.03)<jupyter_output> la valeur de l'indice extrêmal de Leadbetter est de: 0.254
<jupyter_text>nos pertes sont relativement peu regroupées dans des clusters. Cela signifie qu'il n'y a pas forcement beaucoup d'autocorrélation entre elles#### 1.5 ) What is a Hurst exponent and how can one interpret its value?L"Hurt exponent" est utilisé pour mesurer la mémoire de long-terme d'une série. C'est une mesure relative à l'autocorrélation des chocs browniens dans les séries temporelles. Un choc brownien est totalement aléatoire. Il intervient dans des modèles de pricings courant (de type Black&Scholes). l'Hurt expnonent mesure donc la corrélation dans le temps de ces chocs aléatoires sur le processus gaussien. On peut le déterminer par l'implémentation d'une méthode R/S (so-called rescaled range).
La valeur varie entre 0 et 1, si il est supérieur à 0.5 signifie qu'il y a une autocorrélation positive de long terme. Un choc brownien positif sera probablement suivi d'un autre. Si il est égal à 0.5, il n'y a pas d'autocorrélation. Si il est inférieur à 0.5, l'autocorrélation sera positive ou négative mais de court terme.The Hurst exponent, H est défini comme ceci:\begin{align}
\ {\displaystyle \mathbb {E} \left[{\frac {R(n)}{S(n)}}\right]=Cn^{H}{\text{ as }}n\to \infty \,,}
\end{align}
Avec n: le nombre d'observation de la série temporelle.
R(n): Le range du nième écart à la moyenne, et S(n) leur écart-type et
C est une constante#### 1.6 ) Propose a risk measure taking into account the Hurst exponent and calculate its value for the price series used in this project. The Hurst exponent must be estimated with the absolute-moment estimator. This risk measure must take into account the autocovariance of a fBm (like in the Nuzman-Poor approach for predictions).A méditer## 2 From VaR to ES#### 2.7 ) For each of the methods exposed above for calculating VaR, expose a method to extend them to ES. Calculate the 99% ES on the same sample.Petit rappel de nos VaR obtenues:
Le calcul de la VaR nous permet de déterminer le seuil au-dessus du quel 99% des rendements seront supérieurs. Cependant dans 1%¨des cas en théorie, les rendements seront inférieurs au seuil calculé par la VaR. <jupyter_code>print(tabulate([[str(round(tri.iloc[min,3],2))+"%",str(round(VaR,2))+"%",str(round(Mte_carlo_VaR,2))+"%",str(round(VaRpick, 2))+"%"]],headers=
["VAR 99% empirique","VAR 99% paramétrique","VAR 99% non paramétrique","VAR 99% par estimateur de Pickands"]))<jupyter_output>VAR 99% empirique VAR 99% paramétrique VAR 99% non paramétrique VAR 99% par estimateur de Pickands
------------------- ---------------------- -------------------------- ------------------------------------
-3.16% -2.49% -2.5% -3.19%
<jupyter_text>Formule de l'ES:
L'ES mesure la moyenne de la distribution des pertes en dessous du niveau de la VaR. Autrement dit, si on a une VaR de -1%, l'ES associé à cette VaR mesurera le niveau moyen des pertes en dessous de ce niveau de rendement<jupyter_code>VaR_trouvées=[round(tri.iloc[min,3],2),round(VaR,2),round(Mte_carlo_VaR,2),round(VaRpick, 2)]
VaR_trouvées<jupyter_output><empty_output><jupyter_text>La VaR présente de nombreuses limites, on ne connaît pas la distribution des pertes en dessous de son seuil. On pourrait très bien avoir des pertes fortement corrélées.
Donc on peut calculer la moyenne sous son rendement pour obtenir une distribution. En effet, il n'y a aucune raison de penser que la distribution des pertes en dessous d'un seuil suivra une distribution paramètrique.<jupyter_code>ES = []
VaR_99 = []
for i in VaR_trouvées :
somme = 0
for j in db["Rendements"] :
if j<i :
somme += j
moy = somme / len(db[db["Rendements"]<i])
ES.append(moy)
VaR_99.append(i)
Expected_Shortfall=pd.DataFrame(list(zip(VaR_99,ES)), columns=['VaR_99','ES'])
for i in range(Expected_Shortfall.shape[0]):
print(f"Pour la VaR fixée à {round(Expected_Shortfall.iloc[i,0],2)}% , l'expected shortfall est de : {round(Expected_Shortfall.iloc[i,1],2)}%")<jupyter_output>Pour la VaR fixée à -3.16% , l'expected shortfall est de : -3.28%
Pour la VaR fixée à -2.49% , l'expected shortfall est de : -3.18%
Pour la VaR fixée à -2.5% , l'expected shortfall est de : -3.18%
Pour la VaR fixée à -3.19% , l'expected shortfall est de : -3.31%
<jupyter_text>Nos moyennes en fonction de nos Var calculées sont relativements proches, cela peut être dû au fait que nous avons peu de données, ou que les niveaux de VaR sont trop proches (surtout pour le paramétrique et le non paramétrique)#### 2.8 ) Backtest the ESs and the corresponding VaRs on your sample. Pay attention to the strict separation of estimation sample and test sample for this question (for the other questions, simply estimate the risk measures on the whole sample). Comment the result about the relative accuracy of ES and VaR.Pour Backtester nos ES et VaR nous pourrons procéder à un test de Kupiec
Christffersen (1998) qu'une prÈvision de VaR est valide si et seulement si la séquence des violations fItgTt=1 satisfait les deux hypothèses suivantes:\
-L'hypothèse de couverture non conditionnelle(unconditionalcoverage, UC)\
-L'hypothèse d'indépendance(independence, IND)Cette loi est vérifiée si :
Si l'on suppose que les variables It(α) sonti.i.d., alors sous l'hypothèse UC le nombre total de violations N suit une loi Binomiale.
Si T est suffisament important, on peut approximer la loi Binomiale par une loi Normale et sous l'hypothèse d'UC il vient :
\begin{align}
\ Z = \frac{N-pT}{(p(1-p)T)^{1/2}} -> N(0,1)
\end{align}
N : données extraordinaires \
p : pourcentage VaR \
T : observations totales<jupyter_code>VaR_trouvées=[round(tri.iloc[min,3],2),round(VaR,2),round(Mte_carlo_VaR,2),round(VaRpick, 2)]
VaR_trouvées
T = len(db)
p = 0.01
réalisation_except = []
VaR_99 = []
for i in VaR_trouvées :
N = int(len(db[db["Rendements"]<i]))
Z = (N-(p*T))/((p*(1-p)*T)**(1/2))
VaR_99.append(i)
réalisation_except.append(Z)
stat=pd.DataFrame(list(zip(VaR_99,réalisation_except)), columns=['VaR_99','réalisation_except'])
for i in range(stat.shape[0]):
if abs(stat.iloc[i,1])>2.32:
print(f"Pour la VaR fixée à {round(stat.iloc[i,0],2)}% ,H0 rejetté: l'hypothèse d'UC donc la VaR n'est pas valable car > au fractile de la loi normale (2.32)")
else:
print(f"Pour la VaR fixée à {round(stat.iloc[i,0],2)}% ,H0 accepé: l'hypothèse d'UC donc la VaR est valable car < au fractile de la loi normale (2.32)")<jupyter_output>Pour la VaR fixée à -3.16% ,H0 accepé: l'hypothèse d'UC donc la VaR est valable car < au fractile de la loi normale (2.32)
Pour la VaR fixée à -2.49% ,H0 accepé: l'hypothèse d'UC donc la VaR est valable car < au fractile de la loi normale (2.32)
Pour la VaR fixée à -2.5% ,H0 accepé: l'hypothèse d'UC donc la VaR est valable car < au fractile de la loi normale (2.32)
Pour la VaR fixée à -3.19% ,H0 accepé: l'hypothèse d'UC donc la VaR est valable car < au fractile de la loi normale (2.32)
<jupyter_text>Le test de couverture inconditionnelle a pour but d’évaluer si la fréquence des exceptions est compatible avec le quantile de la perte que la VaR est censée refléter.
Soit N le nombre d'exceptions et T le nombre total d'observations, nous pouvons définir la fréquence des exceptions π= 𝑁/𝑇; idéalement, ce taux devrait refléter le niveau de confiance choisi.
Le test de Kupiec(1995) nous permet de vérifier l'hypothèse nulle que la fréquence des exceptions 𝜋=𝑁/𝑇 est égale au niveau de signification α que nous utilisons lors du calcul de la VaR.
Loi Chi² à 99% = 6.63<jupyter_code>T = len(db)
p = 0.01
réalisation_except = []
VaR_99 = []
for i in VaR_trouvées :
N = int(len(db[db["Rendements"]<i]))
LR = -2*np.log( ((1-p)**(T-N)) * p**N) + 2 *np.log( (1-(N/T))**(T-N) * (N/T)**N )
VaR_99.append(i)
réalisation_except.append(LR)
stat2=pd.DataFrame(list(zip(VaR_99,réalisation_except)), columns=['VaR_99','réalisation_except'])
for i in range(stat2.shape[0]):
if abs(stat2.iloc[i,1])>6.63:
print(f"Pour la VaR fixée à {round(stat2.iloc[i,0],2)}% ,H0 refusé : l'hypothèse d'UC donc la VaR n'est pas valable car > au fractile (6.63)")
else:
print(f"Pour la VaR fixée à {round(stat2.iloc[i,0],2)}% ,H0 accepté: l'hypothèse d'UC donc la VaR est valable car < au fractile (6.63)")<jupyter_output>Pour la VaR fixée à -3.16% ,H0 accepté: l'hypothèse d'UC donc la VaR est valable car < au fractile (6.63)
Pour la VaR fixée à -2.49% ,H0 accepté: l'hypothèse d'UC donc la VaR est valable car < au fractile (6.63)
Pour la VaR fixée à -2.5% ,H0 accepté: l'hypothèse d'UC donc la VaR est valable car < au fractile (6.63)
Pour la VaR fixée à -3.19% ,H0 accepté: l'hypothèse d'UC donc la VaR est valable car < au fractile (6.63)
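The LR statistic above is compared by hand to the 99% critical value of a chi-squared distribution with one degree of freedom. The same Kupiec proportion-of-failures test can be packaged with an explicit p-value; `kupiec_pof` below is our own helper name and the counts are illustrative only:

```python
import numpy as np
from scipy.stats import chi2

def kupiec_pof(n_exceptions, n_obs, p=0.01):
    """Kupiec unconditional-coverage LR statistic and its chi2(1) p-value."""
    pi_hat = n_exceptions / n_obs
    loglik_null = (n_obs - n_exceptions) * np.log(1 - p) + n_exceptions * np.log(p)
    loglik_alt = (n_obs - n_exceptions) * np.log(1 - pi_hat) + n_exceptions * np.log(pi_hat)
    lr = -2.0 * (loglik_null - loglik_alt)
    return lr, chi2.sf(lr, df=1)

lr_stat, p_value = kupiec_pof(n_exceptions=5, n_obs=520)
print(round(lr_stat, 3), round(p_value, 3))  # H0 (correct coverage) is rejected when p_value < 0.01
```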
<jupyter_text>## 3 Model risk as estimation uncertainty#### 3.9. What is model risk and what are the different natures of model risk?Le risque de modèle est le risque de faire des pertes à cause du modèle que l'on a choisi.
Par exemple, si l'on pose l'hypothèse de la normalité des rendements alors qu'ils ne le sont pas, les modèles que l'on va entraîner risqueront de nous produire des rendements aléatoires. (la chance peut toujours rapporter)
Il existe différentes natures à ce risque. Choisir le mauvais modèle comme nous avons vu en haut. Le risque de paramètrage: de choisir les mauvais paramètres dans le modèle (donc mauvais modèle). Le risque d'overfitter, de sur-entraîner un modèle mais ne pas pouvoir l'appliquer à d'autres données. Tous les risques sur les données doivent être pris en compte égalements.
Il faut donc faire bien attention à ce type de risque!#### 3.10. For the parametric VaR, determine the distribution of your parameters and determine, either theoretically or by simulations, the corresponding distribution of VaR (each parameter is a random variable; if we draw a vector of parameters, we have one value for the VaR which is different from the VaR obtained for another drawn vector of parameters).Nous effectuons ici 100000 tirages avec remises.
Chacun de ces tirages est un échantillon du jeu de donnée contenant 20 rendements chacun.
De chacun de ces tirages, nous en calculerons une moyenne et un écart-type, qui nous permettra ensuite de calculer la VaR99 associée à chacun de ces tirages.
On pourra ainsi observer la distribution des moyennes, écart-types et VaR de cette série de tirage.<jupyter_code>tirage=[]
moyenne_tirage=[]
std_tirage=[]
for i in range(1,100000):
aléa = db["Rendements"].values
sampling = random.choices(aléa, k=20)
moy = np.mean(sampling)
std = np.std(sampling)
tirage.append(i)
moyenne_tirage.append(moy)
std_tirage.append(std)
random=pd.DataFrame(list(zip(tirage,moyenne_tirage,std_tirage)), columns=['tirage','moyenne_tirage','std_tirage']).set_index("tirage")
random["VaR_99"] = random["moyenne_tirage"] - 2.32 * random["std_tirage"]
random<jupyter_output><empty_output><jupyter_text>On peut voir ci-dessous une distribution assez proche de la loi normale de chacun des paramètres.<jupyter_code>plt.figure(figsize=(20,10))
plt.subplot(212)
(mu, sigma) = norm.fit(random['VaR_99'])
n, bins, patches = plt.hist(random['VaR_99'], normed=1,color = 'blue',edgecolor = 'black',bins = int(180/5))
y = norm.pdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution de la VAR_99")
plt.ylabel("effectfs")
plt.xlabel("Valeur de la VaR_99%")
plt.subplot(222)
(mu, sigma) = norm.fit(random['std_tirage'])
n, bins, patches = plt.hist(random['std_tirage'], normed=1,color = 'blue',edgecolor = 'black',bins = int(180/5))
y = norm.pdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution des écarts-types")
plt.ylabel("effectfs")
plt.xlabel("Valeur des écart-types en %")
plt.subplot(221)
(mu, sigma) = norm.fit(random['moyenne_tirage'])
n, bins, patches = plt.hist(random['moyenne_tirage'], normed=1,color = 'blue',edgecolor = 'black',bins = int(180/5))
y = norm.pdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution des moyennes")
plt.ylabel("effectfs")
plt.xlabel("Valeur des rendements moyens en %")<jupyter_output>C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:5: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
"""
C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:14: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:23: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
<jupyter_text>Ces graphiques sont assez intéressants (si réalisés sur un grand nombre et su un grand jeu de données) pour modéliser la distribution de la VaR. En effet, la VAR 99 qui est le seuil de réalisation sous lequel 1% des pires rendements ont lieu, on peut avoir une idée avec ce graph de quel seuil de VaR il serait judicieux de prendre. En effet, on voit que prendre un VaR_99% à -1 serait particulièrement risquée.
Globalement on est assez satisfait de voir que notre VaR suit une distribution normale. On verra par la suite de ce TP que pour un mouvement Brownien cela n'est pas toujours le cas.#### 3.11. If we suppose that price returns are iid Gaussian variables, determine, with the help of simulations, the distribution of the EVT parameters as well as the corresponding distribution of VaREtant donné que nous supposons ici que nos rendements sont gaussiens, nous les crééons donc artificiellement, pour qu'ils puissent suivre du mieux possible une loi Normale.
Nous appliquons ici la même éthode que précédement, nous créeons 10000 tirages de 500 rendements, crées par le processus Brownien. Ces tirages sort ensuite trieés pour appliquer la calcul de la VaR avec l'estimateur de Pickand. Pour se faire, nous recalculons notre estimateur de Pickand pour chacun des tirages et l'insérons ensuite dans la formule de la VaR.<jupyter_code>import random
tirage=[]
Pickands_est=[]
Var_Pick=[]
k=4
mu, sigma = 0, 1
aléa = np.random.normal(mu, sigma, 100000)
for i in range(1,10000):
sampling = random.choices(aléa, k=400)
sampling = sorted(sampling,reverse=True)
n= len(sampling)
p=0.99
indice_k= sampling[n-k+1]
indices_2k = sampling[n-2*k+1]
indices_4k = sampling[n-4*k+1]
estimateur_pickands = (1/np.log(2)) * np.log((indice_k - indices_2k) / (indices_2k - indices_4k))
if estimateur_pickands > 0 :
val = (estimateur_pickands)
VaRpickand = ((((k/(n*(1-p)))**(val))-1) / (1-2**(-val)) ) *(indice_k - indices_2k) + indice_k
else :
val = (-estimateur_pickands)
VaRpickand = ((((k/(n*(1-p)))**(val))-1) / (1-2**(-val)) ) *(indice_k - indices_2k) + indice_k
tirage.append(i)
Var_Pick.append(VaRpickand)
Pickands_est.append(estimateur_pickands)
random=pd.DataFrame(list(zip(tirage,Pickands_est,Var_Pick)), columns=['tirage','Pickands_est','Var_Pick'])
random
plt.figure(figsize=(20,10))
plt.subplot(222)
(mu, sigma) = norm.fit(random['Pickands_est'])
n, bins, patches = plt.hist(random['Pickands_est'], density=True, color='blue', edgecolor='black', bins=int(180/5))
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution of the Pickands estimators")
plt.ylabel("Frequency")
plt.xlabel("Estimator value (%)")
plt.subplot(221)
(mu, sigma) = norm.fit(random['Var_Pick'])
n, bins, patches = plt.hist(random['Var_Pick'], density=True, color='blue', edgecolor='black', bins=int(180/5))
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution of the VaR obtained with Pickands")
plt.ylabel("Frequency")
plt.xlabel("VaR value (%)")<jupyter_output>C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:6: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:15: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
from ipykernel import kernelapp as app
<jupyter_text>We can see that when the returns follow a normal distribution, this shows up in the distribution of the parameters: here the fit to a normal curve is quite good.#### 3.12. Apply the same method as the one exposed in Question 11 to determine the distribution of VaR in the fBm case.We now simulate a fractional Brownian motion.<jupyter_code>fbm = FractionalBrownianMotion(t=15, hurst=0.5)  # the larger t is, the wider the range of the y-axis.
# Hurst is set to 0.5 to get the most neutral trend possible: with Hurst=0.5 there is no autocorrelation.
s = fbm.sample(1000000)
times = fbm.times(1000000)
plt.plot(times, s)
plt.show()
s<jupyter_output><empty_output><jupyter_text>The larger t is, the wider the range of the y-axis.
Hurst is set to 0.5 to get the most neutral trend possible: with Hurst=0.5 there is no autocorrelation.
Also, in theory, a fractional Brownian motion is supposed to have an expected value of 0. It gives us the following distribution:<jupyter_code>plt.subplot(212)
plt.hist(s, color='blue', edgecolor='black', bins=int(180/5))
plt.title("Distribution of the simulated fBm values")
plt.ylabel("Frequency")
plt.xlabel("Value")<jupyter_output><empty_output><jupyter_text>We apply the same method as before: we generate 10,000 draws of 500 returns taken from the simulated fractional Brownian path. Each draw is then sorted so that the VaR can be computed with the Pickands estimator: we recompute the Pickands estimator for each draw and plug it into the VaR formula.<jupyter_code>import random
tirage=[]
Pickands_est=[]
Var_Pick=[]
k=4
for i in range(1,10000):
aléa = s
sampling = random.choices(aléa, k=500)
sampling = sorted(sampling,reverse=True)
n= len(sampling)
p=0.99
indice_k= sampling[n-k+1]
indices_2k = sampling[n-2*k+1]
indices_4k = sampling[n-4*k+1]
estimateur_pickands = (1/np.log(2)) * np.log((indice_k - indices_2k) / (indices_2k - indices_4k))
if estimateur_pickands > 0 :
val = (estimateur_pickands)
VaRpickand = ((((k/(n*(1-p)))**(val))-1) / (1-2**(-val)) ) *(indice_k - indices_2k) + indice_k
else :
val = (-estimateur_pickands)
VaRpickand = ((((k/(n*(1-p)))**(val))-1) / (1-2**(-val)) ) *(indice_k - indices_2k) + indice_k
tirage.append(i)
Var_Pick.append(VaRpickand)
Pickands_est.append(estimateur_pickands)
random=pd.DataFrame(list(zip(tirage,Pickands_est,Var_Pick)), columns=['tirage','Pickands_est','Var_Pick'])
#random = random[random["Var_Pick"] != 0]
plt.figure(figsize=(20,10))
plt.subplot(222)
(mu, sigma) = norm.fit(random['Pickands_est'])
n, bins, patches = plt.hist(random['Pickands_est'], density=True, color='blue', edgecolor='black', bins=int(180/5))
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution of the Pickands estimators")
plt.ylabel("Frequency")
plt.xlabel("Estimator value (%)")
plt.subplot(221)
(mu, sigma) = norm.fit(random['Var_Pick'])
n, bins, patches = plt.hist(random['Var_Pick'], density=True, color='blue', edgecolor='black', bins=int(180/5))
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution of the VaR obtained with Pickands")
plt.ylabel("Frequency")
plt.xlabel("VaR value (%)")<jupyter_output>C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:6: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:15: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
from ipykernel import kernelapp as app
<jupyter_text>On the Pickands estimator side, the estimates consistently follow a roughly normal distribution. However, depending on the draws, the distributions of the VaR are not very similar and do not always follow a normal law, even with a Hurst coefficient of 0.5, which removes any autocorrelation.
This is rather surprising given the properties of the process seen in class: a Gaussian process with zero mean and a first term equal to 0.
As with normally distributed returns, we expected to obtain a perfectly normal distribution for the 99% VaR; the discrepancy most likely comes from the small amount of data used.
#### 3.13. Represent in a graph the nonparametric VaR as a function of the bandwidth parameter of the kernel. Explain the method used to get this result.The goal of this exercise is to model the VaR of this asset as a function of the bandwidth, a parameter of the kernel density estimate.
The final result will be a graph with the bandwidth parameter on the x-axis and the associated VaR on the y-axis. We first look at the distribution of the returns under study, compared with a normal distribution.<jupyter_code>plt.figure(figsize=(10,6))
(mu, sigma) = norm.fit(db['Rendements'])
n, bins, patches = plt.hist(db['Rendements'], density=True, color='blue', edgecolor='black', bins=int(220/5))
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
plt.title("Distribution of the returns")
plt.ylabel("Frequency")
plt.xlabel("Return value (%)")<jupyter_output>C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
This is separate from the ipykernel package so we can avoid doing imports until
<jupyter_text>We observe that our returns are fairly close to a normal distribution, so we will try to model them with a Gaussian kernel density estimate. Below, the series of returns is estimated with a Gaussian kernel and the bandwidth is fixed at 0.3; this is the value we will vary afterwards.<jupyter_code>rendements = np.array(db["Rendements"])
gkde = stats.gaussian_kde(rendements, bw_method=0.3)
ind = sorted(np.array(db["Rendements"]))
kdepdf = gkde.evaluate(ind)
plt.figure()
# histogram of the data ('normed' was removed in Matplotlib 3.1, so 'density=True' is used)
plt.hist(rendements, density=True, color='blue', edgecolor='black', bins=int(220/5))
# reference normal density
plt.plot(ind, stats.norm.pdf(ind), color="r", label='Normal law')
# density estimated with the Gaussian kernel
plt.plot(ind, kdepdf, label='Gaussian kernel density', color="g")
plt.title('Kernel Density Estimation')
plt.legend()
plt.show()<jupyter_output>C:\Users\louis\Anaconda3\lib\site-packages\ipykernel_launcher.py:9: MatplotlibDeprecationWarning:
The 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.
if __name__ == '__main__':
<jupyter_text>To obtain the 99% VaR as the bandwidth varies, we build the function below. It takes a minimum and a maximum bandwidth, which define the interval over which the bandwidth varies in steps of 0.01. For each bandwidth, the function accumulates the Gaussian kernel density until it reaches 1% of its total mass; the value "placement" is the index of the associated return, which is then looked up in the sorted array "ind".<jupyter_code>def VaR_KDE(val_min,val_max,pas):
param_bandwidth = []
VaR_99 = []
for i in np.arange(val_min,val_max,pas):
rendements = np.array(db["Rendements"])
gkde=stats.gaussian_kde(rendements,bw_method = i)
ind = sorted(np.array(db["Rendements"]))#np.linspace(-4,4,10000)
kdepdf = gkde.evaluate(ind)
limite = sum(kdepdf)*0.01
placement = 0
somme=0
while somme < limite:
somme += kdepdf[placement]
placement += 1
result = ind[placement]
param_bandwidth.append(i)
VaR_99.append(result)
return pd.DataFrame(list(zip(param_bandwidth,VaR_99)), columns=['param_bandwidth','VaR_99'])<jupyter_output><empty_output><jupyter_text>Nous utilisons ici la fonction pour calculer la VaR à 99% pour chaque bandwidth. Ceux-ci évoluent par pas de 0.01 entre 0.1 et 10.1<jupyter_code>KDE_VaR = VaR_KDE(0.1,10.1,0.01)
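# Added aside (not in the original notebook): the loop above approximates the CDF by
# summing density values at the observed points, which ignores the spacing between points.
# A hedged alternative, assuming `stats` is scipy.stats as used above, is to use the exact
# integral of the fitted kernel density and locate the 1% quantile directly:
def var_kde_integrated(returns, bw, p=0.99):
    """Sketch: 99% VaR from the integrated Gaussian KDE (illustrative helper)."""
    kde = stats.gaussian_kde(returns, bw_method=bw)
    grid = np.sort(np.asarray(returns))
    for x in grid:
        # integrate_box_1d gives the exact mass of the KDE below x
        if kde.integrate_box_1d(-np.inf, x) >= 1 - p:
            return x
    return grid[-1]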
KDE_VaR
plt.plot(KDE_VaR["param_bandwidth"],KDE_VaR["VaR_99"])
plt.title("Evolution de la VaR en fonction de différents BandWidth")
plt.xlabel("Bandwidth")
plt.ylabel("VaR 99 %")<jupyter_output><empty_output><jupyter_text>On voit que le graph est assez atypique, en marche d'escaliers, ce qui est assez normal, étant donné que la fonction marche par placement/position.Ainsi donc la VaR évouluera en pallier. Aussi, la mauvaise spécification du risque peut-être assez importante en fonction du choix du Bandwidth. En effet, la VaR est très différente entre un bandwidth proche de 0 et proche de 10.Il est intéressant de remarquer que les propriétés géométriques du bandwidth font que la distribution (si le paramètre est négatif) est miroire<jupyter_code>KDE_VaR2 = VaR_KDE(-10.02,10.02,0.02)
plt.plot(KDE_VaR2["param_bandwidth"],KDE_VaR2["VaR_99"])<jupyter_output><empty_output><jupyter_text>## 4 Model risk as specification uncertainty#### Question 14. Using the VaRs and ESs implemented in the first two sections, determine the diameter for VaRs as well as the diameter for ESs. Comment the result with respect to model risk: is it more relevant to use ES or VaR?On récupère dans un premier temps un dictionnaire comprenant l'ensemble des valeurs de nos VaR et de nos ES.<jupyter_code>def VaR_empirical(p,db):
tri=db.sort_values(by=["Rendements"])
NR=1-p
min=int(round(db.shape[0]*NR,0))
VaR_e = tri.iloc[min,3]
return VaR_e
def VaR_normale(p,db):
alpha=scipy.stats.norm.ppf(1-p)
VaR_n = np.mean(db["Rendements"]) + alpha * np.std(db["Rendements"])
return VaR_n
def VaR_non_para(db):
valeur = 1000000
simulation = np.random.normal(np.mean(db["Rendements"]), np.std(db["Rendements"]), valeur)
Mte_carlo_VaR = np.percentile(simulation, 2)
return Mte_carlo_VaR
def VaR_Pickands(p,db):
VaR_pick= EVT(db,3,p)
return VaR_pick
# set of VaR definitions (at level p) that we apply to a dataframe
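# Added aside (not in the original notebook): the Expected Shortfall used in the next cell
# is simply the average of the returns that fall below a given VaR level,
#   ES_p = E[ R | R < VaR_p ].
# A minimal sketch of that definition, assuming db["Rendements"] holds the returns:
def expected_shortfall(db, var_level):
    """Mean of the returns lying below the VaR threshold (illustrative helper)."""
    tail = db[db["Rendements"] < var_level]["Rendements"]
    return tail.mean()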
<jupyter_output><empty_output><jupyter_text>Calcul des ES en fonction des niveaux de nos VaR des deux premières parties et intégration du calcul de nos diamètres<jupyter_code>def dict_VaR(p,df,db):
df[f"VaR_{p}"]= [VaR_empirical(p,db), VaR_normale(p,db),VaR_non_para(db),VaR_Pickands(p,db)]
moyenne=[]
for i in df[f"VaR_{p}"].values:
t= db[db["Rendements"] < i]["Rendements"]
moy= np.sum(t)/t.shape[0]
moyenne.append(moy)
df[f"ES_{p}"]=moyenne
a= max(df[f"ES_{p}"])-df[f"ES_{p}"].min()
b= max(df[f"VaR_{p}"])-df[f"VaR_{p}"].min()
print(f"Le Diamètre de nos ES à un seuil de {p*100}% est de " + str(round(a,2)))
print(f"Le Diamètre de nos VaR à un seuil de {p*100}% est de " + str(round(b,2)))
return df
Quest4_14 =pd.DataFrame()
dict_VaR(0.99,Quest4_14,db)<jupyter_output>Le Diamètre de nos ES à un seuil de 99.0% est de 0.41
Le Diamètre de nos VaR à un seuil de 99.0% est de 0.98
<jupyter_text>The diameter is the difference between the maximum and the minimum value across models. The ES is much more stable than the VaR, which seems logical: the ES is an average while the VaR is a single quantile, so the averages obtained with the different methods we implemented are closer to one another. This is a good sign with respect to the work done so far. We therefore prefer the ES to the VaR, since the dispersion across models is larger for the VaR than for the ES. As we can never be fully certain of the distribution of the data (it may change over time), the ES is the better choice because it is more stable.#### Question 15. Is your conclusion at Question 14 the same if you change the confidence level from 99% to 90%, 95%, 98%, 99.5%, 99.9%, and 99.99%?<jupyter_code>Quest4_15=pd.DataFrame()
a=[0.9,0.95,0.98,0.99,0.995,0.999]
for i in a:
dict_VaR(i,Quest4_15,db)
Quest4_15<jupyter_output><empty_output><jupyter_text>We reach the same conclusions for every confidence level tested. There is one problem, however: for a probability of 0.999, the VaR estimated with Pickands takes a value that is far too large. Since our sample contains no such value, the corresponding ES is NaN.
The Pickands VaR should be examined in more detail to see whether something is wrong.
For now, we assume the issue comes from the lack of data.
The previous conclusions are confirmed: the diameter of the Expected Shortfall is always smaller than that of the Value-at-Risk for every confidence level (except the last one, where the results are distorted by the lack of data; the higher the confidence level, the smaller the diameter seems to become).#### Question 16. Add a noise process (say a Gaussian white noise) to the price return process and calculate the average impact on the VaR for each model. Which VaR method is the most robust? Display your results for various amplitudes of noise.A Gaussian white noise is a purely random process whose realizations follow a normal distribution with a given mean and variance. We first create a function that adds noise to our dataset: each return receives an additive Gaussian noise with a mean that we set to zero (it would be interesting to vary this parameter, so it is left as an argument) and an amplitude given by std, the standard deviation of the noise. The same function also plots the noisy returns.<jupyter_code>db2=db.copy(deep=True)
def Noise(mean, std, data):
    rdt = data["Rendements"].astype(float)
    num_samples = len(data)
    # add a single Gaussian white noise draw to the whole series
    # (the original looped over the series and overwrote the column at each step,
    #  which only kept the last draw anyway)
    data["Rendements"] = rdt + np.random.normal(mean, std, size=num_samples)
    plt.figure(figsize=(20,10))
    plt.plot(data["Rendements"])
    plt.plot(db["Rendements"])
    plt.legend(["noisy data", "initial data"])
    plt.ylabel("Returns")
    plt.xlabel("Time")
    plt.show()
#Noise(0,100)
# far too much noise, visible from the scale, which is expected since the returns are in %
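# Added sketch (not in the original notebook): Question 16 asks for the impact on the VaR
# for several noise amplitudes. A hedged way to tabulate it, reusing the Noise function
# above and the VaR functions defined earlier, could look like this:
def var_impact_by_amplitude(amplitudes, p=0.99):
    """For each noise std, add noise to a fresh copy of db and record each model's VaR."""
    rows = []
    for std in amplitudes:
        noisy = db.copy(deep=True)
        Noise(0, std, noisy)  # note: this also displays the comparison plot
        rows.append({'std': std,
                     'VaR_empirical': VaR_empirical(p, noisy),
                     'VaR_normale': VaR_normale(p, noisy),
                     'VaR_non_para': VaR_non_para(noisy),
                     'VaR_Pickands': VaR_Pickands(p, noisy)})
    return pd.DataFrame(rows)
# Example: var_impact_by_amplitude([0.5, 1, 3, 6])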
Noise(0,3,db2)<jupyter_output><empty_output><jupyter_text>Both in the array and in the plot, an amplitude of 3 adds a lot of noise: the maximum and minimum of our returns have been multiplied by 2 or even 3 times their initial value. We make sure to store the data needed for the rest of the exercise in dataframes, here db2.<jupyter_code> db2.head() # the returns have indeed changed<jupyter_output><empty_output><jupyter_text>We now reuse our VaR functions to see the difference with the previous questions.<jupyter_code>def dict_VaR2(p,df,db):
df[f"VaR_{p}"]= [VaR_empirical(p,db), VaR_normale(p,db),VaR_non_para(db),VaR_Pickands(p,db)]
return df
Quest4_16=pd.DataFrame()
dict_VaR2(0.99,Quest4_16,db2)<jupyter_output><empty_output><jupyter_text>With an amplitude of 3, the volatility of our returns is heavily contaminated by noise and our VaRs are strongly pushed downwards. With that much noise, the empirical VaR is no longer a viable option (the data are far too random for it to be useful). Since the noise is Gaussian, I would rather rely on a non-parametric approach, because there is too much instability (not a parametric law, since our normality assumption was made on the returns, not on the noise). In our case that would be the VaR around -6.5.<jupyter_code>db3=db.copy(deep=True)
Quest4_16b=pd.DataFrame()
Noise(0,6,db3)
dict_VaR2(0.99,Quest4_16b,db3)<jupyter_output><empty_output><jupyter_text>All of our VaRs are quite unstable because of the noise, and the larger the amplitude, the more they are pushed downwards. It would be interesting to quantify the impact of a change in amplitude on the VaRs (that is, the derivatives with respect to the amplitude).#### Question 17. Remove the noise of the price return process using the projection of your signal at one scale. Do this with scaling functions (also called father wavelet).The goal is to take a noisy series and remove the noise. For that, we use wavelets, which separate the different signals present in our series of returns. Here the purpose is to isolate the random part of the series from the underlying one, but in other settings wavelets can also separate a trend or a seasonal component. Concretely, we use the Haar (staircase) wavelet because it is much simpler than a Daubechies wavelet. Having failed to code it ourselves, we relied on the pywt library and on a Haar function found online, for which we tested several settings to tune it as well as possible. We then plotted the denoised series against the initial one to compare them. The results are unstable and can change when the cells are re-run: at times the Haar function seemed to capture a kind of seasonal pattern (by semester), with the denoised series rather flat over the summer periods. This depends first of all on the noise, but since we made no assumption about temporality in this exercise, other signals may be present; our returns can indeed exhibit seasonality and trends. As our dataset only covers two years, we will not dwell on this.<jupyter_code>pywt.families()<jupyter_output><empty_output><jupyter_text>Tuning of our Haar function (the wavelet chosen for this exercise). We found the function online and tested many parameter values to get it running (which was initially difficult). Increasing the number of levels increases how finely the initial data are decomposed; since we only want to separate the noise from the data, we fix the level at 1 and ignore any other signals that may exist.<jupyter_code># function found online, slightly modified to run best on our sample (scaling parameter)
def haar ( data_à_débruiter, level, s=0.3 ):
    # s is a scaling parameter: several iterations were run to find its optimum
h = [ 1, 1 ]; # lowpass filter
g = [ 1, -1 ]; # highpass filter
f = len ( h ); # length of the filter
    # changing the filter coefficients a little does not make a big difference,
    # so the default parameters were kept
t = data_à_débruiter; # 'workspace' array
l = len ( t ); # length of the current signal
y = [0] * l; # initialise output
for i in range ( level ):
y [ 0:l ] = [0] * l; # initialise the next level
l2 = l // 2; # half approximation, half detail
for j in range ( l2 ):
for k in range ( f ):
y [j] += t [ 2*j + k ] * h [ k ] * s;
y [j+l2] += t [ 2*j + k ] * g [ k ] * s;
l = l2; # continue with the approximation
t [ 0:l ] = y [ 0:l ] ;
    return y<jupyter_output><empty_output><jupyter_text>We create a function returning the noisy, denoised and initial data.<jupyter_code>def Data_bruit():
données_bruitées = db2["Rendements"].values; #données bruitées
données_débruitées= haar (db2["Rendements"].values, 1)
données_initiale= db["Rendements"]
return données_bruitées, données_débruitées, données_initiale
rdt_bruitées,rdt_débruitées,rdt_initiaux =Data_bruit()
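# Added aside (not in the original notebook): pywt, which is imported above for
# pywt.families(), ships a Haar transform. A hedged comparison with the hand-written
# function could look like this (pywt.dwt returns the approximation and detail parts):
cA, cD = pywt.dwt(db2["Rendements"].values, 'haar')   # one-level Haar decomposition
approx_only = pywt.idwt(cA, None, 'haar')             # reconstruct from the scaling part only
# 'approx_only' plays the same role as the hand-made denoised series rdt_débruitées.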
<jupyter_output><empty_output><jupyter_text>Plot of the denoised data against the initial data.<jupyter_code>plt.figure(figsize= (20, 10))
plt.plot(rdt_initiaux)
plt.plot(rdt_débruitées)
plt.ylabel("Returns")
plt.xlabel("Time")
plt.legend(["initial data", "denoised data"])<jupyter_output><empty_output><jupyter_text>Our denoised series sometimes seems to behave seasonally: re-running it several times, we obtain seasonal patterns roughly one time out of three. The noise we applied was too strong, and in those cases the function struggles to identify the signals. Compared with the previous graph, however, we get much closer to the initial data, which is satisfying despite the random part of these returns. We know this random part is not the ideal result, but we will still move on to Question 18. Ideally, we expected this function to capture only the white-noise signal we added to the data, but there is probably a trend or seasonality effect in the data that would deserve further analysis.#### Question 18. How do your VaR measures vary if they are applied to the denoised series? Display your results for various projection scales. Compare qualitatively your results with the ones of Question 16For this question, we reuse the functions created at the beginning of Part 4, replacing the base db with the dataframe holding the denoised data.<jupyter_code>Quest18=pd.DataFrame()
db18=db.copy()
db18["Rendements"]=rdt_débruitées
dict_VaR(0.99,Quest18,db18)
<jupyter_output>Le Diamètre de nos ES à un seuil de 99.0% est de 0.71
Le Diamètre de nos VaR à un seuil de 99.0% est de 1.09
<jupyter_text>We notice something very interesting here: the gap between our diameters has narrowed, although the conclusions do not change. We had expected the ES diameter to be larger with these data, since the plot seems to show more downside volatility (which necessarily affects the average of the worst returns).
<jupyter_code># We now change the scale and use the dataframe db3, whose noise amplitude is twice as large
bruit,debruit,data_initiale =Data_bruit()
Quest18b=pd.DataFrame()
db18b=db.copy()
db18b["Rendements"]=debruit
dict_VaR(0.99,Quest18b,db18b)
<jupyter_output>Le Diamètre de nos ES à un seuil de 99.0% est de 0.82
Le Diamètre de nos VaR à un seuil de 99.0% est de 1.39
|
no_license
|
/Gestion_des_risques_F_git.ipynb
|
LouisGrunenwald/Risk_Managment
| 40 |
<jupyter_start><jupyter_text>##1) Numpy / 벡터 & 행렬
### A. Numerical Python - Numpy<jupyter_code>import numpy as np
test_array = np.array(["1", "4", 5, 8], float)
print(test_array)
print(type(test_array[3]))
test_array = np.array([1, 4, 5, "8"], np.float32) # String Type의 데이터를 입력해도
print(test_array)
print(type(test_array[3])) # Float Type으로 자동 형변환을 실시
print(test_array.dtype) # Array(배열) 전체의 데이터 Type을 반환함
print(test_array.shape) # Array(배열) 의 shape을 반환함
tensor = [[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]]
print(np.array(tensor, int).shape)
print(np.array(tensor, int).ndim)
print(np.array(tensor, int).size)
<jupyter_output><empty_output><jupyter_text>> Reshape: changes the shape of an array (the number of elements stays the same)
> flatten: converts a multi-dimensional array into a 1-D array<jupyter_code>tensor = [[1, 2, 3, 4], [1, 2, 3, 4]]
print(np.array(tensor, int))
print("-----------------------")
print(np.array(tensor).reshape(2, 2, 2))
print("-----------------------")
print(np.array(tensor).reshape(8))
print("-----------------------")
print(np.array(tensor).reshape(-1, 2))
print("-----------------------")
print(np.array(tensor).reshape(4, -1, 2))
print("-----------------------")
print(np.array(tensor).flatten())
<jupyter_output><empty_output><jupyter_text>> Slicing: used to extract only the desired portion of an array<jupyter_code>a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], int)
print(a[:,2:]) # 전체 Row의 2열 이상
print("-----------------------")
print(a[1,1:3]) # 1 Row의 1열 ~ 2열
print("-----------------------")
print(a[1:3]) # 1 Row ~ 2Row의 전체
<jupyter_output><empty_output><jupyter_text>> arange: creates a list of values over a specified range
> zeros: creates an array initialized with 0
> ones: creates an array initialized with 1
> empty: only allocates the memory for an array (it may contain garbage values)<jupyter_code>print(np.arange(30))
print("-----------------------")
print(np.arange(30).reshape(-1, 5))
print("-----------------------")
print(np.arange(3, 5, 0.5))
print("-----------------------")
print(np.arange(30))
print("-----------------------")
print(np.zeros(shape=(10,), dtype=np.int8))
print("-----------------------")
print(np.zeros((2, 5)))
print("-----------------------")
print(np.ones(shape=(10,), dtype=np.int8))
print("-----------------------")
print(np.ones((2, 5)))
print("-----------------------")
print(np.empty(shape=(10,), dtype=np.int8))
print("-----------------------")
print(np.empty((3, 5)))
<jupyter_output><empty_output><jupyter_text>> something_like : 배열의 크기만큼 1, 0 또는 empty array를 반환<jupyter_code>test_matrix = np.arange(30).reshape(5, 6)
print(np.ones_like(test_matrix))
print("-----------------------")
print(np.zeros_like(test_matrix))
print("-----------------------")
print(np.empty_like(test_matrix)) # 메모리에 이전 데이터가 남아서 나오는 것
<jupyter_output><empty_output><jupyter_text>> identity : 단위행렬
> eye : 대각선이 1 인 행렬
> diag : 대각 행렬의 값만 추출
> random sampling : 데이터 분포에 따른 sampling으로 array 생성<jupyter_code>print(np.identity(n=3, dtype=np.int8))
print("-----------------------")
print(np.eye(N=3, M=5, dtype=np.int8)) # N = row, M = column
print(np.eye(3))
print(np.eye(3, 5, k=2)) # k = start index
print("-----------------------")
test_matrix = np.arange(9).reshape(3, 3)
print(np.diag(test_matrix))
print(np.diag(test_matrix, k=1)) # k = start index
print("-----------------------")
print(np.random.uniform(0, 1, 10).reshape(2, 5)) # 균등분포
print(np.random.normal(0, 1, 10).reshape(2, 5)) # 정규분포
print("-----------------------")
print("-----------------------")
<jupyter_output><empty_output><jupyter_text>> axis : 모든 operation function 을 실행할 때, 기준이 되는 dimension 축<jupyter_code>test_array = np.arange(1, 13).reshape(3, 4)
print(test_array)
print("-----------------------")
print(test_array.sum(axis=1))
print("-----------------------")
print(test_array.sum(axis=0))
<jupyter_output><empty_output><jupyter_text>> concatenate : Numpy array를 합치는 함수<jupyter_code>a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
print(np.vstack((a, b)))
print("-----------------------")
a = np.array([[1], [2], [3]])
b = np.array([[4], [5], [6]])
print(np.hstack((a, b)))
print("-----------------------")
print(np.concatenate((a, b), axis=0))
print(np.concatenate((a, b), axis=1))
<jupyter_output><empty_output><jupyter_text>> Numpy Performance
-. 속도 : for loop < list comprehension < numpy<jupyter_code>def sclar_vector_product(scalar, vector):
result = []
for value in vector:
result.append(scalar * value)
return result
iternation_max = 100000000
vector = list(range(iternation_max))
scalar = 2
%timeit sclar_vector_product(scalar, vector) # for loop을 이용한 성능
%timeit [scalar * value for value in range(iternation_max)] # list comprehension을 이용한 성능
%timeit np.arange(iternation_max) * scalar # numpy를 이용한 성능
<jupyter_output><empty_output><jupyter_text>> All & Any<jupyter_code>a = np.arange(10)
print(a > 5)
print("-----------------------")
print(np.any(a > 5)) # any = 하나라도 조건에 만족하면 True
print(np.any(a < 0))
print("-----------------------")
print(np.all(a > 5))
print(np.all(a < 10)) # all = 모두가 조건에 만족하면 True
<jupyter_output><empty_output><jupyter_text>> np.where : where(condition, TRUE, FALSE)<jupyter_code>a = np.array([1, 3, 0], float)
print(np.where(a > 0, 3, 6))
a = np.arange(10)
print(np.where(a > 5)) #Index 값 반환
a = np.array([1, np.NaN, np.Inf], float)
print(np.isnan(a))
print(np.isfinite(a))
<jupyter_output><empty_output><jupyter_text>> argmax & argmin : array 內 최대값 또는 최소값의 index를 반환 (axis 기반 반환)<jupyter_code>a = np.array([1, 2, 4, 5, 8, 78, 23, 3])
print(np.argmax(a))
print(np.argmin(a))
<jupyter_output><empty_output><jupyter_text>> boolean index : 특정 조건에 따른 값을 배열 형태로 추출<jupyter_code>test_array = np.array([1, 4, 0, 2, 3, 8, 9, 7], float)
print(test_array > 3)
print(test_array[test_array > 3])
condition = test_array > 3
print(test_array[condition])
A = np.array([
[12, 13, 14, 12, 16, 14, 11, 10, 9],
[11, 14, 12, 15, 15, 16, 10, 12, 11],
[10, 12, 12, 15, 14, 16, 10, 12, 12],
[ 9, 11, 16, 15, 14, 16, 15, 12, 10],
[12, 11, 16, 14, 10, 12, 16, 12, 13],
[10, 15, 16, 14, 14, 14, 16, 15, 12],
[13, 17, 14, 10, 14, 11, 14, 15, 10],
[10, 16, 12, 14, 11, 12, 14, 18, 11],
[10, 19, 12, 14, 11, 12, 14, 18, 10],
[14, 22, 17, 19, 16, 17, 18, 17, 13],
[10, 16, 12, 14, 11, 12, 14, 18, 11],
[10, 16, 12, 14, 11, 12, 14, 18, 11],
[10, 19, 12, 14, 11, 12, 14, 18, 10],
[14, 22, 12, 14, 11, 12, 14, 17, 13],
[10, 16, 12, 14, 11, 12, 14, 18, 11]])
B = A < 15
print(B)
print("-----------------------")
print(B.astype(np.int))
<jupyter_output><empty_output><jupyter_text>> fancy index : numpy는 array를 index value로 사용해서 값을 추출하는 방법<jupyter_code>a = np.array([2, 4, 6, 8], float)
b = np.array([0, 0, 1, 3, 2, 1], int)
print(a[b])
print(a.take(b))
a = np.array([[1, 4], [9, 16]], float)
b = np.array([0, 0, 1, 1, 0], int)
c = np.array([0, 1, 1, 1, 1], int)
a[b,c] # b를 row index, c를 column index로 변환하여 표시함
<jupyter_output><empty_output><jupyter_text>##1) Numpy / Vectors & Matrices
### B. What is a vector?> Vector
-. Represents a point in space
-. Expresses a position relative to the origin
-. Multiplying by a scalar only changes its length
-. A list or array whose elements are numbers <jupyter_code><jupyter_output><empty_output><jupyter_text>>■ Norm: the distance from the origin
-. L1 norm: the sum of the absolute changes of each component
-. L2 norm: the Euclidean distance, computed with the Pythagorean theorem
>◎ The geometric properties differ depending on the norm (chosen according to the goal of the machine-learning task)
-. L1 norm: robust learning, Lasso regression
-. L2 norm: Laplace approximation, Ridge regression <jupyter_code><jupyter_output><empty_output><jupyter_text>> Computing the angle between two vectors (only possible with the L2 norm)
-. Uses the law of cosines
→ angle between two vectors: arccos( inner(x, y) / (L2(x) * L2(y)) )
→ inner product: the length of the projected vector
> The inner product can be used to measure the similarity between two vectors <jupyter_code><jupyter_output><empty_output><jupyter_text>##1) Numpy / Vectors & Matrices
### C. What is a matrix?> Matrix: a two-dimensional array whose elements are vectors
※ In numpy, the row is the basic unit!
> Transpose matrix: the matrix with rows and columns swapped (denoted with a superscript T)
-. Xij -> Xji> Matrix product: the matrix whose (i, j) entry is the inner product of the i-th row vector and the j-th column vector
-. numpy uses the @ operator for it>※ Unlike the mathematical convention, numpy's inner function takes the inner product of the i-th "row vector" and the j-th "row vector"!
-. inner(x, y) can be thought of as computing x * yT mathematically<jupyter_code>import numpy as np
# x = 4 * 3
x = np.array([[1, 2, 3],
[2, 3, 4],
[4, 5, 6],
[6, 7, 8]])
# y = 2 * 3
y = np.array([[1, 2, 3],
[2, 3, 4]])
print(x.shape)
print(y.shape)
print()
print(np.inner(x, y))
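# Added check (not in the original): for 2-D arrays, numpy's inner matches x @ y.T
print(np.allclose(np.inner(x, y), x @ y.T))  # expected: True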
<jupyter_output><empty_output><jupyter_text>> Inverse matrix: the matrix that undoes the operation of a matrix A
-. Condition: it can only be computed when the number of rows equals the number of columns and the determinant is non-zero
-. AA-1 = A-1A = I (identity matrix)<jupyter_code>x = np.array([[1, -2, 3],
[7, 5, 0],
[-2, -1, 2]])
print(np.linalg.inv(x))
print()
print(x @ np.linalg.inv(x))
<jupyter_output><empty_output><jupyter_text>>※ When the inverse cannot be computed, the "pseudo-inverse", also called the "Moore-Penrose" inverse A+, is used instead.
-. It can be computed even when the numbers of rows and columns differ!
-. Case 1) more rows than columns (n >= m)
→ A+ = (AT * A)-1 * AT
-. Case 2) more columns than rows (n <= m)
→ A+ = AT * (A * AT)-1
※ The order of multiplication matters a lot!<jupyter_code>y = np.array([[0, 1],
[1, -1],
[-2, 1]])
print(np.linalg.pinv(y))
print()
print(np.linalg.pinv(y) @ y)
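# Added check (not in the original): for this tall matrix (more rows than columns),
# np.linalg.pinv(y) matches the Moore-Penrose formula (y.T @ y)^-1 @ y.T
print(np.allclose(np.linalg.pinv(y), np.linalg.inv(y.T @ y) @ y.T))  # expected: True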
<jupyter_output><empty_output><jupyter_text>> Linear regression: the coefficients can be found with the pseudo-inverse.
-. Since there are more data points (X), the number of rows (n) is larger than the number of columns (m).<jupyter_code># #### Regression with Scikit-Learn (the intercept is handled automatically)
# from sklearn.linear_model import LinearRegression
# model = LinearRegression()
# model.fit(X, y)
# y_test = model.predict(x_test)
# #### Moore-Penrose inverse (the intercept must be added manually)
# X_ = np.array([np.append(x, [1]) for x in X]) # add the intercept column
# beta = np.linalg.pinv(X_) @ y
# y_test = np.append(x, [1]) @ beta
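# Added sketch (not in the original): a tiny runnable version of the pseudo-inverse
# regression outlined above, on made-up data (X_demo and y_demo are hypothetical names).
X_demo = np.array([[1.0], [2.0], [3.0], [4.0]])
y_demo = np.array([3.1, 5.0, 7.2, 8.9])                      # roughly y = 2x + 1
X_demo_ = np.array([np.append(row, [1]) for row in X_demo])  # add the intercept column
beta_demo = np.linalg.pinv(X_demo_) @ y_demo
print(beta_demo)  # close to a slope of 2 and an intercept of about 1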
<jupyter_output><empty_output><jupyter_text>##2) Gradient Descent
### A. Gradient descent (gentle version)> Differentiation: the rate of change of a function's value as its variable moves (the slope of the tangent line)<jupyter_code># sympy can compute the derivative directly.
import sympy as sym
from sympy.abc import x
print(sym.diff(sym.poly(x**2 + 2*x + 3), x))
<jupyter_output><empty_output><jupyter_text>> Gradient ascent: used to locate a local maximum of a function (the derivative is added.)
> Gradient descent: used to locate a local minimum of a function (the derivative is subtracted.)> For multivariate functions with vector inputs, partial differentiation is used
> The gradient vector of per-variable partial derivatives is what gradient descent/ascent updates with.<jupyter_code><jupyter_output><empty_output><jupyter_text>##2) Gradient Descent
### B. Gradient descent (spicy version)<jupyter_code>import numpy as np
X = np.array([[1, 1], [1, 2], [2, 2],[2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
beta_gd = [10.1, 15.1, -6.5]
X_ = np.array([np.append(x, [1]) for x in X])
for t in range(5000):
error = y - X_ @ beta_gd
# error = error / np.linalg.norm(error)
grad = - np.transpose(X_) @ error
beta_gd = beta_gd - 0.01 * grad
print(beta_gd)
<jupyter_output><empty_output><jupyter_text>> Stochastic gradient descent (SGD): updates the parameters using only a subset of the data
> Non-convex objectives can also be optimized with SGD.
> Because SGD uses only part of the data, it makes more efficient use of computing resources (a minimal sketch follows below).<jupyter_code><jupyter_output><empty_output>
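<jupyter_text>Added sketch (not part of the original notebook): a minimal mini-batch SGD version of the gradient-descent loop above, reusing X_ and y from the previous cell. The batch size and learning rate are arbitrary illustrative choices.<jupyter_code>beta_sgd = np.array([10.1, 15.1, -6.5])
rng = np.random.default_rng(0)
for t in range(5000):
    idx = rng.choice(len(X_), size=2, replace=False)   # random mini-batch of 2 rows
    error = y[idx] - X_[idx] @ beta_sgd
    grad = - X_[idx].T @ error                          # gradient on the mini-batch only
    beta_sgd = beta_sgd - 0.01 * grad
print(beta_sgd)   # should land near the full-batch solution [1, 2, 3]<jupyter_output><empty_output>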
<jupyter_start><jupyter_text># Circuits 1: Compiling arbitrary single-particle basis rotations in linear depth
This is the first of several tutorials demonstrating the compilation of quantum circuits. These tutorials build on one another and should be studied in order. In this tutorial we will discuss the compilation of circuits for implementing arbitrary rotations of the single-particle basis of an electronic structure simulation. As an example, we show how one can use these methods to simulate the evolution of an arbitrary non-interacting fermion model.## Background
### Second quantized fermionic operators
In order to represent fermionic systems on a quantum computer one must first discretize space. Usually, one expands the many-body wavefunction in a basis of spin-orbitals $\varphi_p = \varphi_p(r)$ which are single-particle basis functions. For reasons of spatial efficiency, all NISQ (and even most error-corrected) algorithms for simulating fermionic systems focus on representing operators in second-quantization. Second-quantized operators are expressed using the fermionic creation and annihilation operators, $a^\dagger_p$ and $a_p$. The action of $a^\dagger_p$ is to excite a fermion in spin-orbital $\varphi_p$ and the action of $a_p$ is to annihilate a fermion from spin-orbital $\varphi_p$. Specifically, if electron $i$ is represented in a space of spin-orbitals $\{\varphi_p(r_i)\}$ then $a^\dagger_p$ and $a_p$ are related to Slater determinants through the equivalence,
$$
\langle r_0 \cdots r_{\eta-1} | a^\dagger_{0} \cdots a^\dagger_{\eta-1} | \varnothing\rangle \equiv \sqrt{\frac{1}{\eta!}}
\begin{vmatrix}
\varphi_{0}\left(r_0\right) & \varphi_{1}\left( r_0\right) & \cdots & \varphi_{\eta-1} \left( r_0\right) \\
\varphi_{0}\left(r_1\right) & \varphi_{1}\left( r_1\right) & \cdots & \varphi_{\eta-1} \left( r_1\right) \\
\vdots & \vdots & \ddots & \vdots\\
\varphi_{0}\left(r_{\eta-1}\right) & \varphi_{1}\left(r_{\eta-1}\right) & \cdots & \varphi_{\eta-1} \left(r_{\eta-1}\right) \end{vmatrix}
$$
where $\eta$ is the number of electrons in the system, $|\varnothing \rangle$ is the Fermi vacuum and $\varphi_p(r)=\langle r|\varphi_p \rangle$ are the single-particle orbitals that define the basis. By using a basis of Slater determinants, we ensure antisymmetry in the encoded state.
### Rotations of the single-particle basis
Very often in electronic structure calculations one would like to rotate the single-particle basis. That is, one would like to generate new orbitals that are formed from a linear combination of the old orbitals. Any particle-conserving rotation of the single-particle basis can be expressed as
$$
\tilde{\varphi}_p = \sum_{q} \varphi_q u_{pq}
\quad
\tilde{a}^\dagger_p = \sum_{q} a^\dagger_q u_{pq}
\quad
\tilde{a}_p = \sum_{q} a_q u_{pq}^*
$$
where $\tilde{\varphi}_p$, $\tilde{a}^\dagger_p$, and $\tilde{a}^\dagger_p$ correspond to spin-orbitals and operators in the rotated basis and $u$ is an $N\times N$ unitary matrix. From the Thouless theorem, this single-particle rotation
is equivalent to applying the $2^N \times 2^N$ operator
$$
U(u) = \exp\left(\sum_{pq} \left[\log u \right]_{pq} \left(a^\dagger_p a_q - a^\dagger_q a_p\right)\right)
$$
where $\left[\log u\right]_{pq}$ is the $(p, q)$ element of the matrix $\log u$.
There are many reasons that one might be interested in performing such basis rotations. For instance, one might be interested in preparing the Hartree-Fock (mean-field) state of a chemical system, by rotating from some initial orbitals (e.g. atomic orbitals or plane waves) into the molecular orbitals of the system. Alternatively, one might be interested in rotating from a basis where certain operators are diagonal (e.g. the kinetic operator is diagonal in the plane wave basis) to a basis where certain other operators are diagonal (e.g. the Coulomb operator is diagonal in the position basis). Thus, it is a very useful thing to be able to apply circuits corresponding to $U(u)$ on a quantum computer in low depth.
### Compiling linear depth circuits to rotate the orbital basis
OpenFermion prominently features routines for implementing the linear depth / linear connectivity basis transformations described in [Phys. Rev. Lett. 120, 110501](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.110501). While we will not discuss this functionality here, we also support routines for compiling the more general form of these transformations which do not conserve particle-number, known as a Bogoliubov transfomation, using routines described in [Phys. Rev. Applied 9, 044036](https://journals.aps.org/prapplied/abstract/10.1103/PhysRevApplied.9.044036). We will not discuss the details of how these methods are implemented here and instead refer readers to those papers. All that one needs in order to compile the circuit $U(u)$ using OpenFermion is the $N \times N$ matrix $u$, which we refer to in documentation as the "basis_transformation_matrix". Note that if one intends to apply this matrix to a computational basis state with only $\eta$ electrons, then one can reduce the number of gates required by instead supplying the $\eta \times N$ rectangular matrix that characterizes the rotation of the occupied orbitals only. OpenFermion will automatically take advantage of this symmetry.## OpenFermion example implementation: exact evolution under tight binding models
In this example will show how basis transforms can be used to implement exact evolution under a random Hermitian one-body fermionic operator
\begin{equation}
H = \sum_{pq} T_{pq} a^\dagger_p a_q.
\end{equation}
That is, we will compile a circuit to implement $e^{-i H t}$ for some time $t$. Of course, this is a tractable problem classically but we discuss it here since it is often useful as a subroutine for more complex quantum simulations. To accomplish this evolution, we will use basis transformations. Suppose that $u$ is the basis transformation matrix that diagonalizes $T$. Then, we could implement $e^{-i H t}$ by implementing $U(u)^\dagger (\prod_{k} e^{-i \lambda_k Z_k}) U(u)$ where $\lambda_k$ are the eigenvalues of $T$.
Below, we initialize the T matrix characterizing $H$ and then obtain the eigenvalues $\lambda_k$ and eigenvectors $u_k$ of $T$. We print out the OpenFermion FermionOperator representation of $T$.<jupyter_code>import openfermion
import numpy
# Set the number of qubits in our example.
n_qubits = 3
simulation_time = 1.
random_seed = 8317
# Generate the random one-body operator.
T = openfermion.random_hermitian_matrix(n_qubits, seed=random_seed)
# Diagonalize T and obtain basis transformation matrix (aka "u").
eigenvalues, eigenvectors = numpy.linalg.eigh(T)
basis_transformation_matrix = eigenvectors.transpose()
# Print out familiar OpenFermion "FermionOperator" form of H.
H = openfermion.FermionOperator()
for p in range(n_qubits):
for q in range(n_qubits):
term = ((p, 1), (q, 0))
H += openfermion.FermionOperator(term, T[p, q])
print(H)<jupyter_output>(0.5367212624097257+0j) [0^ 0] +
(-0.26033703159240107+3.3259173741375454j) [0^ 1] +
(1.3433603748462144+1.544987250567917j) [0^ 2] +
(-0.26033703159240107-3.3259173741375454j) [1^ 0] +
(-2.9143303700812435+0j) [1^ 1] +
(-1.52843836446248+1.3527486791390022j) [1^ 2] +
(1.3433603748462144-1.544987250567917j) [2^ 0] +
(-1.52843836446248-1.3527486791390022j) [2^ 1] +
(2.261633626116526+0j) [2^ 2]
<jupyter_text>Now we're ready to make a circuit! First we will use OpenFermion to generate the basis transform $U(u)$ from the basis transformation matrix $u$ by calling the Bogoliubov transform function (named as such because this function can also handle non-particle conserving basis transformations). Then, we'll apply local $Z$ rotations to phase by the eigenvalues, then we'll apply the inverse transformation. That will finish the circuit. We're just going to print out the first rotation to keep things easy-to-read, but feel free to play around with the notebook.<jupyter_code>import openfermion
import cirq
# Initialize the qubit register.
qubits = cirq.LineQubit.range(n_qubits)
# Start circuit with the inverse basis rotation, print out this step.
inverse_basis_rotation = cirq.inverse(openfermion.bogoliubov_transform(qubits, basis_transformation_matrix))
circuit = cirq.Circuit(inverse_basis_rotation)
print(circuit)
# Add diagonal phase rotations to circuit.
for k, eigenvalue in enumerate(eigenvalues):
phase = -eigenvalue * simulation_time
circuit.append(cirq.rz(rads=phase).on(qubits[k]))
# Finally, restore basis.
basis_rotation = openfermion.bogoliubov_transform(qubits, basis_transformation_matrix)
circuit.append(basis_rotation)<jupyter_output>0: ───────────────────────────────────────────────PhISwap(0.25)──────────Rz(0)───────────────────────────
│
1: ─────────────PhISwap(0.25)──────────Z^0.522────PhISwap(0.25)^-0.656───PhISwap(0.25)──────────Rz(0)────
│ │
2: ───Z^0.762───PhISwap(0.25)^-0.249───Z^-0.519──────────────────────────PhISwap(0.25)^-0.479───Rz(-π)───
<jupyter_text>Finally, we can check whether our circuit applied to a random initial state with the exact result. Print out the fidelity with the exact result.<jupyter_code># Initialize a random initial state.
initial_state = openfermion.haar_random_vector(
2 ** n_qubits, random_seed).astype(numpy.complex64)
# Numerically compute the correct circuit output.
import scipy
hamiltonian_sparse = openfermion.get_sparse_operator(H)
exact_state = scipy.sparse.linalg.expm_multiply(
-1j * simulation_time * hamiltonian_sparse, initial_state)
# Use Cirq simulator to apply circuit.
simulator = cirq.Simulator()
result = simulator.simulate(circuit, qubit_order=qubits,
initial_state=initial_state)
simulated_state = result.final_state_vector
# Print final fidelity.
fidelity = abs(numpy.dot(simulated_state, numpy.conjugate(exact_state)))**2
print(fidelity)<jupyter_output>1.0000000960717732
<jupyter_text>Thus, we see that the circuit correctly effects the intended evolution. We can now use Cirq's compiler to output the circuit using gates native to near-term devices, and then optimize those circuits. We'll output in QASM 2.0 just to demonstrate that functionality.<jupyter_code>xmon_circuit = cirq.google.optimized_for_xmon(circuit)
print(xmon_circuit.to_qasm())<jupyter_output>// Generated from Cirq v0.9.0.dev
OPENQASM 2.0;
include "qelib1.inc";
// Qubits: [0, 1, 2]
qreg q[3];
u3(pi*0.5,pi*0.6242939487,pi*1.3757060513) q[2];
u3(pi*0.5,pi*1.3861445133,pi*0.6138554867) q[1];
u3(pi*0.5,pi*1.3688709856,pi*0.6311290144) q[0];
cz q[1],q[2];
u3(pi*0.1242949803,pi*1.6242939487,pi*0.3757060513) q[2];
u3(pi*0.1242949803,pi*1.3861445133,pi*0.6138554867) q[1];
cz q[1],q[2];
u3(pi*0.2854634625,pi*0.1242939487,pi*1.8757060513) q[2];
u3(pi*0.539208605,pi*0.8861445133,pi*1.1138554867) q[1];
cz q[0],q[1];
u3(pi*0.328242091,pi*0.3861445133,pi*1.6138554867) q[1];
u3(pi*0.328242091,pi*1.3688709856,pi*0.6311290144) q[0];
cz q[0],q[1];
u3(pi*0.3063611382,pi*0.8861445133,pi*1.1138554867) q[1];
u3(pi*0.2484206297,pi*1.8688709856,pi*0.1311290144) q[0];
cz q[1],q[2];
u3(pi*0.2326621647,pi*1.6242939487,pi*0.3757060513) q[2];
u3(pi*0.2326621647,pi*1.3861445133,pi*0.6138554867) q[1];
cz q[1],q[2];
u3(pi*0.9969223054,pi*1.1242939487,pi*0.8757060513) q[2];
u3(pi*0.3413244084,pi*1.886[...]
<jupyter_start><jupyter_text><jupyter_code># Write a function checkValidMove(game,inp) that takes the board game and inp as
# user input and check if the input is valid (You have to think about what is a valid input).
# If the input is invalid, you need to return False, otherwise return True.
# Can assume that the players always input integers.
# Write a function checkWin(game) to check if a board game has a winner.
# If there is a winner, return the winner symbol ‘X’ or ‘O’.
# Otherwise, if the game has no winner yet, return False.
# Modify the function tttGamePlay() to decide and end the game if there is a winner or finally tie.
# If it’s a tie game, print ‘Tie game!’
# Call function tttGamePlay() to start a Tic-Tac-Toe game
# Indicate position of input from 1-9
def createZeroMatrix(r,c):
output = [] #Final Output is Originally Empty List
for i in range(r): #Looping to get the number of rows desired
row = []
for j in range(c): #Loop to get number of columns desired
row.append(0) #Append 0 to each column
output.append(row)
return output
def printTTT(game):
for i in range(3):
print(f'{game[i][0]}|{game[i][1]}|{game[i][2]}')
if i !=2:
print( '-----') #Print this when i = 0 and i = 1
piece = ['X','O']
def checkValidMove(game,inp):
if inp < 1 or inp > 9:
return False
for i in game:
for j in range(3):
if inp == i[j]:
return True
return False
def tttGamePlay():
game = createZeroMatrix(3,3)
for i in range(3):
for j in range(3):
game[i][j] = i*3+j+1 #Labelling the positions for entry
player = 0
printTTT(game)
for i in range(9): #Anyhow play 9 times
print()
pos = int(input(f'Player {piece[player]} move:')) - 1
while checkValidMove(game, pos+1) == False:
print('Invalid Move')
pos = int(input(f'Player {piece[player]} move:')) - 1
game[pos//3][pos%3] = piece[player]
player = 1 - player
printTTT(game)
if checkWin(game) != False:
print(f'Player {checkWin(game)} won!')
return
print('Tie game!')
return
def checkWin(game):
count_X, count_O = 0, 0
for i in range(3): #Checking diagonals (Position 1,5,9)
if game[i][i] == 'X':
count_X += 1
if count_X == 3:
return 'X'
if game[i][i] == 'O':
count_O += 1
if count_O == 3:
return 'O'
count_X, count_O, j = 0, 0, 2
for i in range(3): #Checking diagonals (Position 3,5,7)
if game[i][j] == 'X':
count_X += 1
if count_X == 3:
return 'X'
if game[i][j] == 'O':
count_O += 1
if count_O == 3:
return 'O'
j -= 1
for i in range(3): #Checking rows
count_X = 0
count_O = 0
for j in range(3):
if game[i][j] == 'X':
count_X += 1
if count_X == 3:
return 'X'
if game[i][j] == 'O':
count_O += 1
if count_O == 3:
return 'O'
for i in range(3): #Checking columns
count_X = 0
count_O = 0
for j in range(3):
if game[j][i] == 'X':
count_X += 1
if count_X == 3:
return 'X'
if game[j][i] == 'O':
count_O += 1
if count_O == 3:
return 'O'
return False
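# Added aside (not part of the original): a more compact, equivalent way to check for a
# winner is to enumerate the 8 winning lines once and compare each against both symbols.
def checkWinCompact(game):
    lines = [[(i, j) for j in range(3)] for i in range(3)]                 # rows
    lines += [[(i, j) for i in range(3)] for j in range(3)]                # columns
    lines += [[(i, i) for i in range(3)], [(i, 2 - i) for i in range(3)]]  # diagonals
    for line in lines:
        values = [game[i][j] for i, j in line]
        if values[0] in ('X', 'O') and values.count(values[0]) == 3:
            return values[0]
    return False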
tttGamePlay()<jupyter_output>1|2|3
-----
4|5|6
-----
7|8|9
Player X move:2
1|X|3
-----
4|5|6
-----
7|8|9
Player O move:3
1|X|O
-----
4|5|6
-----
7|8|9
Player X move:1
X|X|O
-----
4|5|6
-----
7|8|9
Player O move:5
X|X|O
-----
4|O|6
-----
7|8|9
Player X move:6
X|X|O
-----
4|O|X
-----
7|8|9
Player O move:7
X|X|O
-----
4|O|X
-----
O|8|9
Player O won!
<jupyter_start><jupyter_text>### Read Image<jupyter_code>image = cv2.imread("things.jpg")
image.shape
r = 300/ image.shape[1]
dim = (300, (int(image.shape[0] * r)))
resized_image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)<jupyter_output><empty_output><jupyter_text>### Thresholding Techniques
- Simple Thresholding
- Adaptive Thresholding using Gaussian Thresholding
- OTSU Thresholding
- Riddler-Calvard Thresholding
simple_blurred = cv2.GaussianBlur(simple_gray, (5, 5), 0)
(T, thresh) = cv2.threshold(simple_blurred, 120, 255, cv2.THRESH_BINARY)
(T, threshinv) = cv2.threshold(simple_blurred, 120, 255, cv2.THRESH_BINARY_INV)
plt.imshow(simple_gray, cmap='gray')
plt.imshow(thresh, cmap='gray')<jupyter_output><empty_output><jupyter_text>### Adaptive Thresholding<jupyter_code>ad_gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
ad_blurred = cv2.GaussianBlur(ad_gray, (3, 3), 0)
ad_thresh = cv2.adaptiveThreshold(ad_blurred,
255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY_INV,
7,
5)<jupyter_output><empty_output><jupyter_text>#### Gaussian Thresholding<jupyter_code>ad_gaussianthresh = cv2.adaptiveThreshold(ad_blurred,
255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV,
15,
7)
plt.imshow(ad_gaussianthresh, cmap='gray')<jupyter_output><empty_output><jupyter_text>### OTSU Thresholding<jupyter_code>otsu_gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
otsu_blurred = cv2.GaussianBlur(otsu_gray, (5, 5), 0)
T = mahotas.thresholding.otsu(otsu_blurred)
otsu_thresh = resized_image.copy()
otsu_thresh[otsu_thresh > T] = 255
otsu_thresh[otsu_thresh < 255] = 0
otsu_threshinv = cv2.bitwise_not(otsu_thresh)
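# Added aside (not part of the original): OpenCV can compute the Otsu threshold directly,
# which may be a useful cross-check against the mahotas value T used above.
cv2_T, cv2_otsu = cv2.threshold(otsu_blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print("mahotas Otsu threshold:", T, "| OpenCV Otsu threshold:", cv2_T)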
plt.imshow(otsu_threshinv, cmap='gray')<jupyter_output><empty_output><jupyter_text>### Riddler-Calvard Thresholding<jupyter_code>RC_gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
RC_blurred = cv2.GaussianBlur(RC_gray, (5, 5), 0)
T_RC = mahotas.thresholding.rc(RC_blurred)
thresh_RC = resized_image.copy()
thresh_RC[thresh_RC > T_RC] = 255
thresh_RC[thresh_RC < 255] = 0
thresh_RCinv = cv2.bitwise_not(thresh_RC)
plt.imshow(thresh_RCinv, cmap='gray')<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Logistic Regression in Theano
## for comparison with TensorFlow<jupyter_code>"""
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructedtext en'
import cPickle
import gzip
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
# initialize the biases b as a vector of n_out 0s
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
#train_set, valid_set, test_set format: tuple(input, target)
#input is an numpy.ndarray of 2 dimensions (a matrix)
#witch row's correspond to an example. target is a
#numpy.ndarray of 1 dimensions (vector)) that have the same length as
#the number of rows in the input. It should give the target
#target to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets us get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
dataset='mnist.pkl.gz',
batch_size=600):
"""
Demonstrate stochastic gradient descent optimization of a log-linear
model
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
###############
# TRAIN MODEL #
###############
print '... training the model'
# early-stopping parameters
patience = 5000 # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
(
' epoch %i, minibatch %i/%i, test error of'
' best model %f %%'
) %
(
epoch,
minibatch_index + 1,
n_train_batches,
test_score * 100.
)
)
# save the best model
with open('best_model.pkl', 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(
(
'Optimization complete with best validation score of %f %%,'
'with test performance %f %%'
)
% (best_validation_loss * 100., test_score * 100.)
)
print 'The code run for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time)))
def predict():
"""
An example of how to load a trained model and use it
to predict labels.
"""
# load the saved model
classifier = cPickle.load(open('best_model.pkl'))
# compile a predictor function
predict_model = theano.function(
inputs=[classifier.input],
outputs=classifier.y_pred)
# We can test it on some examples from the test set
dataset='mnist.pkl.gz'
datasets = load_data(dataset)
test_set_x, test_set_y = datasets[2]
test_set_x = test_set_x.get_value()
predicted_values = predict_model(test_set_x[:10])
print ("Predicted values for the first 10 examples in test set:")
print predicted_values
if __name__ == '__main__':
sgd_optimization_mnist()<jupyter_output><empty_output>
|
no_license
|
/Deep_Learning_from_Others/TDL_TensorFlowExamples/Session1/DeepMNIST_Theano.ipynb
|
pmnyc/Machine_Learning_Problems
| 1 |
<jupyter_start><jupyter_text><jupyter_code>!git clone https://github.com/Guillem96/activity-recognition
!cd activity-recognition && pip install -e .
import sys
import random
from pathlib import Path
import accelerate
import torch
import torchaudio
import torchvision.transforms as T
import matplotlib.pyplot as plt
from ar.metrics import accuracy, top_5_accuracy
from ar.utils.engine import train_one_epoch, evaluate
!mkdir -p data/speech_command
ds = torchaudio.datasets.SPEECHCOMMANDS('data/speech_command', download=True)
classes = Path('data/speech_command/SpeechCommands/speech_commands_v0.02')
classes = [p.stem for p in classes.iterdir()]
class2idx = {c: i for i, c in enumerate(classes)}
waveform, sample_rate, label, *_ = ds[random.randrange(len(ds))]  # randrange avoids an off-by-one IndexError
plt.title(label)
plt.plot(waveform[0].numpy());
from IPython.display import Audio
Audio(waveform.numpy(), rate=sample_rate, autoplay=True)
fft = torch.fft.rfft(waveform, dim=1)
plt.plot(fft[0]);<jupyter_output>/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:645: ComplexWarning: Casting complex values to real discards the imaginary part
return self.numpy().astype(dtype, copy=False)
<jupyter_text>## MelSpectrogram from scratch
Apply a Short Time Fourier Transform to our signal with a Hanning window.<jupyter_code>plt.title('Hanning window')
plt.plot(torch.hann_window(400))
plt.xlabel('Window step')
plt.ylabel('Intensity')
plt.show()
window_len = 400
hop_len = 160
sample_wf = waveform.squeeze()[6000:7000]
plt.title('Hanning windows applied for STFT')
for i in range((sample_wf.size(0) // hop_len) - 1):
plt.plot(range(i * hop_len, (i * hop_len) + window_len),
torch.hann_window(window_len) * sample_wf.max())
plt.vlines(i * hop_len,
sample_wf.min(),
sample_wf.max(),
color='r',
linestyles='dotted')
plt.vlines((i * hop_len) + window_len,
sample_wf.min(),
sample_wf.max(),
color='g',
linestyles='dotted')
plt.plot(sample_wf, 'b')
plt.show()
stft = torch.stft(waveform.squeeze(),
n_fft=512,
hop_length=hop_len,
win_length=window_len,
window=torch.hann_window(400))
stft = torchaudio.functional.complex_norm(stft)
plt.title('Spectrogram')
plt.imshow(stft.log2().squeeze(), cmap='gray')
plt.xlabel('Time')
plt.ylabel('Frequencies')
plt.show()
fbanks = torchaudio.functional.create_fb_matrix(
stft.size(0),
0, sample_rate // 2,
23, sample_rate)
plt.title('Mel triangular filters')
plt.plot(fbanks)
plt.axis('off')
plt.show()
mel_spectrogram = stft.transpose(1, 0) @ fbanks
plt.imshow(mel_spectrogram.log2().t(), cmap='inferno')
plt.xlabel('Time')
plt.ylabel('Mel Features')
plt.show()
cosine_trans = torchaudio.functional.create_dct(13, 23, 'ortho')
plt.imshow(cosine_trans)
plt.axis('off');
mfcc = (mel_spectrogram + 1e-6).log() @ cosine_trans
plt.imshow(mfcc.t(), cmap='inferno')
plt.xlabel('Time')
plt.ylabel('Mel Features')
plt.show()
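# Aside (a sketch for comparison, not in the original notebook): torchaudio
# bundles the manual STFT -> mel filterbank -> DCT pipeline above into a single
# transform; exact numerical agreement with the manual version is an assumption.
mfcc_transform = torchaudio.transforms.MFCC(sample_rate=sample_rate, n_mfcc=13, melkwargs=dict(n_fft=512, win_length=window_len, hop_length=hop_len, n_mels=23))
print(mfcc_transform(waveform).shape)  # (channels, n_mfcc, time)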
train_len = int(len(ds) * .9)
rand_idx = torch.randperm(len(ds))
train_ds = torch.utils.data.Subset(ds, rand_idx[:train_len])
valid_ds = torch.utils.data.Subset(ds, rand_idx[train_len:])
def collate_fn(batch):
wf, sr, labels, *_ = zip(*batch)
padded_wf = []
max_len = max([o.size(1) for o in wf])
for w in wf:
w = w[:, :max_len]
offset = max_len - w.size(1)
pad_tensor = torch.zeros(1, offset)
w = torch.cat([w, pad_tensor], dim=-1)
padded_wf.append(w)
wf = torch.stack(padded_wf)
return wf, torch.as_tensor([class2idx[o] for o in labels])
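# Alternative sketch (an aside, commented out so it does not replace the
# version above): torch.nn.utils.rnn.pad_sequence can do the zero-padding in one call.
# def collate_fn(batch):
#     wf, sr, labels, *_ = zip(*batch)
#     wf = torch.nn.utils.rnn.pad_sequence([w.squeeze(0) for w in wf], batch_first=True).unsqueeze(1)
#     return wf, torch.as_tensor([class2idx[o] for o in labels])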
train_dl = torch.utils.data.DataLoader(
train_ds, batch_size=64, collate_fn=collate_fn, pin_memory=True)
valid_dl = torch.utils.data.DataLoader(
valid_ds, batch_size=64, collate_fn=collate_fn, pin_memory=True)
x, y = next(iter(train_dl))
x.size(), y.size()
def show_batch(waveforms, labels):
plt.figure(figsize=(20, 6))
for i, (wf, l) in enumerate(zip(waveforms[:16], labels[:16])):
plt.subplot(4, 4, i + 1)
# Remove background noise and padding
# to plot only the audio of interest
wf = wf.squeeze().unfold(0, 20, 1).mean(-1)
wf = wf[wf.abs() > 0.005]
plt.title(classes[l.item()])
plt.plot(wf.numpy())
plt.axis('off')
show_batch(x, y)
plt.show()
device = torch.device('cuda')
model = torch.nn.Sequential(
torchaudio.transforms.MFCC(sample_rate, 20, log_mels=True,
melkwargs=dict(n_fft=512,
win_length=400,
hop_length=160)),
torch.nn.Conv2d(1, 32, 5),
torch.nn.Conv2d(32, 32, 3),
torch.nn.BatchNorm2d(32),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(32, 64, 3),
torch.nn.Conv2d(64, 64, 3),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(inplace=True),
torch.nn.Flatten(1),
torch.nn.Linear(58240, 512),
torch.nn.Dropout(.5),
torch.nn.Linear(512, len(classes)),
torch.nn.LogSoftmax(-1))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = torch.nn.NLLLoss()
accelerator = accelerate.Accelerator()
accelerator.device
model, train_dl, valid_dl, optimizer = accelerator.prepare(model, train_dl, valid_dl, optimizer)
for epoch in range(5):
train_one_epoch(train_dl, model, optimizer, loss_fn, epoch,
accelerator=accelerator)
evaluate(valid_dl, model, loss_fn,
metrics=[accuracy, top_5_accuracy],
epoch=epoch,
accelerator=accelerator)
<jupyter_output><empty_output>
|
non_permissive
|
/notebooks/Audio Classification PyTorch.ipynb
|
Guillem96/activity-recognition
| 2 |
<jupyter_start><jupyter_text># This is my title
## This is my subtitle
This is my text
#### List:
- item 1
- item 2<jupyter_code>%%HTML
<p>Just some <strong>text</strong></p><jupyter_output><empty_output>
|
permissive
|
/aula089-jupyter-notebook/FirstOfficialNote.ipynb
|
axellbrendow/python3-basic-to-advanced
| 1 |
<jupyter_start><jupyter_text>First we import some datasets of interest<jupyter_code>#the seed information
df_seeds = pd.read_csv('../input/WNCAATourneySeeds_SampleTourney2018.csv')
#tour information
df_tour = pd.read_csv('../input/WRegularSeasonCompactResults_PrelimData2018.csv')<jupyter_output><empty_output><jupyter_text>Now we separate the winners from the losers and organize our dataset<jupyter_code>df_seeds['seed_int'] = df_seeds['Seed'].apply( lambda x : int(x[1:3]) )
df_winseeds = df_seeds.loc[:, ['TeamID', 'Season', 'seed_int']].rename(columns={'TeamID':'WTeamID', 'seed_int':'WSeed'})
df_lossseeds = df_seeds.loc[:, ['TeamID', 'Season', 'seed_int']].rename(columns={'TeamID':'LTeamID', 'seed_int':'LSeed'})
df_dummy = pd.merge(left=df_tour, right=df_winseeds, how='left', on=['Season', 'WTeamID'])
df_concat = pd.merge(left=df_dummy, right=df_lossseeds, on=['Season', 'LTeamID'])<jupyter_output><empty_output><jupyter_text>Now we match the detailed results to the merge dataset above<jupyter_code>df_concat['DiffSeed'] = df_concat[['LSeed', 'WSeed']].apply(lambda x : 0 if x[0] == x[1] else 1, axis = 1)<jupyter_output><empty_output><jupyter_text>Here we get our submission info<jupyter_code>#prepares sample submission
df_sample_sub = pd.read_csv('../input/WSampleSubmissionStage2.csv')
df_sample_sub['Season'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[0]) )
df_sample_sub['TeamID1'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[1]) )
df_sample_sub['TeamID2'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[2]) )<jupyter_output><empty_output><jupyter_text># Training Data Creation<jupyter_code>winners = df_concat.rename( columns = { 'WTeamID' : 'TeamID1',
'LTeamID' : 'TeamID2',
'WScore' : 'Team1_Score',
'LScore' : 'Team2_Score'}).drop(['WSeed', 'LSeed', 'WLoc'], axis = 1)
winners['Result'] = 1.0
losers = df_concat.rename( columns = { 'WTeamID' : 'TeamID2',
'LTeamID' : 'TeamID1',
'WScore' : 'Team2_Score',
'LScore' : 'Team1_Score'}).drop(['WSeed', 'LSeed', 'WLoc'], axis = 1)
losers['Result'] = 0.0
train = pd.concat( [winners, losers], axis = 0).reset_index(drop = True)
train['Score_Ratio'] = train['Team1_Score'] / train['Team2_Score']
train['Score_Total'] = train['Team1_Score'] + train['Team2_Score']
train['Score_Pct'] = train['Team1_Score'] / train['Score_Total']<jupyter_output><empty_output><jupyter_text>We will only consider years relevant to our test submission<jupyter_code>df_sample_sub['Season'].unique()<jupyter_output><empty_output><jupyter_text>Now lets just look at TeamID2, or just the second team info.<jupyter_code>train_test_inner = pd.merge( train.loc[ train['Season'].isin([2018]), : ].reset_index(drop = True),
df_sample_sub.drop(['ID', 'Pred'], axis = 1),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'inner' )
train_test_inner.head()<jupyter_output><empty_output><jupyter_text>From the inner join, we will create data per team id to estimate the parameters we are missing that are independent of the year. Essentially, we are trying to estimate the average behavior of the team across the year.<jupyter_code>team1d_num_ot = train_test_inner.groupby(['Season', 'TeamID1'])['NumOT'].median().reset_index()\
.set_index('Season').rename(columns = {'NumOT' : 'NumOT1'})
team2d_num_ot = train_test_inner.groupby(['Season', 'TeamID2'])['NumOT'].median().reset_index()\
.set_index('Season').rename(columns = {'NumOT' : 'NumOT2'})
num_ot = team1d_num_ot.join(team2d_num_ot).reset_index()
#combine the median OT counts from both groupings by rounding their sum
num_ot['NumOT'] = num_ot[['NumOT1', 'NumOT2']].apply(lambda x : round( x.sum() ), axis = 1 )
num_ot.head()<jupyter_output><empty_output><jupyter_text>Here we look at the comparable statistics. For the TeamID2 column, we would consider the inverse of the ratio, and 1 minus the score attempt percentage.<jupyter_code>team1d_score_spread = train_test_inner.groupby(['Season', 'TeamID1'])[['Score_Ratio', 'Score_Pct']].median().reset_index()\
.set_index('Season').rename(columns = {'Score_Ratio' : 'Score_Ratio1', 'Score_Pct' : 'Score_Pct1'})
team2d_score_spread = train_test_inner.groupby(['Season', 'TeamID2'])[['Score_Ratio', 'Score_Pct']].median().reset_index()\
.set_index('Season').rename(columns = {'Score_Ratio' : 'Score_Ratio2', 'Score_Pct' : 'Score_Pct2'})
score_spread = team1d_score_spread.join(team2d_score_spread).reset_index()
#geometric mean of score ratio of team 1 and inverse of team 2
score_spread['Score_Ratio'] = score_spread[['Score_Ratio1', 'Score_Ratio2']].apply(lambda x : ( x[0] * ( x[1] ** -1.0) ), axis = 1 ) ** 0.5
#harmonic mean of score pct
score_spread['Score_Pct'] = score_spread[['Score_Pct1', 'Score_Pct2']].apply(lambda x : 0.5*( x[0] ** -1.0 ) + 0.5*( 1.0 - x[1] ) ** -1.0, axis = 1 ) ** -1.0
score_spread.head()<jupyter_output><empty_output><jupyter_text>Now let's create a model based solely on the inner group and predict those probabilities.
We will get the teams with the missing result.<jupyter_code>X_train = train_test_inner.loc[:, ['Season', 'NumOT', 'Score_Ratio', 'Score_Pct']]
train_labels = train_test_inner['Result']
train_test_outer = pd.merge( train.loc[ train['Season'].isin([2014, 2015, 2016, 2017]), : ].reset_index(drop = True),
df_sample_sub.drop(['ID', 'Pred'], axis = 1),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'outer' )
train_test_outer = train_test_outer.loc[ train_test_outer['Result'].isnull(),
['TeamID1', 'TeamID2', 'Season']]
train_test_missing = pd.merge( pd.merge( score_spread.loc[:, ['TeamID1', 'TeamID2', 'Season', 'Score_Ratio', 'Score_Pct']],
train_test_outer, on = ['TeamID1', 'TeamID2', 'Season']),
num_ot.loc[:, ['TeamID1', 'TeamID2', 'Season', 'NumOT']],
on = ['TeamID1', 'TeamID2', 'Season'])<jupyter_output><empty_output><jupyter_text>We scale our data for our classifier, and make sure our categorical variables are properly processed.<jupyter_code>X_test = train_test_missing.loc[:, ['Season', 'NumOT', 'Score_Ratio', 'Score_Pct']]
n = X_train.shape[0]
train_test_merge = pd.concat( [X_train, X_test], axis = 0 ).reset_index(drop = True)
train_test_merge = pd.concat( [pd.get_dummies( train_test_merge['Season'].astype(object) ),
train_test_merge.drop('Season', axis = 1) ], axis = 1 )
train_test_merge = pd.concat( [pd.get_dummies( train_test_merge['NumOT'].astype(object) ),
train_test_merge.drop('NumOT', axis = 1) ], axis = 1 )
X_train = train_test_merge.loc[:(n - 1), :].reset_index(drop = True)
X_test = train_test_merge.loc[n:, :].reset_index(drop = True)
x_max = X_train.max()
x_min = X_train.min()
X_train = ( X_train - x_min ) / ( x_max - x_min + 1e-14)
X_test = ( X_test - x_min ) / ( x_max - x_min + 1e-14)
train_labels.value_counts()
X_train.head()
from sklearn.linear_model import LogisticRegressionCV
model = LogisticRegressionCV(cv=80,scoring="neg_log_loss",random_state=1
#,penalty="l1"
#,Cs= Cs_#list(np.arange(1e-7,1e-9,-0.5e-9)) # [0.5,0.1,0.01,0.001] #list(np.power(1, np.arange(-10, 10)))
#,max_iter=1000, tol=1e-11
#,solver="liblinear"
#,n_jobs=4
)
model.fit(X_train, train_labels)
#---
Cs = model.Cs_
list(np.power(10.0, np.arange(-10, 10)))
dir(model)
sco = model.scores_[1].mean(axis=0)
#---
import matplotlib.pyplot as plt
plt.plot(Cs
#np.log10(Cs)
,sco)
# plt.ylabel('some numbers')
plt.show()
sco.min()
Cs_= list(np.arange(1.1e-9 - 5e-11
,1.051e-9
,0.2e-13))
len(Cs_)
Cs_= list(np.arange(1e-11
,9.04e-11#1.0508e-9
,0.2e-12))
len(Cs_)
#Cs_= list(np.arange(5.6e-13 - ( (0.01e-13)*1)
# ,5.61e-13 - ( (0.01e-13)*1)#1.0508e-9
# ,0.2e-15))
#len(Cs_)
Cs_= list(np.arange(1e-11
,5.5e-11#1.0508e-9
,0.2e-12))
len(Cs_)
Cs_= list(np.arange(1e-14
,5.5e-11#1.0508e-9
,0.2e-12))
len(Cs_)#awsome
#Cs_= list(np.arange(1.5e-11
# ,2.53e-11#1.0508e-9
# ,0.2e-13)) #+[3.761e-11]
#len(Cs_)
#X_train.dtypes
Cs_= list(np.arange(1e-15
,0.51e-10 #1.0508e-9
,0.1e-12))
len(Cs_)#new again
Cs_= list(np.arange(9e-14
,10.1e-13 #1.0508e-9
,0.1e-14))
len(Cs_)#new again cont. lowerlevel
Cs_= list(np.arange(9e-14
,10.1e-13 #1.0508e-9
,0.1e-14))
len(Cs_)#new again cont. lowerlevel
#LogisticRegressionCV(Cs=10, class_weight=None, cv=107, dual=False,
# fit_intercept=True, intercept_scaling=1.0, max_iter=100,
# multi_class='ovr', n_jobs=1, penalty='l2', random_state=2,
# refit=True, scoring='neg_log_loss', solver='lbfgs', tol=0.0001,
# verbose=0) #-0.7
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=1  # note: plain LogisticRegression has no 'scoring' argument
#,penalty="l1"
,C=8.129999999999969e-13#list(np.arange(1e-7,1e-9,-0.5e-9)) # [0.5,0.1,0.01,0.001] #list(np.power(1, np.arange(-10, 10)))
,max_iter=1000, tol=1e-11
#,solver="liblinear"
,n_jobs=4)
model.fit(X_train, train_labels)
#---
# Cs_ and scores_ exist only on LogisticRegressionCV, so the next lines would
# raise AttributeError on this plain LogisticRegression; the plot below reuses
# the Cs/sco computed from the CV model above.
#Cs = model.Cs_
#list(np.power(10.0, np.arange(-10, 10)))
#dir(model)
#sco = model.scores_[1].mean(axis=0)
#---
import matplotlib.pyplot as plt
plt.plot(Cs
#np.log10(Cs)
,sco)
# plt.ylabel('some numbers')
plt.show()
Cs= list(np.linspace(9e-15
,10.1e-14 #1.0508e-9
,200))
len(Cs)#new again cont. lowerlevel
from sklearn import svm, grid_search, datasets
parameters = dict(C=Cs)
model = LogisticRegression(random_state=1
#,penalty="l1"
,C=8.129999999999969e-13#list(np.arange(1e-7,1e-9,-0.5e-9)) # [0.5,0.1,0.01,0.001] #list(np.power(1, np.arange(-10, 10)))
,max_iter=1000, tol=1e-11
,solver="lbfgs"
,n_jobs=1)
clf = grid_search.GridSearchCV(model, parameters,scoring="neg_log_loss",cv=80,n_jobs=8)
clf.fit(X_train, train_labels)
scores = [x[1] for x in clf.grid_scores_]
scores = np.array(scores).reshape(len(Cs))
plt.plot(Cs, scores)
plt.legend()
plt.xlabel('Cs')
plt.ylabel('Mean score')
plt.show()
print("C:",clf.best_estimator_.C," loss:",clf.best_score_)
clf.grid_scores_
scores = [x[1] for x in clf.grid_scores_]
scores = np.array(scores).reshape(len(Cs))
plt.plot(Cs, scores)
plt.legend()
plt.xlabel('Cs')
plt.ylabel('Mean score')
plt.show()
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(clf.grid_scores_)
# plt.ylabel('some numbers')
plt.show()
index_min = np.argmin(sco)
Cs_[index_min] #3.761e-11
sco.min()
#list(np.power(10.0, np.arange(-10, 10)))
#list(np.arange(0.5,1e-4,-0.05))
print(sco.max())
#-0.6931471779248422
print(sco.min() < -0.693270048530996)
print(sco.min()+0.693270048530996)
sco.min()
import matplotlib.pyplot as plt
plt.plot(model.scores_[1])
# plt.ylabel('some numbers')
plt.show()<jupyter_output><empty_output><jupyter_text>Here we store our probabilities<jupyter_code>train_test_inner['Pred1'] = model.predict_proba(X_train)[:,1]
train_test_missing['Pred1'] = model.predict_proba(X_test)[:,1]<jupyter_output><empty_output><jupyter_text>We merge our predictions<jupyter_code>sub = pd.merge(df_sample_sub,
pd.concat( [train_test_missing.loc[:, ['Season', 'TeamID1', 'TeamID2', 'Pred1']],
train_test_inner.loc[:, ['Season', 'TeamID1', 'TeamID2', 'Pred1']] ],
axis = 0).reset_index(drop = True),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'outer')<jupyter_output><empty_output><jupyter_text>We get the 'average' probability of success for each team<jupyter_code>team1_probs = sub.groupby('TeamID1')['Pred1'].apply(lambda x : (x ** -1.0).mean() ** -1.0 ).fillna(0.5).to_dict()
team2_probs = sub.groupby('TeamID2')['Pred1'].apply(lambda x : (x ** -1.0).mean() ** -1.0 ).fillna(0.5).to_dict()<jupyter_output><empty_output><jupyter_text>Any missing value for the prediction will be imputed with the product of the probabilities calculated above. We assume these are independent events.<jupyter_code>sub['Pred'] = sub[['TeamID1', 'TeamID2','Pred1']]\
.apply(lambda x : team1_probs.get(x[0]) * ( 1 - team2_probs.get(x[1]) ) if np.isnan(x[2]) else x[2],
axis = 1)
sub = sub.drop_duplicates(subset=["ID"], keep='first')
sub[['ID', 'Pred']].to_csv('sub.csv', index = False)
sub[['ID', 'Pred']].head(20)<jupyter_output><empty_output>
|
non_permissive
|
/MNIST_2017/dump_/women_2018_gridsearchCV.ipynb
|
minesh1291/Practicing-Kaggle
| 15 |
<jupyter_start><jupyter_text># Apriori Algorithm for minimum support of 0.1<jupyter_code>frequent_itemsets = apriori(data1, min_support=0.1, use_colnames=True)
frequent_itemsets
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=0.7)
rules
rules.sort_values('lift',ascending = False)[0:20]
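# Aside (a small added sketch, assuming `rules` is the mlxtend rules DataFrame
# built above): several metrics can be combined in one filter.
strong_rules = rules[(rules['lift'] > 1) & (rules['confidence'] > 0.8)]
strong_rules.sort_values('lift', ascending=False).head()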
rules[rules.lift>1]<jupyter_output><empty_output><jupyter_text># Apriori Algorithm for minimum support of 0.2<jupyter_code>frequent_itemsets1 = apriori(data1, min_support=0.2, use_colnames=True)
frequent_itemsets1
rules1 = association_rules(frequent_itemsets1, metric="lift", min_threshold=0.7)
rules1
rules1[rules1.lift>1]<jupyter_output><empty_output><jupyter_text># Apriori Algorithm for minimum support of 0.3<jupyter_code>frequent_itemsets2 = apriori(data1, min_support=0.3, use_colnames=True)
frequent_itemsets2
rules2 = association_rules(frequent_itemsets2, metric="lift", min_threshold=0.7)
rules2<jupyter_output><empty_output><jupyter_text># For metric = confidence<jupyter_code>frequent_itemsets_confidence = apriori(data1, min_support=0.1, use_colnames=True)
frequent_itemsets_confidence
rules_confidence = association_rules(frequent_itemsets_confidence, metric="confidence", min_threshold=0.7)
rules_confidence<jupyter_output><empty_output><jupyter_text># For metric = support<jupyter_code>frequent_itemsets_support = apriori(data1, min_support=0.1, use_colnames=True)
frequent_itemsets_support
rules_support = association_rules(frequent_itemsets_support, metric="support", min_threshold=0.7)
rules_support<jupyter_output><empty_output><jupyter_text># Visualizing<jupyter_code>import matplotlib.pyplot as plt
plt.scatter(rules.support,rules.confidence)
plt.title('Support v/s Confidence')
plt.xlabel('Support')
plt.ylabel('Confidence')
plt.show()
plt.scatter(rules.support,rules.lift)
plt.title('Support v/s Lift')
plt.xlabel('Support')
plt.ylabel('Lift')
plt.show()
plt.scatter(rules.confidence,rules.lift)
plt.title('Confidence v/s Lift')
plt.xlabel('Confidence')
plt.ylabel('Lift')
plt.show()<jupyter_output><empty_output>
|
no_license
|
/Association Rules Assignment-2.ipynb
|
Yusuffkhan31/Association-rules
| 6 |
<jupyter_start><jupyter_text># maps<jupyter_code>l = list(map(int,['1','2','3']))
print(l)
list(map(int,l))
list(map(str,l))
i = input().split()
list(map(str,input().split()))
list(map(str,input()))
def addition(n):
return n+n
list(map(addition,[3,5,7,8,9,423]))
def power(n):
return n**n
list(map(power,[3,6,7,2]))
def square(n):
return n*n
l = list(map(square,[3,54,45,435,4]))
for i in l:
print(i)<jupyter_output>9
2916
2025
189225
16
<jupyter_text>## Filters in Python<jupyter_code>Li = [1,2,'a','b','c',3]
def isDigit(n):
n = str(n)
if n.isdigit():
return True
return False
list(filter(isDigit,Li))
Li = [1,2,7,9,3,4,3]
def isDigit(n):
n = str(n)
if n.isdigit():
return True
return False
list(filter(isDigit,Li))
primes = lambda n: n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))  # define the missing predicate so this filter runs
list(filter(primes,Li))
def even(n):
if n%2==0:
return True
list(filter(even,Li))
def odd(n):
if n%2!=0:
return True
list(filter(odd,Li))<jupyter_output><empty_output><jupyter_text># LAMBDAS<jupyter_code>h = lambda x : x%2==0
list(filter(h,[1,2,3,4,5]))
def odd_fun(n):
if n%2!=0:
return True
return False
l = [1,2,3,4,5,6,7,8,9,10]
o = lambda i:i%2!=0
list(filter(o,l))
from random import randint as r
for i in range(1,10+1):
print(r(1,25))
print([r(1,25) for i in range(1,11)])
m1 = [r(1,25) for i in range(1,11)]
m2 = [r(1,25) for i in range(1,11)]
m3 = [r(1,25) for i in range(1,11)]
print(m1)
print(m2)
print(m3)
a1 = list(map(lambda x,y:(x+y)//2,m1,m2))
print(a1)
a2 = list(map(lambda x,y,z:(x+y+z)//3,m1,m2,m3))
print(a2)
3+5+5
13/3
k = lambda x:x<10
list(filter(k,a1))
r = lambda x:x<10
list(filter(r,a2))
a2
<jupyter_output><empty_output><jupyter_text>## Numpy<jupyter_code>import numpy as np
l = [1,2,3,4]
a1 = np.array(l)
print(a1)
l2 = [[1,2,3],[4,5,6]]
a2 = np.array(l2)
print(a2)
l3 = [[1,2,3],[4,5,6],[6,7,8]]
a3 = np.array(l3)
print(a3)
a = np.arange(100).reshape(5,20)
a
b = np.arange(20,30).reshape(2,5)
print(b)
c = np.arange(20,26).reshape(2,3)
print(c)
x = np.arange(1,7).reshape(2,3)
y = np.arange(22,28).reshape(2,3)
z= np.arange(11,17).reshape(2,3)
x
y
z
<jupyter_output><empty_output>
|
no_license
|
/20-SEP/20sep2019.ipynb
|
kalyan123-dot/python
| 4 |
<jupyter_start><jupyter_text># Name : Niranjan
Status : Intern
Organisation : The Sparks Foundation
Task - 1 : Prediction using Supervised ML
### Predict the percentage of marks of a student based on the number of study hours.
## STEP 1 : Importing the Libraries & dataset
### Importing all libraries required for this task<jupyter_code>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_csv("http://bit.ly//w-data")
data
data.head()
data.tail()<jupyter_output><empty_output><jupyter_text># STEP 2 : Checking for Missing Values<jupyter_code>data.info()
data.isnull().sum()
data.describe()<jupyter_output><empty_output><jupyter_text># STEP 3:Visualising the data<jupyter_code>data.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.grid()
plt.show()<jupyter_output><empty_output><jupyter_text>#### From the graph above, we can clearly see that there is a positive linear relation between the number of hours studied and percentage of score.<jupyter_code>data.corr()<jupyter_output><empty_output><jupyter_text># ## STEP 4 : Data preparation & Splitting the dataset### Using iloc function we will divide the data <jupyter_code>x=data.iloc[:,0].values.reshape(-1,1)
y=data.iloc[:,-1]
x
y
x.shape
y.shape<jupyter_output><empty_output><jupyter_text>### Splitting data into training and testing data(Ratio - 80:20)# <jupyter_code>from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y,test_size=0.20, random_state=0)<jupyter_output><empty_output><jupyter_text># STEP 5: Training the Model<jupyter_code>from sklearn.linear_model import LinearRegression
LR=LinearRegression()
model=LR.fit(x_train,y_train)<jupyter_output><empty_output><jupyter_text># ### Printing the coefficient and intercept of the model after training<jupyter_code>LR.coef_
LR.intercept_
# predicting on training data-set
y_train_predicted = LR.predict(x_train)
print(y_train_predicted)
from sklearn.metrics import r2_score,mean_squared_error
# evaluating the model on training dataset
rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))
r2_train = r2_score(y_train, y_train_predicted)
print(rmse_train)
print(r2_train)<jupyter_output>5.558613350226342
0.9515510725211552
<jupyter_text># STEP 6 : Visualizing the model<jupyter_code>plt.scatter(x, y, color = 'blue')
plt.plot(x, LR.predict(x), color = 'red')
plt.title('Linear Regression')
plt.xlabel('Hours')
plt.ylabel('Scores')
plt.show()<jupyter_output><empty_output><jupyter_text># STEP 7 : Making Predictions# <jupyter_code># predicting on test data-set
y_pred=LR.predict(x_test)
print(y_pred)
# evaluating the model on test dataset
rmse_test = np.sqrt(mean_squared_error(y_test, y_pred))
r2_test = r2_score(y_test, y_pred)
print(rmse_test)
print(r2_test)<jupyter_output>4.6474476121003665
0.9454906892105356
<jupyter_text>### Comparing actual and predicted values in a dataframe<jupyter_code>#Actual VS Predicted
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df<jupyter_output><empty_output><jupyter_text>### Predicting a new value - No. of hours studied = 9.25# <jupyter_code>hours = 9.25
own_pred = LR.predict([[hours]])
print("Number of Hours = {}".format(hours))
print("Predicted Score = {}".format(own_pred[0]))<jupyter_output>Number of Hours = 9.25
Predicted Score = 93.69173248737538
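As a quick sanity check (a small sketch added here, not part of the original task), the same number can be reproduced by hand from the fitted slope and intercept:
```python
manual_pred = LR.coef_[0] * hours + LR.intercept_
print(manual_pred)  # should match the predicted score printed above
```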
<jupyter_text># Step 8 : Model Evaluation<jupyter_code>from sklearn import metrics
from math import sqrt
print('Mean Absolute Error:',metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:',metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:',np.sqrt(metrics.mean_squared_error(y_test, y_pred)))<jupyter_output>Mean Absolute Error: 4.183859899002975
Mean Squared Error: 21.5987693072174
Root Mean Squared Error: 4.6474476121003665
|
no_license
|
/Prediction_Using_Supervised_ML.ipynb
|
niranjangundarapu/The-Spark-Foundation-Internship
| 13 |
<jupyter_start><jupyter_text>
# Hierarchical Clustering
Estimated time needed: **25** minutes
## Objectives
After completing this lab you will be able to:
* Use scikit-learn to do Hierarchical clustering
* Create dendrograms to visualize the clustering
Table of contents
Hierarchical Clustering - Agglomerative
Generating Random Data
Agglomerative Clustering
Dendrogram Associated for the Agglomerative Hierarchical Clustering
Clustering on the Vehicle Dataset
Data Cleaning
Clustering Using Scipy
Clustering using scikit-learn
Hierarchical Clustering - Agglomerative
We will be looking at a clustering technique, which is Agglomerative Hierarchical Clustering. Remember that agglomerative is the bottom up approach.
In this lab, we will be looking at Agglomerative clustering, which is more popular than Divisive clustering.
We will also be using Complete Linkage as the Linkage Criteria. NOTE: You can also try using Average Linkage wherever Complete Linkage would be used to see the difference!
<jupyter_code>import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.cluster import hierarchy
from scipy.spatial import distance_matrix
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_blobs
%matplotlib inline<jupyter_output><empty_output><jupyter_text>
Generating Random Data
We will be generating a set of data using the make_blobs class.
Input these parameters into make_blobs:
n_samples: The total number of points equally divided among clusters.
Choose a number from 10-1500
centers: The number of centers to generate, or the fixed center locations.
Choose arrays of x,y coordinates for generating the centers. Have 1-10 centers (ex. centers=[[1,1], [2,5]])
cluster_std: The standard deviation of the clusters. The larger the number, the further apart the clusters
Choose a number between 0.5-1.5
Save the result to X1 and y1.
<jupyter_code>X1, y1 = make_blobs(n_samples=50, centers=[[4,4], [-2, -1], [1, 1], [10,4]], cluster_std=0.9)<jupyter_output><empty_output><jupyter_text>Plot the scatter plot of the randomly generated data.
<jupyter_code>plt.scatter(X1[:, 0], X1[:, 1], marker='o') <jupyter_output><empty_output><jupyter_text>
Agglomerative Clustering
We will start by clustering the random data points we just created.
The Agglomerative Clustering class will require two inputs:
n_clusters: The number of clusters to form as well as the number of centroids to generate.
Value will be: 4
linkage: Which linkage criterion to use. The linkage criterion determines which distance to use between sets of observation. The algorithm will merge the pairs of cluster that minimize this criterion.
Value will be: 'complete'
Note: It is recommended you try everything with 'average' as well
Save the result to a variable called agglom .
<jupyter_code>agglom = AgglomerativeClustering(n_clusters = 4, linkage = 'average')<jupyter_output><empty_output><jupyter_text>Fit the model with X1 and y1 from the generated data above.
<jupyter_code>agglom.fit(X1,y1)<jupyter_output><empty_output><jupyter_text>Run the following code to show the clustering!
Remember to read the code and comments to gain more understanding on how the plotting works.<jupyter_code># Create a figure of size 6 inches by 4 inches.
plt.figure(figsize=(6,4))
# These two lines of code are used to scale the data points down,
# Or else the data points will be scattered very far apart.
# Create a minimum and maximum range of X1.
x_min, x_max = np.min(X1, axis=0), np.max(X1, axis=0)
# Get the average distance for X1.
X1 = (X1 - x_min) / (x_max - x_min)
# This loop displays all of the datapoints.
for i in range(X1.shape[0]):
# Replace the data points with their respective cluster value
# (ex. 0) and is color coded with a colormap (plt.cm.spectral)
plt.text(X1[i, 0], X1[i, 1], str(y1[i]),
color=plt.cm.nipy_spectral(agglom.labels_[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
# Remove the x ticks, y ticks, x and y axis
plt.xticks([])
plt.yticks([])
#plt.axis('off')
# Display the plot of the original data before clustering
plt.scatter(X1[:, 0], X1[:, 1], marker='.')
# Display the plot
plt.show()<jupyter_output><empty_output><jupyter_text>Dendrogram Associated for the Agglomerative Hierarchical Clustering
Remember that a distance matrix contains the distance from each point to every other point of a dataset .
Use the function distance_matrix, which requires two inputs. Use the Feature Matrix, X1 as both inputs and save the distance matrix to a variable called dist_matrix
Remember that the distance values are symmetric, with a diagonal of 0's. This is one way of making sure your matrix is correct. (print out dist_matrix to make sure it's correct)
<jupyter_code>dist_matrix = distance_matrix(X1,X1)
print(dist_matrix)<jupyter_output>[[0. 0.05649111 0.07149279 ... 0.44015728 0.62839907 0.30822043]
[0.05649111 0. 0.09154081 ... 0.39463529 0.59265606 0.29724404]
[0.07149279 0.09154081 0. ... 0.40508051 0.57709453 0.37749748]
...
[0.44015728 0.39463529 0.40508051 ... 0. 0.23113944 0.60915544]
[0.62839907 0.59265606 0.57709453 ... 0.23113944 0. 0.8348486 ]
[0.30822043 0.29724404 0.37749748 ... 0.60915544 0.8348486 0. ]]
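A quick check of those two properties (a minimal added sketch using the `dist_matrix` from above):
```python
print(np.allclose(dist_matrix, dist_matrix.T))   # symmetric
print(np.allclose(np.diag(dist_matrix), 0.0))    # zero diagonal
```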
<jupyter_text>Using the linkage class from hierarchy, pass in the parameters:
The distance matrix
'complete' for complete linkage
Save the result to a variable called Z .
<jupyter_code>Z = hierarchy.linkage(dist_matrix, 'complete')<jupyter_output>C:\Users\hotty\AppData\Local\Temp\ipykernel_18580\3518085107.py:1: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
Z = hierarchy.linkage(dist_matrix, 'complete')
<jupyter_text>A Hierarchical clustering is typically visualized as a dendrogram as shown in the following cell. Each merge is represented by a horizontal line. The y-coordinate of the horizontal line is the similarity of the two clusters that were merged, where cities are viewed as singleton clusters.
By moving up from the bottom layer to the top node, a dendrogram allows us to reconstruct the history of merges that resulted in the depicted clustering.
Next, we will save the dendrogram to a variable called dendro. In doing this, the dendrogram will also be displayed.
Using the dendrogram class from hierarchy, pass in the parameter:
Z
<jupyter_code>dendro = hierarchy.dendrogram(Z)<jupyter_output><empty_output><jupyter_text>## Practice
We used **complete** linkage for our case; change it to **average** linkage to see how the dendrogram changes.
<jupyter_code># write your code here
Z = hierarchy.linkage(dist_matrix, 'average')
dendro = hierarchy.dendrogram(Z)
<jupyter_output>C:\Users\hotty\AppData\Local\Temp\ipykernel_18580\2301254431.py:2: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
Z = hierarchy.linkage(dist_matrix, 'average')
<jupyter_text>Click here for the solution
```python
Z = hierarchy.linkage(dist_matrix, 'average')
dendro = hierarchy.dendrogram(Z)
```
Clustering on Vehicle dataset
Imagine that an automobile manufacturer has developed prototypes for a new vehicle. Before introducing the new model into its range, the manufacturer wants to determine which existing vehicles on the market are most like the prototypes--that is, how vehicles can be grouped, which group is the most similar with the model, and therefore which models they will be competing against.
Our objective here, is to use clustering methods, to find the most distinctive clusters of vehicles. It will summarize the existing vehicles and help manufacturers to make decision about the supply of new models.
### Download data
To download the data, we will use **`!wget`** to download it from IBM Object Storage.\
**Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
<jupyter_code>!wget -O cars_clus.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%204/data/cars_clus.csv<jupyter_output>'wget' is not recognized as an internal or external command,
operable program or batch file.
<jupyter_text>## Read data
Let's read dataset to see what features the manufacturer has collected about the existing models.
<jupyter_code>filename = 'cars_clus.csv'
#Read csv
pdf = pd.read_csv(filename)
print ("Shape of dataset: ", pdf.shape)
pdf.head(5)<jupyter_output>Shape of dataset: (159, 16)
<jupyter_text>The feature sets include price in thousands (price), engine size (engine_s), horsepower (horsepow), wheelbase (wheelbas), width (width), length (length), curb weight (curb_wgt), fuel capacity (fuel_cap) and fuel efficiency (mpg).
Data Cleaning
Let's clean the dataset by dropping the rows that have null value:
<jupyter_code>print ("Shape of dataset before cleaning: ", pdf.size)
pdf[[ 'sales', 'resale', 'type', 'price', 'engine_s',
'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap',
'mpg', 'lnsales']] = pdf[['sales', 'resale', 'type', 'price', 'engine_s',
'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap',
'mpg', 'lnsales']].apply(pd.to_numeric, errors='coerce')
pdf = pdf.dropna()
pdf = pdf.reset_index(drop=True)
print ("Shape of dataset after cleaning: ", pdf.size)
pdf.head(5)<jupyter_output>Shape of dataset before cleaning: 2544
Shape of dataset after cleaning: 1872
<jupyter_text>### Feature selection
Let's select our feature set:
<jupyter_code>featureset = pdf[['engine_s', 'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap', 'mpg']]<jupyter_output><empty_output><jupyter_text>### Normalization
Now we can normalize the feature set. **MinMaxScaler** transforms features by scaling each feature to a given range. It is by default (0, 1). That is, this estimator scales and translates each feature individually such that it is between zero and one.
<jupyter_code>from sklearn.preprocessing import MinMaxScaler
x = featureset.values #returns a numpy array
min_max_scaler = MinMaxScaler()
feature_mtx = min_max_scaler.fit_transform(x)
feature_mtx [0:5]<jupyter_output><empty_output><jupyter_text>Clustering using Scipy
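The same scaling written out by hand (a sketch for illustration, reusing the `x` array from the cell above): each column is mapped to (x - min) / (max - min).
```python
manual = (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))
print(np.allclose(manual, feature_mtx))
```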
In this part we use Scipy package to cluster the dataset.
First, we calculate the distance matrix.
<jupyter_code>import scipy
leng = feature_mtx.shape[0]
D = scipy.zeros([leng,leng])
for i in range(leng):
for j in range(leng):
D[i,j] = scipy.spatial.distance.euclidean(feature_mtx[i], feature_mtx[j])
D<jupyter_output>C:\Users\hotty\AppData\Local\Temp\ipykernel_18580\458117257.py:3: DeprecationWarning: scipy.zeros is deprecated and will be removed in SciPy 2.0.0, use numpy.zeros instead
D = scipy.zeros([leng,leng])
<jupyter_text>In agglomerative clustering, at each iteration, the algorithm must update the distance matrix to reflect the distance of the newly formed cluster with the remaining clusters in the forest.
The following methods are supported in Scipy for calculating the distance between the newly formed cluster and each:
\- single
\- complete
\- average
\- weighted
\- centroid
We use **complete** for our case, but feel free to change it to see how the results change.
<jupyter_code>import pylab
import scipy.cluster.hierarchy
Z = hierarchy.linkage(D, 'complete')<jupyter_output>C:\Users\hotty\AppData\Local\Temp\ipykernel_18580\227076933.py:3: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
Z = hierarchy.linkage(D, 'complete')
<jupyter_text>Essentially, Hierarchical clustering does not require a pre-specified number of clusters. However, in some applications we want a partition of disjoint clusters just as in flat clustering.
So you can use a cutting line:
<jupyter_code>from scipy.cluster.hierarchy import fcluster
max_d = 3
clusters = fcluster(Z, max_d, criterion='distance')
clusters<jupyter_output><empty_output><jupyter_text>Also, you can determine the number of clusters directly:
<jupyter_code>from scipy.cluster.hierarchy import fcluster
k = 5
clusters = fcluster(Z, k, criterion='maxclust')
clusters
<jupyter_output><empty_output><jupyter_text>Now, plot the dendrogram:
<jupyter_code>fig = pylab.figure(figsize=(18,50))
def llf(id):
return '[%s %s %s]' % (pdf['manufact'][id], pdf['model'][id], int(float(pdf['type'][id])) )
dendro = hierarchy.dendrogram(Z, leaf_label_func=llf, leaf_rotation=0, leaf_font_size =12, orientation = 'right')<jupyter_output><empty_output><jupyter_text>Clustering using scikit-learn
Let's redo it again, but this time using the scikit-learn package:
<jupyter_code>from sklearn.metrics.pairwise import euclidean_distances
dist_matrix = euclidean_distances(feature_mtx,feature_mtx)
print(dist_matrix)
Z_using_dist_matrix = hierarchy.linkage(dist_matrix, 'complete')
fig = pylab.figure(figsize=(18,50))
def llf(id):
return '[%s %s %s]' % (pdf['manufact'][id], pdf['model'][id], int(float(pdf['type'][id])) )
dendro = hierarchy.dendrogram(Z_using_dist_matrix, leaf_label_func=llf, leaf_rotation=0, leaf_font_size =12, orientation = 'right')<jupyter_output><empty_output><jupyter_text>Now, we can use the 'AgglomerativeClustering' function from scikit-learn library to cluster the dataset. The AgglomerativeClustering performs a hierarchical clustering using a bottom up approach. The linkage criteria determines the metric used for the merge strategy:
* Ward minimizes the sum of squared differences within all clusters. It is a variance-minimizing approach and in this sense is similar to the k-means objective function but tackled with an agglomerative hierarchical approach.
* Maximum or complete linkage minimizes the maximum distance between observations of pairs of clusters.
* Average linkage minimizes the average of the distances between all observations of pairs of clusters.
<jupyter_code>agglom = AgglomerativeClustering(n_clusters = 6, linkage = 'complete')
agglom.fit(dist_matrix)
agglom.labels_<jupyter_output>C:\Users\hotty\anaconda3\lib\site-packages\sklearn\cluster\_agglomerative.py:542: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
out = hierarchy.linkage(X, method=linkage, metric=affinity)
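To see how the linkage criteria above differ in practice, here is a small added sketch (not part of the lab) that refits on the raw feature matrix and compares cluster sizes; Ward is included too, and it needs the features rather than a precomputed distance matrix.
```python
for linkage_name in ('ward', 'complete', 'average'):
    agg = AgglomerativeClustering(n_clusters=6, linkage=linkage_name)
    labels = agg.fit_predict(feature_mtx)
    print(linkage_name, np.bincount(labels))
```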
<jupyter_text>We can add a new field to our dataframe to show the cluster of each row:
<jupyter_code>pdf['cluster_'] = agglom.labels_
pdf.head()
import matplotlib.cm as cm
n_clusters = max(agglom.labels_)+1
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
cluster_labels = list(range(0, n_clusters))
# Create a figure of size 6 inches by 4 inches.
plt.figure(figsize=(16,14))
for color, label in zip(colors, cluster_labels):
subset = pdf[pdf.cluster_ == label]
for i in subset.index:
plt.text(subset.horsepow[i], subset.mpg[i],str(subset['model'][i]), rotation=25)
plt.scatter(subset.horsepow, subset.mpg, s= subset.price*10, c=color, label='cluster'+str(label),alpha=0.5)
# plt.scatter(subset.horsepow, subset.mpg)
plt.legend()
plt.title('Clusters')
plt.xlabel('horsepow')
plt.ylabel('mpg')<jupyter_output>*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or R[...]<jupyter_text>As you can see, we are seeing the distribution of each cluster using the scatter plot, but it is not very clear where is the centroid of each cluster. Moreover, there are 2 types of vehicles in our dataset, "truck" (value of 1 in the type column) and "car" (value of 0 in the type column). So, we use them to distinguish the classes, and summarize the cluster. First we count the number of cases in each group:
<jupyter_code>pdf.groupby(['cluster_','type'])['cluster_'].count()<jupyter_output><empty_output><jupyter_text>Now we can look at the characteristics of each cluster:
<jupyter_code>agg_cars = pdf.groupby(['cluster_','type'])['horsepow','engine_s','mpg','price'].mean()
agg_cars<jupyter_output>C:\Users\hotty\AppData\Local\Temp\ipykernel_18580\3307995906.py:1: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.
agg_cars = pdf.groupby(['cluster_','type'])['horsepow','engine_s','mpg','price'].mean()
<jupyter_text>It is obvious that we have 3 main clusters with the majority of vehicles in those.
**Cars**:
* Cluster 1: with almost high mpg, and low in horsepower.
* Cluster 2: with good mpg and horsepower, but higher price than average.
* Cluster 3: with low mpg, high horsepower, highest price.
**Trucks**:
* Cluster 1: with almost highest mpg among trucks, and lowest in horsepower and price.
* Cluster 2: with almost low mpg and medium horsepower, but higher price than average.
* Cluster 3: with good mpg and horsepower, low price.
Please notice that we did not use **type** and **price** of cars in the clustering process, but Hierarchical clustering could forge the clusters and discriminate them with quite a high accuracy.
<jupyter_code>plt.figure(figsize=(16,10))
for color, label in zip(colors, cluster_labels):
subset = agg_cars.loc[(label,),]
for i in subset.index:
plt.text(subset.loc[i][0]+5, subset.loc[i][2], 'type='+str(int(i)) + ', price='+str(int(subset.loc[i][3]))+'k')
plt.scatter(subset.horsepow, subset.mpg, s=subset.price*20, c=color, label='cluster'+str(label))
plt.legend()
plt.title('Clusters')
plt.xlabel('horsepow')
plt.ylabel('mpg')
<jupyter_output>*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or R[...]
|
no_license
|
/Machine Learning with Python/ML0101EN-Clus-Hierarchical-Cars.ipynb
|
MachinatorX/Coursera--IBM-Data-Science-Professional
| 26 |
<jupyter_start><jupyter_text># Regularization Notebook
### Goal: I have reached a point where I believe my model suffers from bias, and am looking for a way to add complexity without overfitting, so I am going to implement regularization to attempt to achieve that.<jupyter_code># import necessary modules
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.linear_model import Lasso, LassoCV
from sklearn.linear_model import Ridge, RidgeCV
sns.set_style('darkgrid')
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
pd.options.display.max_columns = 1000
# load training data
ames = pd.read_csv("../data/train.csv")
# define function to clean the data
def clean_ames_data(df):
'''Generalized function to clean a sample of Ames Housing Data'''
# convert column names to useable format
df.columns = [x.lower().replace(' ','_') for x in df.columns]
# drop 'id' and 'pid' columns
#df.drop(['id','pid'], axis=1, inplace=True)
# Dealing with NaN values. Handling the special case of Masonry Veneer Type first
df['mas_vnr_type'].fillna(value = 'None', inplace = True) # Assuming 'NaN' should be 'None' for Masonry Type
df['mas_vnr_area'].fillna(value = 0.0, inplace = True) # Assuming masonry area is 0.0 for houses with 'NaN' type
# for categorical variables, the missing values should actually be marked 'NA'
nulls = df.columns[df.isnull().any()]
for col in df[nulls].select_dtypes(include = 'object').columns:
df[col].fillna(value = 'NA', inplace = True)
# filtering for houses with no basement, replacing numerical columns 'NaNs' with 0.0
no_bsmt = df['bsmt_qual'] == 'NA'
for col in df[no_bsmt].filter(regex = 'bsmt'):
df[col].fillna(value = 0.0, inplace = True)
# use the same procedure to handle numerical columns for houses with no garage
no_garage = df['garage_type'] == 'NA'
for col in df[no_garage].filter(regex = 'garage'):
df[col].fillna(value = 0.0, inplace = True)
# clean the data
clean_ames_data(ames)
# Split data into X an y
X = ames.loc[:,ames.columns != 'saleprice']
y = ames['saleprice']
# Create categorical variable for Location
X['Location'] = X['neighborhood']
X['Location'].replace({'MeadowV':'Low','IDOTRR': 'Low','BrDale': 'Low','OldTown': 'Low',
'Edwards':'Low','BrkSide':'Low', 'Landmrk': 'LowMed','Sawyer': 'LowMed',
'SWISU':'LowMed','NAmes':'LowMed','NPkVill':'LowMed','Blueste':'LowMed',
'Mitchel':'LowMed','Gilbert':'MedHigh','Greens':'MedHigh','SawyerW':'MedHigh',
'NWAmes':'MedHigh','Blmngtn':'MedHigh','CollgCr':'MedHigh','ClearCr':'MedHigh',
'Crawfor':'MedHigh','Somerst':'High','Timber':'High','Veenker':'High','GrnHill':'High',
'NoRidge':'High','NridgHt':'High','StoneBr':'High'}, inplace = True)
# Create the dummy variables I will need
# define a function to create the dummy variables I need
style_mask = X['house_style'] == '2.5Fin'
# Create a dummy to indicate house style is 2.5Fin
X['StyleDummy'] = np.where(X['house_style'] == '2.5Fin', 1, 0)
# Create a dummy for being adjacent to or near a positive feature
X['PosFeature'] = np.where((X['condition_2'] == 'PosN') | (X['condition_2'] == 'PosA'), 1, 0)
# Create dummies for Total Rooms Above Grade
X = pd.get_dummies(X, columns = ['ms_zoning', 'full_bath',
'sale_type','central_air','Location',
'garage_cars','exter_qual'], drop_first = True)
numeric_columns = ['overall_qual', 'garage_area','gr_liv_area','year_built','totrms_abvgrd']
dummy_columns = ['StyleDummy','PosFeature',
'full_bath_1','full_bath_2','full_bath_3','full_bath_4',
'central_air_Y','Location_Low','Location_LowMed','Location_MedHigh',
'garage_cars_1.0','garage_cars_2.0','garage_cars_3.0','garage_cars_4.0','garage_cars_5.0',
'exter_qual_Fa','exter_qual_Gd','exter_qual_TA']
dummy_columns2 = ['StyleDummy','PosFeature',
'central_air_Y','Location_Low','Location_LowMed','Location_MedHigh',
'exter_qual_Fa','exter_qual_Gd','exter_qual_TA']
dummy_columns3 = ['central_air_Y','Location_Low','Location_LowMed','Location_MedHigh']
dummy_columns4 = ['PosFeature','central_air_Y','Location_Low','Location_LowMed','Location_MedHigh']
columns = numeric_columns + dummy_columns4
predictors = X[columns]
poly = PolynomialFeatures(include_bias = False)
X_poly = poly.fit_transform(predictors)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X_poly, y)
ss = StandardScaler()
kf = KFold(n_splits = 10, shuffle = True)
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.transform(X_test)  # reuse the scaler fitted on the training data to avoid leakage
lasso_model = LassoCV(cv = kf)
lasso_model = lasso_model.fit(X_train_scaled, y_train)
lasso_optimal_alpha = lasso_model.alpha_
lasso_optimal_alpha
lasso_quick = Lasso(alpha = lasso_optimal_alpha)
cross_val_score(lasso_quick, X_train_scaled, y_train)<jupyter_output>/home/mattg/anaconda3/envs/dsi/lib/python3.6/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.
ConvergenceWarning)
/home/mattg/anaconda3/envs/dsi/lib/python3.6/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.
ConvergenceWarning)
/home/mattg/anaconda3/envs/dsi/lib/python3.6/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.
ConvergenceWarning)
|
no_license
|
/code/.ipynb_checkpoints/Regularization Notebook-checkpoint.ipynb
|
mattg12/Ames-Iowa-Project
| 1 |
<jupyter_start><jupyter_text>### Idiomatic programming
- Transforming code into beautiful, idiomatic Python
- Replaces traditional index manipulation with Python's looping idioms<jupyter_code># looping over a range of numbers
for i in [0,1,2,3,4,5]:
print(i**2,end=' ')
print()
# pythonic way
for i in range(6):
print(i**2,end=' ')
# Looping over the collection(List)
li=[1,2,3,4,5]
for i in range(len(li)):
print(li[i],end=' ')
print()
# Pythonic way
for i in li:
print(i,end=' ')
# looping backwards
li=[1,2,3,4,5]
for i in range(len(li)-1,-1,-1):
print(li[i],end=" ")
# pythonic Way
print()
for i in reversed(li):
print(i,end=' ')
# Looping over the collection with index
li=[1,2,3,4,5]
for i in range(len(li)):
print(i,"-->",li[i])
print()
# Pythonic way
for i,a in enumerate(li):
print(i,'-->',a)
# looping with 2 collections
li=[1,2,3,4,5]
a=['a','b','c','d','e','f','g']
n=min(len(li),len(a))
for i in range(n):
print(li[i],' ',a[i])
print()
# Pythonic Way
for x, y in zip(li, a):  # new loop names so the list 'a' is not shadowed
print(x,' ',y)
li=[1,2,4,5,6,7,89,47]
for i in sorted(li):
print(i,end=' ')
print()
for i in sorted(li,reverse=True):
print(i,end=" ")
# Dictionaries
d={'Name':'Bharat','EmailID':'[email protected]','Adress':'Tokyo'}
for i in d:
print(i,end=' ')
print()
for i in list(d.keys()):  # iterate over a copy so keys can be deleted safely
if i.startswith('A'):
del d[i]
print(d)
for k in d.values():
print(k,end=' ')<jupyter_output>Bharat [email protected] <jupyter_text>### List Comprehensions<jupyter_code>li= []
for i in range(10):
a=i**2
li.append(a)
print(li,end=' ')
print()
print(sum(li))
# Pythonic way
sum(i**2 for i in range(10))
li=[]
for i in range(10):
li.append(i**2)
li
com_list=[i**2 for i in range(10)] # Comprehensive list
print(com_list)
com_list=[ i**2 for i in range(10) if i%2==0]
print(com_list)<jupyter_output>[0, 4, 16, 36, 64]
<jupyter_text>### Lambda Function
- An anonymous function is a function defined without a name
- A lambda function in Python is defined with the lambda keyword
- Syntax: lambda arguments : expression<jupyter_code>def square(n):
return n*n
square(6)
a=lambda x:x*x
print(a(6))
a=lambda x:x+2
print(a(10))<jupyter_output>12
<jupyter_text>### Use of lambda with filter
<jupyter_code>def filterlist(li):
a=[]
for i in li:
if i%2 == 0:
a.append(i)
return a
li=[1,2,3,4,5,6,7,8]
filterlist(li)
li = [1,2,3,4,5,6,7,8]
lam_li=list(filter(lambda x: (x%2==0),li))
print(lam_li)
def squareList(li):
a=[]
for i in li:
a.append(i**2)
return a
li = [1,2,3,4,5,6,7,8]
squareList(li)
li = [1,2,3,4,5,6,7,8]
map_list=list(map(lambda x:x**2,li))
print(map_list)<jupyter_output>[1, 4, 9, 16, 25, 36, 49, 64]
<jupyter_text>### Use of lambda with reduce<jupyter_code>def sumList(li):
s=0
for i in li:
s+=i
return s
li=[1,2,3,4,5,6,7,8]
sumList(li)
from functools import reduce
li=[1,2,3,4,5,6,7,8]
s=reduce((lambda x,y:x+y),li)
print(s)<jupyter_output>36
<jupyter_text>### Pandas
#### Use Cases
- Data Cleaning
- Data transformation
- series
<jupyter_code>import pandas as pd
internal1 = {'s1':35,'s2':35,'s3':35}
internal2 = {'s1':35,'s2':35,'s3':35}
internal1=pd.Series(internal1)
internal2=pd.Series(internal2)
print(internal1)
print(internal2)
final={'Internal1':internal1,'Internal2':internal2}
final=pd.DataFrame(final)
print(final)
final.columns
final.values[1]
final.values[2][0]=350
for row in final.values:
print('Internal1-', row[0], 'Internal2-', row[1])
final.loc['s4']=[20,20]
final
final.values[2]=[155,350]
final
pwd
filepath='DataFiles/Income.csv'
def readCsvData(filepath):
return pd.read_csv(filepath)
df=readCsvData(filepath)
readCsvData(filepath)
print(df.values)
# Extract income of all states in year 2013
# Alabama : 41381
for row in df.values:
print(row[1],":",row[-1])
import pandas as pd
print(df.values)
# Average income of california
import pandas as pd
def avgofState():
s=0
for i in range(2,11):
print(df.values[3][i])
s = s + df.values[3][i]
return s//len(df.values[2][2:])
avgofState()
# Function which only displays the column names in the list
# GEOID State 2005 2006 2007 2008 2009 2010 2011 2012 2013
def printDataFrameColumn(df):
li=[]
columns= df.columns
for i in columns:
li.append(i)
return li
printDataFrameColumn(df)
filepath='DataFiles/RegularSeasonCompactResults.csv'
def readCsvData(filepath):
return pd.read_csv(filepath)
df=readCsvData(filepath)
readCsvData(filepath)
# To know the len of rows and columns -- shape
df.shape
# To know only first few rows -- head()
df.head()
# To know the last records -- tail
df.tail()
# converting all column names into list
df.columns.tolist()
# describe(): min, max, count, mean, etc.
df.describe()
df.max()
df['Lteam'].max() # One column value max value
df['Lteam'].min() # One column value min value
df['Season'].value_counts()
df.sort_values('Lscore').head()
df.sort_values('Lscore').tail()<jupyter_output><empty_output>
|
no_license
|
/Day 5.ipynb
|
bharatsenapati/Gitam-Vizag-python
| 6 |
<jupyter_start><jupyter_text>## Unpickle the Dataframe for Preprocessed Utility Data & Make Utility Function Object<jupyter_code># Unpickle the pre-processed DataFrame
df = pickle.load(open('dfu3.pkl', 'rb'))
# Unpickle the raw utility bill DataFrame, which is needed below to make
# the utility function object.
df_raw = pickle.load(open('df_raw.pkl', 'rb'))
df.head() # the processed data
# this is only needed to update any code changes I may have made
# since last importing the module above.
reload(bu)
reload(gu)
# Make an object that has the various utility functions.
# The object needs access to the raw utility bill DataFrame and the spreadsheet
# containing other application data.
ut = bu.Util(df_raw, '../data/Other_Building_Data.xlsx')
# Testing site. Final code will loop through all sites
site = 'ANSBG1'
template_data = {}<jupyter_output><empty_output><jupyter_text># Energy Use Overview Report - Page 4
<jupyter_code># From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
usage_df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'mmbtu']]
usage_df1.head()
usage_df2 = pd.pivot_table(
usage_df1,
values='mmbtu',
index=['fiscal_year'],
columns=['service_type'],
aggfunc=np.sum
)
usage_df2 = usage_df2.drop(labels=['Sewer', 'Water'], axis=1)
usage_df2
# Add in columns for the missing services
missing_services = bu.missing_energy_services(usage_df2.columns)
bu.add_columns(usage_df2, missing_services)
usage_df2
# Add a Total column that sums the other columns
usage_df2['total_energy'] = usage_df2.sum(axis=1)
cols = ['{}_mmbtu'.format(bu.change_name(col)) for col in usage_df2.columns]
usage_df2.columns = cols
usage_df2
# Create a list of columns to loop through and calculate percent total energy
usage_cols = list(usage_df2.columns.values)
print (usage_cols)
usage_cols.remove('total_energy_mmbtu')
for col in usage_cols:
col_name = col.split('_mmbtu')[0] + "_pct"
usage_df2[col_name] = usage_df2[col] / usage_df2.total_energy_mmbtu
usage_df2
# Add in degree days
months_present = bu.months_present(usage_df1)
deg_days = ut.degree_days_yearly(months_present, site)
usage_df2['hdd'] = deg_days
usage_df2
# Add in a column to show the numbers of months present for each year
# This will help to identify partial years.
mo_count = bu.month_count(months_present)
usage_df2['month_count'] = mo_count
usage_df2
# Calculate total heat energy and normalized heating usage
usage_df2['total_heat_mmbtu'] = usage_df2.natural_gas_mmbtu + usage_df2.district_heat_mmbtu + usage_df2.fuel_oil_mmbtu
usage_df2['total_specific_heat'] = usage_df2.total_heat_mmbtu * 1000 / usage_df2.hdd
usage_df2 = usage_df2.query("month_count == 12")
usage_df2
# Reverse the DataFrame
usage_df2.sort_index(ascending=False, inplace=True)
usage_df2 = usage_df2.drop('month_count', axis=1)
usage_df2<jupyter_output>C:\Anaconda2\envs\py35\lib\site-packages\ipykernel\__main__.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
from ipykernel import kernelapp as app
<jupyter_text>## Create Energy Usage Overview Graphs<jupyter_code># Reset the index so the fiscal year column can be passed to the graphing function
reset_usage_df2 = usage_df2.reset_index()
p4g2_filename, p4g2_url = gu.graph_filename_url(site, 'annual_energy_usage_distribution')
# Create the area graph
gu.area_use_distribution(reset_usage_df2, 'fiscal_year', usage_cols, p4g2_filename)
p4g1_filename, p4g1_url = gu.graph_filename_url(site, "annual_energy_usage")
gu.energy_use_stacked_bar(reset_usage_df2, 'fiscal_year', usage_cols, p4g1_filename)
# Convert df to dictionary
energy_use_overview_rows = bu.df_to_dictionaries(usage_df2)
# Add data and graphs to main dictionary
template_data['energy_usage_overview'] = dict(
graphs=[p4g1_url, p4g2_url],
table={'rows': energy_use_overview_rows},
)<jupyter_output><empty_output><jupyter_text># Create Usage Pie Charts<jupyter_code>p5g1_filename, p5g1_url = gu.graph_filename_url(site, "energy_usage")
gu.usage_pie_charts(usage_df2, usage_cols, 1, p5g1_filename, site)
# Add pie charts to template dictionary
template_data['energy_cost_usage'] = dict(
graphs=[p5g1_url])<jupyter_output><empty_output><jupyter_text># Electrical Usage Analysis - Page 6<jupyter_code>site_df = df.query("site_id == @site")
site_df.head()
# only look at electricity records
electric_df = site_df.query("service_type == 'Electricity'")
# Make sure I'm not potentially missing anything with funky unit names
check_df = electric_df.query("usage > 0")
check_df.units.unique()
electric_df = electric_df.query("units == 'kWh' or units == 'kW'")
electric_df.head()
electric_df.query("units == 'kWh'")['item_desc'].unique()
electric_df.item_desc.unique()
electric_pivot_monthly = pd.pivot_table(electric_df,
index=['fiscal_year', 'fiscal_mo'],
columns=['units'],
values='usage',
aggfunc=np.sum)
electric_pivot_monthly.head()
# Do a month count for the electricity bills
elec_months_present = bu.months_present(electric_pivot_monthly.reset_index())
elec_mo_count = bu.month_count(elec_months_present)
elec_mo_count_df = pd.DataFrame(elec_mo_count)
elec_mo_count_df
electric_pivot_annual = pd.pivot_table(electric_df,
index=['fiscal_year'],
columns=['units'],
values='usage',
aggfunc=np.sum
)
electric_use_annual = electric_pivot_annual[['kWh']]
electric_use_annual = electric_use_annual.rename(columns={'kWh':'ann_electric_usage_kWh'})
electric_use_annual
# Get average annual demand usage
electric_demand_avg = electric_pivot_monthly.groupby(['fiscal_year']).mean()
electric_demand_avg = electric_demand_avg[['kW']]
electric_demand_avg = electric_demand_avg.rename(columns={'kW': 'avg_demand_kW'})
electric_demand_avg
# Find annual maximum demand usage
electric_demand_max = electric_pivot_monthly.groupby(['fiscal_year']).max()
electric_demand_max = electric_demand_max[['kW']]
electric_demand_max = electric_demand_max.rename(columns={'kW': 'max_demand_kW'})
electric_demand_max
# Combine dataframes
electric_demand_join = pd.merge(electric_demand_max, electric_demand_avg, how='outer', left_index=True, right_index=True)
annual_electric_data = pd.merge(electric_demand_join, electric_use_annual, how='outer', left_index=True, right_index=True)
annual_electric_data
# Add percent change columns
annual_electric_data['usage_pct_change'] = annual_electric_data.ann_electric_usage_kWh.pct_change()
annual_electric_data['avg_demand_pct_change'] = annual_electric_data.avg_demand_kW.pct_change()
annual_electric_data['max_demand_pct_change'] = annual_electric_data.max_demand_kW.pct_change()
annual_electric_data = annual_electric_data.rename(columns={'avg_demand_kW': 'Average kW',
'ann_electric_usage_kWh': 'Total kWh'})
annual_electric_data = pd.merge(annual_electric_data, elec_mo_count_df, left_index=True, right_index=True, how='left')
annual_electric_data = annual_electric_data.query("month == 12")
annual_electric_data = annual_electric_data.sort_index(ascending=False)
annual_electric_data = annual_electric_data.rename(columns={'max_demand_kW':'kw_max',
'Average kW':'kw_avg',
'Total kWh':'kwh',
'usage_pct_change':'kwh_pct_change',
'avg_demand_pct_change':'kw_avg_pct_change',
'max_demand_pct_change':'kw_max_pct_change'})
annual_electric_data = annual_electric_data.drop('month', axis=1)
annual_electric_data<jupyter_output><empty_output><jupyter_text>## Create Electrical Usage Analysis Graphs - Page 6<jupyter_code># Axes labels
ylabel1 = 'Electricity Usage [kWh]'
ylabel2 = 'Electricity Demand [kW]'
p6g1_filename, p6g1_url = gu.graph_filename_url(site, "electricity_usage")
gu.stacked_bar_with_line(annual_electric_data.reset_index(), 'fiscal_year', ['kwh'], 'kw_avg',
ylabel1, ylabel2, "Test Title", p6g1_filename)
p6g2_filename, p6g2_url = gu.graph_filename_url(site, "monthly_electricity_usage_profile")
gu.create_monthly_profile(electric_pivot_monthly, 'kWh', 'Monthly Electricity Usage Profile [kWh]', 'blue',
"Test Title", p6g2_filename)
# Convert df to dictionary
electric_use_rows = bu.df_to_dictionaries(annual_electric_data)
# Add data and graphs to main dictionary
template_data['electrical_usage_analysis'] = dict(
graphs=[p6g1_url, p6g2_url],
table={'rows': electric_use_rows},
)<jupyter_output><empty_output><jupyter_text>## Electrical Cost Analysis Table - Page 7<jupyter_code># only look at electricity records
electric_cost_df = site_df.query("service_type == 'Electricity'")
electric_cost_df.item_desc.unique()
# Costs don't always have units, so split the data into demand charges and usage charges (which includes other charges)
electric_cost_df['cost_categories'] = np.where(electric_cost_df.item_desc.isin(['KW Charge', 'On peak demand', 'Demand Charge']),
'demand_cost', 'usage_cost')
# Sum costs by demand and usage
electric_annual_cost = pd.pivot_table(electric_cost_df,
index=['fiscal_year'],
columns=['cost_categories'],
values='cost',
aggfunc=np.sum
)
# Create a total column
electric_annual_cost['Total Cost'] = electric_annual_cost.demand_cost + electric_annual_cost.usage_cost
electric_annual_cost
# Add percent change columns
electric_annual_cost['usage_cost_pct_change'] = electric_annual_cost.usage_cost.pct_change()
electric_annual_cost['demand_cost_pct_change'] = electric_annual_cost.demand_cost.pct_change()
electric_annual_cost['total_cost_pct_change'] = electric_annual_cost['Total Cost'].pct_change()
electric_annual_cost
# Left join the cost data to the annual electric data, which only shows complete years
electric_use_and_cost = pd.merge(annual_electric_data, electric_annual_cost, left_index=True, right_index=True, how='left')
electric_use_and_cost = electric_use_and_cost.sort_index(ascending=False)
electric_use_and_cost = electric_use_and_cost.drop(['kw_max', 'kw_max_pct_change'], axis=1)
electric_use_and_cost = electric_use_and_cost.rename(columns={'demand_cost':'kw_avg_cost',
'usage_cost':'kwh_cost',
'Total Cost':'total_cost',
'usage_cost_pct_change':'kwh_cost_pct_change',
'demand_cost_pct_change':'kw_avg_cost_pct_change'
})
electric_use_and_cost
<jupyter_output><empty_output><jupyter_text>## Create Electrical Cost Analysis Graphs - Page 7<jupyter_code>p7g1_filename, p7g1_url = gu.graph_filename_url(site, "electricity_cost")
renamed_use_and_cost = electric_use_and_cost.rename(columns={'kwh_cost':'Electricity Usage Cost [$]',
'kw_avg_cost':'Electricity Demand Cost [$]'})
gu.create_stacked_bar(renamed_use_and_cost.reset_index(), 'fiscal_year', ['Electricity Usage Cost [$]',
'Electricity Demand Cost [$]'],
'Electricity Cost [$]', "test title", p7g1_filename)
# Create Monthly Profile of Electricity Demand
p7g2_filename, p7g2_url = gu.graph_filename_url(site, "monthly_electricity_demand_profile")
gu.create_monthly_profile(electric_pivot_monthly, 'kW', 'Monthly Electricity Demand Profile [kW]', 'blue',
"test title", p7g2_filename)
# Convert df to dictionary
electric_cost_rows = bu.df_to_dictionaries(electric_use_and_cost)
# Add data and graphs to main dictionary
template_data['electrical_cost_analysis'] = dict(
graphs=[p7g1_url, p7g2_url],
table={'rows': electric_cost_rows},
)<jupyter_output><empty_output><jupyter_text># Create Heating Usage Analysis Table - Page 8<jupyter_code>usage_df2.head()
# Take only needed columns from earlier usage df
heating_usage = usage_df2[['natural_gas_mmbtu', 'fuel_oil_mmbtu', 'district_heat_mmbtu', 'hdd', 'total_heat_mmbtu']]
heating_usage
# Add in percent change columns
heating_usage['fuel_oil_pct_change'] = heating_usage.fuel_oil_mmbtu.pct_change()
heating_usage['natural_gas_pct_change'] = heating_usage.natural_gas_mmbtu.pct_change()
heating_usage['district_heat_pct_change'] = heating_usage.district_heat_mmbtu.pct_change()
heating_usage['total_heat_pct_change'] = heating_usage.total_heat_mmbtu.pct_change()
heating_usage
# Get the number of gallons, ccf, and 1,000 pounds of district heat by converting MMBTUs using the supplied conversions
heating_usage['fuel_oil_usage'] = heating_usage.fuel_oil_mmbtu * 1000000 / ut.fuel_btus_per_unit('Oil #1', 'gallons')
heating_usage['natural_gas_usage'] = heating_usage.natural_gas_mmbtu * 1000000 / ut.fuel_btus_per_unit('Natural Gas', 'ccf')
heating_usage<jupyter_output>C:\Anaconda2\envs\py35\lib\site-packages\ipykernel\__main__.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
from ipykernel import kernelapp as app
C:\Anaconda2\envs\py35\lib\site-packages\ipykernel\__main__.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
app.launch_new_instance()
<jupyter_text>## Create Heating Usage Analysis Graphs - Page 8<jupyter_code>p8g1_filename, p8g1_url = gu.graph_filename_url(site, "heating_degree_days")
gu.stacked_bar_with_line(heating_usage.reset_index(), 'fiscal_year', ['natural_gas_mmbtu', 'fuel_oil_mmbtu',
'district_heat_mmbtu'], 'hdd',
'Heating Fuel Usage [MMBTU/yr]', 'Heating Degree Days [Base 65F]', "test title", p8g1_filename)<jupyter_output><empty_output><jupyter_text>### Create Monthly Heating Usage dataframe for graph<jupyter_code>monthly_heating = pd.pivot_table(usage_df1,
values='mmbtu',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
monthly_heating.head()
monthly_heating
# Add in columns for the missing energy services
missing_services = bu.missing_energy_services(monthly_heating.columns)
bu.add_columns(monthly_heating, missing_services)
# Drop the non-heating services
monthly_heating = monthly_heating.drop(labels=['Electricity', 'Sewer', 'Water'], axis=1)
# Create a total heating column
monthly_heating['total_heating_energy'] = monthly_heating.sum(axis=1)
monthly_heating.head()
p8g2_filename, p8g2_url = gu.graph_filename_url(site, "monthly_heating_energy_profile")
gu.create_monthly_profile(monthly_heating, 'total_heating_energy', "Monthly Heating Energy Profile [MMBTU]", 'red',
"test title", p8g2_filename)
# Convert df to dictionary
heating_use_rows = bu.df_to_dictionaries(heating_usage)
# Add data and graphs to main dictionary
template_data['heating_usage_analysis'] = dict(
graphs=[p8g1_url, p8g2_url],
table={'rows': heating_use_rows},
)<jupyter_output><empty_output><jupyter_text>## Heating Cost Analysis Table - Page 9<jupyter_code># Import df that I exported from "alan_report_pages". This can be removed once code is combined
df2 = pd.read_csv(r"C:\Users\dustin\Google Drive\FNSB Data Analysis\data_from_alan_report_pages_df2.csv")
# Use only necessary columns
heating_cost = df2[['fiscal_year', 'Natural Gas', 'Oil #1', 'Steam', 'Total', 'pct_change']]
# Change column names so they aren't the same as the heating usage dataframe
heating_cost = heating_cost.rename(columns={'Natural Gas':'natural_gas_cost',
'Oil #1': 'fuel_oil_cost',
'Steam': 'district_heat_cost',
'Total': 'total_heat_cost',
'pct_change': 'total_heat_cost_pct_change'})
heating_cost
heating_usage
# Combine the heating cost and heating use dataframes
heating_cost_and_use = pd.merge(heating_cost, heating_usage, left_on='fiscal_year', right_index=True, how='right')
heating_cost_and_use
# Create percent change columns
heating_cost_and_use['fuel_oil_pct_change'] = heating_cost_and_use.fuel_oil_cost.pct_change()
heating_cost_and_use['natural_gas_pct_change'] = heating_cost_and_use.natural_gas_cost.pct_change()
heating_cost_and_use['district_heat_pct_change'] = heating_cost_and_use.district_heat_cost.pct_change()
# Create unit cost columns
heating_cost_and_use['fuel_oil_unit_cost'] = heating_cost_and_use.fuel_oil_cost / heating_cost_and_use.fuel_oil_mmbtu
heating_cost_and_use['natural_gas_unit_cost'] = heating_cost_and_use.natural_gas_cost / heating_cost_and_use.natural_gas_mmbtu
heating_cost_and_use['district_heat_unit_cost'] = heating_cost_and_use.district_heat_cost / heating_cost_and_use.district_heat_mmbtu
heating_cost_and_use['building_heat_unit_cost'] = heating_cost_and_use.total_heat_cost / heating_cost_and_use.total_heat_mmbtu
heating_cost_and_use
# Remove all columns not needed for the Heating Cost Analysis Table
heating_cost_and_use = heating_cost_and_use[['fiscal_year',
'fuel_oil_cost',
'fuel_oil_pct_change',
'natural_gas_cost',
'natural_gas_pct_change',
'district_heat_cost',
'district_heat_pct_change',
'fuel_oil_unit_cost',
'natural_gas_unit_cost',
'district_heat_unit_cost',
'building_heat_unit_cost',
'total_heat_cost',
'total_heat_cost_pct_change']]
heating_cost_and_use<jupyter_output><empty_output><jupyter_text>## Create DataFrame with the Monthly Average Price Per MMBTU for All Sites<jupyter_code>print (df.shape[0])
# Filter out natural gas customer charges as the unit cost goes to infinity if there is a charge but no use
df_no_gas_cust_charges = df.drop(df[(df['service_type'] == 'Natural Gas') & (df['units'] != 'CCF')].index)
# Filter out records with zero usage, which correspond to things like customer charges, etc.
nonzero_usage = df_no_gas_cust_charges.query("usage > 0")
print (nonzero_usage.shape[0])
nonzero_usage.head()
# Check to make sure it is okay to drop records w/ zero mmbtu
zero_mmbtu = nonzero_usage.query("mmbtu == 0")
zero_mmbtu.service_type.unique()
nonzero_usage = nonzero_usage.query("mmbtu > 0")
print (nonzero_usage.shape[0])
nonzero_usage.head()
nonzero_usage.cost.min()
# Further analysis showed that these zero and negative costs were for waste oil; presumably the negative
# costs reflect avoided disposal fees
nonzero_usage.query("cost <= 0")
# Looks like waste oil accounts for the negative costs
BALHHW = df_raw[(df_raw['Site ID'] == 'BALHHW')].sort_values(by='Cost')
BALHHW[BALHHW['Service Name'] == 'Oil #1']
# No idea why these costs are negative but it still seems like it should be filtered out
df_raw[(df_raw['Site ID'] == 'BENBG1')].sort_values(by='Cost')
# Filter out zero cost or less records (see analysis above)
nonzero_usage = nonzero_usage.query("cost > 0")
# Get the total fuel cost and usage for all buildings by year and month
grouped_nonzero_usage = nonzero_usage.groupby(['service_type', 'fiscal_year', 'fiscal_mo']).sum()
# Divide the total cost for all building by the total usage for all buildings so that the average is weighted correctly
grouped_nonzero_usage['avg_price_per_mmbtu'] = grouped_nonzero_usage.cost / grouped_nonzero_usage.mmbtu
# Get only the desired outcome, price per million BTU for each fuel type, and the number of calendar months it is based on
# i.e. the number of months of bills for each fuel for all buildings for that particular month.
grouped_nonzero_usage = grouped_nonzero_usage[['avg_price_per_mmbtu', 'cal_mo']]
# Drop electricity from the dataframe.
grouped_nonzero_usage = grouped_nonzero_usage.reset_index()
grouped_nonzero_heatfuel_use = grouped_nonzero_usage.query("service_type != 'Electricity'")
# Create a column for each service type
grouped_nonzero_heatfuel_use = pd.pivot_table(grouped_nonzero_heatfuel_use,
values='avg_price_per_mmbtu',
index=['fiscal_year', 'fiscal_mo'],
columns='service_type'
)
grouped_nonzero_heatfuel_use = grouped_nonzero_heatfuel_use.reset_index()
grouped_nonzero_heatfuel_use.head()<jupyter_output><empty_output><jupyter_text>## Monthly Cost Per MMBTU: Data and Graphs - Page 9<jupyter_code>raw_oil = df_raw[(df_raw['Service Name'] == 'Oil #1') & (df_raw['Item Description'] != 'Fuel Oil #1 (Gallons)')]
raw_oil['Item Description'].unique()
raw_oil.query("Units != 'Gallons'")['Cost'].sum()
raw_gas_analysis = df_raw[(df_raw['Service Name'] == 'Natural Gas') & (df_raw['Item Description'] != 'Natural gas (CCF)')]
raw_gas_analysis['Item Description'].unique()
raw_gas_analysis[raw_gas_analysis['Item Description'] == 'Misc. credit']
raw_gas_analysis[raw_gas_analysis['Item Description'] == 'Cost adjustments']
raw_gas_analysis[raw_gas_analysis['Item Description'] == 'Previous balance adj.']
# Heating energy use, in MMBTUs
monthly_heating.head()
# Query the dataframe for natural gas charges with CCF only?
df.query("service_type == 'Natural Gas'").head()
# Exclude demand charges from the natural gas costs. This is because the unit costs for natural gas go to infinity
# when there is zero usage but a customer charge
cost_df1 = df.drop(df[(df['service_type'] == 'Natural Gas') & (df['units'] != 'CCF')].index)
cost_df1.query("service_type == 'Natural Gas'").head()
# Create cost dataframe for given site from processed data
cost_df1 = cost_df1.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'cost']]
cost_df1.head()
# Split out by service type
monthly_heating_cost = pd.pivot_table(cost_df1,
values='cost',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
monthly_heating_cost.head()
# Add in columns for the missing energy services
missing_services = bu.missing_energy_services(monthly_heating_cost.columns)
bu.add_columns(monthly_heating_cost, missing_services)
# Drop the non-heating services
monthly_heating_cost = monthly_heating_cost.drop(labels=['Electricity', 'Sewer', 'Water'], axis=1)
# Create a total heating column
monthly_heating_cost['total_heating_cost'] = monthly_heating_cost.sum(axis=1)
monthly_heating_cost.head()
monthly_heating_cost = monthly_heating_cost.rename(columns={'Natural Gas':'Natural Gas Cost',
'Oil #1':'Oil #1 Cost',
'Steam': 'Steam Cost'})
monthly_heating_cost.head()
monthly_heat_energy_and_use = pd.merge(monthly_heating_cost, monthly_heating, left_index=True, right_index=True, how='outer')
monthly_heat_energy_and_use.head()
# Create unit cost columns in $ / MMBTU for each fuel type
monthly_heat_energy_and_use['fuel_oil_unit_cost'] = monthly_heat_energy_and_use['Oil #1 Cost'] / monthly_heat_energy_and_use['Oil #1']
monthly_heat_energy_and_use['natural_gas_unit_cost'] = monthly_heat_energy_and_use['Natural Gas Cost'] / monthly_heat_energy_and_use['Natural Gas']
monthly_heat_energy_and_use['district_heat_unit_cost'] = monthly_heat_energy_and_use['Steam Cost'] / monthly_heat_energy_and_use['Steam']
monthly_heat_energy_and_use['building_unit_cost'] = monthly_heat_energy_and_use.total_heating_cost / monthly_heat_energy_and_use.total_heating_energy
monthly_heat_energy_and_use.head()
# Reset the index for easier processing
monthly_heat_energy_and_use = monthly_heat_energy_and_use.reset_index()
# Add in unit costs for fuels that are currently blank
unit_cost_cols = ['fuel_oil_unit_cost', 'natural_gas_unit_cost', 'district_heat_unit_cost']
service_types = ['Oil #1_avg_unit_cost', 'Natural Gas_avg_unit_cost', 'Steam_avg_unit_cost']
unit_cost_dict = dict(zip(unit_cost_cols,service_types))
# Add in average unit costs calculated from all sites for each month
monthly_heat_energy_and_use = pd.merge(monthly_heat_energy_and_use, grouped_nonzero_heatfuel_use,
left_on=['fiscal_year', 'fiscal_mo'], right_on=['fiscal_year', 'fiscal_mo'],
how='left', suffixes=('', '_avg_unit_cost'))
monthly_heat_energy_and_use.head()
# Check each column to see if it is NaN (identified when the value does not equal itself) and if it is, fill with the average
# price per MMBTU taken from all sites
for col, service in unit_cost_dict.items():
monthly_heat_energy_and_use[col] = np.where(monthly_heat_energy_and_use[col] != monthly_heat_energy_and_use[col],
monthly_heat_energy_and_use[service],
monthly_heat_energy_and_use[col])
def fiscal_to_calendar(fiscal_year, fiscal_mo):
"""Converts a fiscal year and month into a calendar year and month for graphing purposes.
Returns (calendar_year, calendar_month) tuple."""
if fiscal_mo > 6:
calendar_month = fiscal_mo - 6
calendar_year = fiscal_year
else:
calendar_month = fiscal_mo + 6
calendar_year = fiscal_year - 1
return (calendar_year, calendar_month)
# Add calendar year and month columns
cal_year = []
cal_mo = []
for fiscal_year, fiscal_mo in zip(monthly_heat_energy_and_use.fiscal_year, monthly_heat_energy_and_use.fiscal_mo):
CalYear, CalMo = fiscal_to_calendar(fiscal_year, fiscal_mo)
cal_year.append(CalYear)
cal_mo.append(CalMo)
monthly_heat_energy_and_use['calendar_year'] = cal_year
monthly_heat_energy_and_use['calendar_mo'] = cal_mo
monthly_heat_energy_and_use.head()
# Create a date column using the calendar year and month to pass to the graphing function
def get_date(row):
return datetime.date(year=row['calendar_year'], month=row['calendar_mo'], day=1)
monthly_heat_energy_and_use['date'] = monthly_heat_energy_and_use[['calendar_year','calendar_mo']].apply(get_date, axis=1)
monthly_heat_energy_and_use.head()
%matplotlib inline
p9g1_filename, p9g1_url = gu.graph_filename_url(site, "energy_cost")
gu.fuel_price_comparison_graph(monthly_heat_energy_and_use, 'date', unit_cost_cols, 'building_unit_cost', p9g1_filename)
<jupyter_output><empty_output><jupyter_text>## Realized Savings from Fuel Switching: Page 9, Graph 2<jupyter_code>monthly_heat_energy_and_use.head()
old_usage_cols = ['Natural Gas', 'Oil #1', 'Steam']
# Create an indicator for whether a given heating fuel is available for the facility. This is done by checking the use for all
# months- if it is zero, then that building doesn't have the option to use that type of fuel.
for col in old_usage_cols:
new_col_name = col + "_available"
monthly_heat_energy_and_use[new_col_name] = np.where(monthly_heat_energy_and_use[col].sum() == 0, 0, 1)
monthly_heat_energy_and_use.head()
# Calculate what it would have cost if the building used only one fuel type
available_cols = ['Oil #1_available','Natural Gas_available','Steam_available']
available_dict = dict(zip(unit_cost_cols, available_cols))
print (available_dict)
hypothetical_cost_cols = []
for unit_cost, avail_col in available_dict.items():
new_col_name = unit_cost + "_hypothetical"
hypothetical_cost_cols.append(new_col_name)
monthly_heat_energy_and_use[new_col_name] = monthly_heat_energy_and_use[unit_cost] * \
monthly_heat_energy_and_use.total_heating_energy * monthly_heat_energy_and_use[avail_col]
monthly_heat_energy_and_use.head()
# Calculate the monthly savings to the building by not using the most expensive available fuel entirely
monthly_heat_energy_and_use['fuel_switching_savings'] = monthly_heat_energy_and_use[hypothetical_cost_cols].max(axis=1) \
- monthly_heat_energy_and_use.total_heating_cost
# Sort dataframe to calculate cumulative value
monthly_heat_energy_and_use = monthly_heat_energy_and_use.sort_values(by='date', ascending=True)
# Calculate cumulative value
monthly_heat_energy_and_use['cumulative_fuel_switching_savings'] = np.cumsum(monthly_heat_energy_and_use.fuel_switching_savings)
monthly_heat_energy_and_use.head()
p9g2_filename, p9g2_url = gu.graph_filename_url(site, "cumulative_fuel_switching_savings")
gu.create_monthly_line_graph(monthly_heat_energy_and_use, 'date', 'cumulative_fuel_switching_savings',
'Cumulative Fuel Switching Savings Realized [$]', p9g2_filename)
# Convert df to dictionary
heating_cost_rows = bu.df_to_dictionaries(heating_cost_and_use)
# Add data and graphs to main dictionary
template_data['heating_cost_analysis'] = dict(
graphs=[p9g1_url, p9g2_url],
table={'rows': heating_cost_rows},
)<jupyter_output><empty_output><jupyter_text># Water Analysis Table - Page 10<jupyter_code>water_use = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo','cost', 'usage', 'units']]
water_use.head()
# Create month count field for all months that have water and sewer bills
water_use_only = water_use.query("service_type == 'Water'")
water_months_present = bu.months_present(water_use_only)
water_mo_count = bu.month_count(water_months_present)
water_mo_count
# Create annual water gallon usage dataframe
water_gal_df = pd.pivot_table(water_use,
values='usage',
index=['fiscal_year',],
columns=['service_type'],
aggfunc=np.sum
)
water_gal_df.head()
# Use only required columns
water_gal_df = water_gal_df[['Water']]
# Calculate percent change column
water_gal_df['water_use_pct_change'] = water_gal_df.Water.pct_change()
# Create annual water and sewer cost dataframe
water_cost_df = pd.pivot_table(water_use,
values='cost',
index=['fiscal_year',],
columns=['service_type'],
aggfunc=np.sum
)
water_cost_df.head()
# Calculate totals, percent change
water_cost_df = water_cost_df[water_cost_df.columns.difference(['Electricity', 'Natural Gas', 'Oil #1', 'Steam', 'Refuse'])]
# Rename columns only if they exist in the water cost dataframe
rename_dict = {'Sewer': 'Sewer Cost',
'Water': 'Water Cost'}
water_cost_df = water_cost_df.rename(columns={k: v for k, v in rename_dict.items() if k in water_cost_df})
# First check to make sure sewer data is included; if so, calculate total cost
water_cost_df['total_water_sewer_cost'] = np.where('Sewer Cost' in list(water_cost_df.columns.values),
water_cost_df['Sewer Cost'] + water_cost_df['Water Cost'],
water_cost_df['Water Cost'])
water_cost_df['water_cost_pct_change'] = water_cost_df['Water Cost'].pct_change()
# First check to make sure sewer data is included; if so, calculate percent change
water_cost_df['sewer_cost_pct_change'] = np.where('Sewer Cost' in list(water_cost_df.columns.values),
water_cost_df['Sewer Cost'].pct_change(),
np.nan)
water_cost_df['total_water_sewer_cost_pct_change'] = water_cost_df.total_water_sewer_cost.pct_change()
# Merge use and cost dataframes
water_use_and_cost = pd.merge(water_cost_df, water_gal_df, left_index=True, right_index=True, how='outer')
water_use_and_cost.head()
water_use_and_cost['water_unit_cost'] = water_use_and_cost.total_water_sewer_cost / water_use_and_cost.Water
water_use_and_cost['water_unit_cost_pct_change'] = water_use_and_cost.water_unit_cost.pct_change()
# Use only complete years
water_use_and_cost['month_count'] = water_mo_count
water_use_and_cost = water_use_and_cost.query("month_count == 12")
water_use_and_cost = water_use_and_cost.drop('month_count', axis=1)
water_use_and_cost = water_use_and_cost.sort_index(ascending=False)
water_use_and_cost = water_use_and_cost.rename(columns={'Sewer Cost':'sewer_cost',
'Water Cost':'water_cost',
'total_water_sewer_cost':'total_cost',
'total_water_sewer_cost_pct_change':'total_cost_pct_change',
'Water':'total_usage',
'water_use_pct_change':'total_usage_pct_change',
'water_unit_cost':'total_unit_cost',
'water_unit_cost_pct_change':'total_unit_cost_pct_change'
})
water_use_and_cost<jupyter_output><empty_output><jupyter_text>## Create Water Cost Stacked Bar Graph - Page 10 Graph 1<jupyter_code>p10g1_filename, p10g1_url = gu.graph_filename_url(site, "utility_cost")
gu.create_stacked_bar(water_use_and_cost.reset_index(), 'fiscal_year', ['sewer_cost', 'water_cost'],
'Utility Cost [$]', "test title", p10g1_filename)<jupyter_output><empty_output><jupyter_text>## Create Monthly Profile of Water Usage - Page 10 Graph 2<jupyter_code># Create monthly water gallon dataframe
water_gal_df_monthly = pd.pivot_table(water_use,
values='usage',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
water_gal_df_monthly.head()
p10g2_filename, p10g2_url = gu.graph_filename_url(site, "monthly_water_usage_profile")
gu.create_monthly_profile(water_gal_df_monthly, 'Water', 'Monthly Water Usage Profile [gallons]', 'green', p10g2_filename)
# Convert df to dictionary
water_rows = bu.df_to_dictionaries(water_use_and_cost)
# Add data and graphs to main dictionary
template_data['water_analysis'] = dict(
graphs=[p10g1_url, p10g2_url],
table={'rows': water_rows},
)
template_data<jupyter_output><empty_output>
|
permissive
|
/testing/utility_dashboard_data.ipynb
|
alanmitchell/fnsb-benchmark
| 18 |
<jupyter_start><jupyter_text># Softmax exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
This exercise is analogous to the SVM exercise. You will:
- implement a fully-vectorized **loss function** for the Softmax classifier
- implement the fully-vectorized expression for its **analytic gradient**
- **check your implementation** with numerical gradient
- use a validation set to **tune the learning rate and regularization** strength
- **optimize** the loss function with **SGD**
- **visualize** the final learned weights
<jupyter_code>import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the linear classifier. These are the same steps as we used for the
SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis = 0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# add bias dimension and transform into columns
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data()
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
print 'dev data shape: ', X_dev.shape
print 'dev labels shape: ', y_dev.shape<jupyter_output>Train data shape: (49000, 3073)
Train labels shape: (49000,)
Validation data shape: (1000, 3073)
Validation labels shape: (1000,)
Test data shape: (1000, 3073)
Test labels shape: (1000,)
dev data shape: (500, 3073)
dev labels shape: (500,)
<jupyter_text>## Softmax Classifier
Your code for this section will all be written inside **cs231n/classifiers/softmax.py**.
<jupyter_code># First implement the naive softmax loss function with nested loops.
# Open the file cs231n/classifiers/softmax.py and implement the
# softmax_loss_naive function.
from cs231n.classifiers.softmax import softmax_loss_naive
import time
# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As a rough sanity check, our loss should be something close to -log(0.1).
print 'loss: %f' % loss
print 'sanity check: %f' % (-np.log(0.1))<jupyter_output>loss: 2.321306
sanity check: 2.302585
<jupyter_text>## Inline Question 1:
**Why do we expect our loss to be close to -log(0.1)? Explain briefly.**
**Your answer:** With random initialization we expect the probability of the correct class to be roughly 0.1. The loss is -log(P(y[i])), which we therefore expect to be close to -log(0.1).
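A quick worked version of that argument (standard softmax cross-entropy, added here only as a sanity check): with $C = 10$ classes and near-zero random scores, every class gets probability roughly $1/C$, so $L_i = -\log\left(\frac{e^{f_{y_i}}}{\sum_j e^{f_j}}\right) \approx -\log(1/10) \approx 2.3026$, which matches the sanity-check output above.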
<jupyter_code># Complete the implementation of softmax_loss_naive and implement a (naive)
# version of the gradient that uses nested loops.
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As we did for the SVM, use numeric gradient checking as a debugging tool.
# The numeric gradient should be close to the analytic gradient.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# similar to SVM case, do another gradient check with regularization
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 1e2)
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# Now that we have a naive implementation of the softmax loss function and its gradient,
# implement a vectorized version in softmax_loss_vectorized.
# The two versions should compute the same results, but the vectorized version should be
# much faster.
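# Illustrative sketch only (an assumption, not the course's reference code in
# cs231n/classifiers/softmax.py): one common fully-vectorized form of the softmax
# loss and gradient. The regularization convention reg * sum(W*W) is assumed here
# and may differ from the assignment's.
def softmax_loss_vectorized_sketch(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)                                # (N, C) class scores
    scores -= scores.max(axis=1, keepdims=True)      # shift for numerical stability
    probs = np.exp(scores)
    probs /= probs.sum(axis=1, keepdims=True)        # row-wise softmax probabilities
    loss = -np.log(probs[np.arange(num_train), y]).mean() + reg * np.sum(W * W)
    dscores = probs
    dscores[np.arange(num_train), y] -= 1            # gradient of the loss w.r.t. scores
    dW = X.T.dot(dscores) / num_train + 2 * reg * W  # backpropagate into the weights
    return loss, dW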
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.softmax import softmax_loss_vectorized
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# As we did for the SVM, we use the Frobenius norm to compare the two versions
# of the gradient.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'Loss difference: %f' % np.abs(loss_naive - loss_vectorized)
print 'Gradient difference: %f' % grad_difference
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [5e-7, 1e-6]
regularization_strengths = [1e6, 5e8]
max_count = 100
for count in xrange(max_count):
lr = np.random.uniform(learning_rates[0], learning_rates[1])
reg = np.random.uniform(regularization_strengths[0], regularization_strengths[1])
softmax = Softmax()
loss_hist = softmax.train(X_train,
y_train,
learning_rate=lr,
reg=reg,
num_iters=1500,
verbose=True)
y_train_pred = softmax.predict(X_train)
training_accuracy = np.mean(y_train == y_train_pred)
y_val_pred = softmax.predict(X_val)
validation_accuracy = np.mean(y_val == y_val_pred)
results[(lr, reg)] = (training_accuracy, validation_accuracy)
if validation_accuracy > best_val:
best_val = validation_accuracy
best_softmax = softmax
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained softmax classifer in best_softmax. #
################################################################################
pass
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# evaluate on test set
# Evaluate the best softmax on test set
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'softmax on raw pixels final test set accuracy: %f' % (test_accuracy, )
# Visualize the learned weights for each class
w = best_softmax.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])<jupyter_output><empty_output>
|
no_license
|
/assignment1/.ipynb_checkpoints/softmax-checkpoint.ipynb
|
solpaul/CS231n-Assignments
| 3 |
<jupyter_start><jupyter_text>Matplotlib is an open-source Python plotting library, similar to MATLAB's plotting library, that lets you manipulate most components of a figure. Here we show how to modify some of these components (axes, ticks, grid, labels, etc.) in order to produce nicer-looking plots. Created: March 29, 2015
Modified: June 10, 2019
Tested on:
- Python 3.6.4
- matplotlib 2.1.2<jupyter_code>%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns<jupyter_output><empty_output><jupyter_text>## Improving the plot of a function
We plot the function $f(x) = x^2 + 3x - 1$ for $0 <= x <= 10$<jupyter_code># Example data
x = np.linspace(0, 10)
y = x**2 + 3*x - 1
# Remove the tick marks on the axes
plt.tick_params(width=0)
# Add a grid
plt.grid(linestyle='-', color='#808080', alpha=0.3)
# Remove the top and right spines
ax = plt.gca()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
# Axis labels:
# wrapping them in dollar signs ($) renders them as LaTeX
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
# Matplotlib adds a margin between the plotted curve and the axes by default
# Remove that margin
plt.margins(0)
plt.plot(x, y, label='$x^2 + 3x - 1$')
# Add a legend at the best location ('best') so it does not
# overlap the plotted curve
plt.legend(loc='best')
plt.show()<jupyter_output><empty_output><jupyter_text>## ggplot style
Matplotlib can use a style that mimics R's ggplot plotting package. Just add the following line at the start of the script:<jupyter_code>plt.style.use('ggplot')<jupyter_output><empty_output><jupyter_text>Every plot created after this line will use the ggplot style<jupyter_code>plt.plot(x, y)
plt.show()<jupyter_output><empty_output><jupyter_text># Seaborn style
Seaborn is a library built on top of matplotlib with improved visuals. To adopt the seaborn style, just call the *set* command<jupyter_code>sns.set()<jupyter_output><empty_output><jupyter_text>Every plot created after this command will use the seaborn style<jupyter_code>
plt.plot(x, y)
plt.show()<jupyter_output><empty_output>
|
non_permissive
|
/matplotlib/mejoras visuales en matplotlib I.ipynb
|
vilmauricio/pyciencia
| 6 |
<jupyter_start><jupyter_text># 5) Predictions, Recommendations and Conclusions<jupyter_code>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
import matplotlib.dates as mdates
from statsmodels.tsa.ar_model import AutoReg
import datetime as dt
import warnings
warnings.filterwarnings('ignore')<jupyter_output><empty_output><jupyter_text>## Predicting the number of vaccines that will be administered### Getting all the datasets<jupyter_code># importing the prediction data
df_pred = pd.read_csv('../clean_data/predictions.csv')
# lets rename columns for easy use
df_pred.rename(columns={"Unnamed: 0": "date" , "7dayrollingavg_newlyconfirmed":"7D_roll_pred", 'County':'county'},inplace = True)
# set county as the index
df_pred.set_index('county', inplace= True)
# import state-wise vaccine allocation data and keep California
df_vacc_alloc = pd.read_csv('../clean_data/vaccine_population.csv')
df_vacc_alloc = df_vacc_alloc[df_vacc_alloc['jurisdiction'] == 'California']
df_vacc_alloc.drop(columns=['2019_population','hhs_region','total_first_allocation' ], inplace = True)
# importing vaccination administration data
df_vacc_admin = pd.read_csv('../clean_data/scrapped_ca_vaccine_ext_feb.csv')
df_vacc_admin.drop(columns=['Unnamed: 5', 'Unnamed: 4'], inplace = True)
# importing county population data
df_county_pop = pd.read_csv('../clean_data/cases_with_mask_use.csv')
# get county and population information only
df_county_pop = pd.DataFrame(df_county_pop.groupby(by = 'county')['population'].max())
<jupyter_output><empty_output><jupyter_text>## EDA on county population<jupyter_code>df_county_pop.head()
# Create a column with each county's share of the total California population.
# We will use this ratio for dose distribution.
df_county_pop['ratio_county_pop'] = (df_county_pop['population']/ df_county_pop['population'].sum())
df_county_pop.reset_index(inplace= True)<jupyter_output><empty_output><jupyter_text>## EDA on prediction data<jupyter_code># lets look at the data
df_pred.head()
# '2021-02-14'<jupyter_output><empty_output><jupyter_text>## EDA on vaccination Administration data<jupyter_code>df_vacc_admin
# lets look at the data
df_vacc_admin = df_vacc_admin[df_vacc_admin['state']=='California'][['state' , 'date' ,'total_doses_distributed','total_doses_administered']]
df_vacc_admin["date"]= pd.to_datetime(df_vacc_admin["date"])
pd.set_option('display.max_rows', 100)
df_vacc_admin.head()<jupyter_output><empty_output><jupyter_text>#### Two steps are needed here
#### Step 1: roll the data up 7 days ahead, i.e. 2021-01-16 is summed with the following 6 days.
#### Step 2: split the data up for each county by date.
### Step 1: roll the data up 7 days ahead, i.e. 2021-01-16 is summed with the following 6 days.<jupyter_code>df_vacc_admin.head()
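# Hedged cross-check sketch (an addition for illustration, not part of the original
# workflow): the 7-day forward totals computed with the explicit loop below can also
# be written as a shifted difference of the cumulative column. Done on a throwaway
# copy so the original frame is left untouched.
_check = df_vacc_admin.copy()
_check['doses_dist_7roll_alt'] = (
    _check['total_doses_distributed'].shift(-6) - _check['total_doses_distributed']
)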
df_vacc_admin.head()
# create new columns with the 7-day forward totals of doses administered and doses distributed
df_vacc_admin['doses_dist_7roll'] = 0
df_vacc_admin['doses_admin_7roll'] = 0
for i in range(0,30):
df_vacc_admin['doses_dist_7roll'][i]= df_vacc_admin['total_doses_distributed'][i+6]- df_vacc_admin['total_doses_distributed'][i]
df_vacc_admin['doses_admin_7roll'][i]= df_vacc_admin['total_doses_administered'][i+6]- df_vacc_admin['total_doses_administered'][i]
# remove the dates we don't need, i.e. any records beyond 2021-02-14
df_vacc_admin = df_vacc_admin[df_vacc_admin['date'] <= '2021-02-14']
# Plot the 7-day rolling total of doses distributed
plt.plot(df_vacc_admin.index, df_vacc_admin['doses_dist_7roll'])
plt.title('total_doses_distributed')
plt.ylabel('doses_dist_7roll');
plt.xlabel('date');
plt.show()
# Plot the 7-day rolling total of doses administered
plt.plot(df_vacc_admin.index, df_vacc_admin['doses_admin_7roll'] , 'r')
plt.title('total_doses_administered')
plt.ylabel('doses_admin_7roll');
plt.xlabel('date');
plt.show()<jupyter_output><empty_output><jupyter_text>### Generating recommendations for how many vaccines each county should get<jupyter_code>df_county_pop.head()
df_vacc_admin.head()
# lets do a cross join and merge the data sets for county and county population ratio.
# this will help us to divide the doses admininstered and doses distributed 7 days cumulative by county
# since we need a cross join on two data frames that do not share a key, create a constant join key in both
df_county_pop['joinkey'] = 0
df_vacc_admin['joinkey'] = 0
# lets merge the vaccine and county population data together
df_county_vacc_dist_admin_7Droll = pd.merge(df_vacc_admin, df_county_pop, on="joinkey" , how="outer")
# selecting only the columns we need
df_county_vacc_dist_admin_7Droll = df_county_vacc_dist_admin_7Droll[['county' ,'date','doses_dist_7roll','doses_admin_7roll', 'ratio_county_pop']]
# round the county distribution and administration numbers
df_county_vacc_dist_admin_7Droll['doses_dist_7roll_county'] = round(df_county_vacc_dist_admin_7Droll['doses_dist_7roll'] * df_county_vacc_dist_admin_7Droll['ratio_county_pop'])
df_county_vacc_dist_admin_7Droll['doses_admin_7roll_county'] = round(df_county_vacc_dist_admin_7Droll['doses_admin_7roll'] * df_county_vacc_dist_admin_7Droll['ratio_county_pop'])
# selecting only the columns we needed
df_county_vacc_dist_admin_7Droll=df_county_vacc_dist_admin_7Droll[['county', 'date', 'doses_dist_7roll_county' , 'doses_admin_7roll_county' ]]
# final data set for Vaccine Administration and Distribution by County and Date
df_county_vacc_dist_admin_7Droll.head()<jupyter_output><empty_output><jupyter_text>### Merge data sets together#### create a dataframe that has County, Date, Prediction, County population, vaccine allocation<jupyter_code># lets add population to predictions
df_county_vacc_pred_7Droll = pd.merge(df_pred ,df_county_pop, on ='county' , how = 'left' )
# since the 7 day rolling average was based off 100K , we need to calculate actual number of 7 day new patients
df_county_vacc_pred_7Droll['7D_roll_pred_tot'] = np.round((df_county_vacc_pred_7Droll['7D_roll_pred'] * df_county_vacc_pred_7Droll['population'])/ 100_000)
#df_county_vacc_pred_7Droll['7D_roll_pred_tot'] = np.round((df_county_vacc_pred_7Droll['7D_roll_pred'] * 100_000))
# lets look at merged data
df_county_vacc_pred_7Droll = df_county_vacc_pred_7Droll[['county', 'date' ,'7D_roll_pred_tot' ]]<jupyter_output><empty_output><jupyter_text>### Append the 'county to population ratio' to merged data set.<jupyter_code>df_county_vacc_pred_7Droll["date"]= pd.to_datetime(df_county_vacc_pred_7Droll["date"])
df_county_resource = pd.merge(df_county_vacc_pred_7Droll, df_county_vacc_dist_admin_7Droll, on=['county' , 'date'], how="inner")
df_county_resource.head()<jupyter_output><empty_output><jupyter_text>### Calculate the DELTA the difference between predicted new patients and vaccines available<jupyter_code>df_county_resource.head()
df_county_resource['delta'] = df_county_resource['7D_roll_pred_tot'] - ( df_county_resource['doses_dist_7roll_county'])
df_county_resource[df_county_resource['date'] == '2021-02-07']
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.lineplot(x="date", y="delta", data=df_county_resource)
plt.xticks(rotation=15)
plt.title('7 Day prediction for New Covid Patients')
plt.show()
# export the data
df_county_resource[df_county_resource['date'] == '2021-02-07'].to_csv('../clean_data/California_2021_02_07.csv', index=False) <jupyter_output><empty_output>
|
no_license
|
/main_code/5_Predictions_and_Conclusions.ipynb
|
lettywu/COVID-19-vaccinations
| 10 |
<jupyter_start><jupyter_text>**PYTHON SKLEARN PRE-PROCESSING + PIPELINE**
* Faire du pre-processing pour améliorer les performances en machine learning et en data science
* Traiter des données avec LabelEncoder,OneHotEncoder,MinMaxScaler,StandardScaler,et d'autres transformers du module sklearn.preprocessing
* Assembler plusieurs transformer avec le module Pipelinepre-processing :
---------------
Les algos de ML apprennent à partir des données qui leur sont fournies,par conséquent si ces données sont de mauvaise qualité,incomplètes,erronnées ,redondantes,l'algo qui en résulte sera lui même assez mauvais ,puisqu'il est sensé reflèter ce qu'il voit dans les données, c'est pour cette raison qu'il est impératif de bien préparer nos données avant leur passage dans la ML.Il faut les nettoyer,les filtrer,les normaliser : c'est le pre-processing(prétraitement).
Pre-processing operations:
1. Encoding (convert qualitative data into numeric values)
chien 0
chat 1
chien 0
oiseau 2
2. Normalization (put all quantitative variables on the same scale, which makes learning much easier for the machine)
2 0
10 1
4 0.25
6 0.5
3. Imputation (replace missing data with certain statistical values)
2 2
1 1
3 3
'nan' 2
4. Selection (variable selection, using statistical tests such as the chi2 test to select the variables most useful for building a model)
1 0 1
2 1 => 2
3 0 3
4 0 4
5. Extraction, which consists of generating new variables from information hidden in the dataset
sklearn.preprocessing: encoding and normalization
sklearn.impute: imputation
sklearn.feature_selection: selection
sklearn.feature_extraction: extraction
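As a quick illustration of the normalization example above, here is a hedged sketch (added only for clarity, not part of the original notebook) using MinMaxScaler on the same toy values 2, 10, 4 and 6:

```
import numpy as np
from sklearn.preprocessing import MinMaxScaler

# toy quantitative column taken from the normalization example above
values = np.array([[2.0], [10.0], [4.0], [6.0]])

scaler = MinMaxScaler()              # rescales each feature to the [0, 1] range
print(scaler.fit_transform(values))  # [[0.], [1.], [0.25], [0.5]]
```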
sklearn.preprocessing: the transformers
1. Transformer classes
2. Simple routines or mathematical functions <jupyter_code>import numpy as np
from sklearn.preprocessing import LabelEncoder
X = np.array(['Chat',
'Chien',
'Chat',
'Oiseau'])<jupyter_output><empty_output><jupyter_text>**Transformer**
To transform data consistently, transformers provide 2 methods:
1. The fit(Xtrain) method: builds a transformation function from Xtrain
2. The transform(X) method: applies the transformation to Xtrain, Xtest and any other future data.
<jupyter_code>transformer = LabelEncoder()
transformer.fit(X)
transformer.transform(X)
# the fit_transform method combines the fit and transform methods
transformer.fit_transform(X)<jupyter_output><empty_output><jupyter_text>**Transformer and Estimator**
In practice, when we want to develop an ML model, we start by splitting our dataset into 2 parts (trainset and testset)
(X,y) : (Xtrain, ytrain) and (Xtest, ytest)
With the trainset data, we build a transformation function with the fit_transform() method, which lets us process our data, and then we train an estimator with the fit() method.
After this step, we can use the transformer (transform() method) and the estimator (predict() method), as developed, to transform the testset data and then make new predictions.
By combining a transformer and an estimator, we obtain a pipeline (i.e. a chain of transformations).
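A minimal sketch of that idea (added only as an illustration; the dataset, scaler and classifier below are arbitrary choices, not from the original notebook):

```
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# transformer + estimator chained into a single object
model = Pipeline([('scaler', StandardScaler()),
                  ('clf', SGDClassifier(random_state=0))])

model.fit(X_train, y_train)         # fit_transform on the scaler, then fit on the classifier
print(model.score(X_test, y_test))  # transform on the scaler, then predict/score on the classifier
```

The pipeline guarantees that exactly the same transformation fitted on the trainset is reused on the testset and on any future data.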
<jupyter_code><jupyter_output><empty_output>
|
no_license
|
/08_sklearn_preprocessing.ipynb
|
AudryBarimbane/Python_Machine_Learning
| 3 |
<jupyter_start><jupyter_text># Multiclass Support Vector Machine exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
In this exercise you will:
- implement a fully-vectorized **loss function** for the SVM
- implement the fully-vectorized expression for its **analytic gradient**
- **check your implementation** using numerical gradient
- use a validation set to **tune the learning rate and regularization** strength
- **optimize** the loss function with **SGD**
- **visualize** the final learned weights
<jupyter_code># Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2<jupyter_output>/opt/conda/envs/python2/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
<jupyter_text>## CIFAR-10 Data Loading and Preprocessing<jupyter_code># Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print 'Training data shape: ', X_train.shape
print 'Validation data shape: ', X_val.shape
print 'Test data shape: ', X_test.shape
print 'dev data shape: ', X_dev.shape
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print mean_image[:10] # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print X_train.shape, X_val.shape, X_test.shape, X_dev.shape<jupyter_output>(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
<jupyter_text>## SVM Classifier
Your code for this section will all be written inside **cs231n/classifiers/linear_svm.py**.
As you can see, we have prefilled the function `svm_loss_naive` which uses for loops to evaluate the multiclass SVM loss function. <jupyter_code># Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.00001)
print 'loss: %f' % (loss, )<jupyter_output>loss: 8.855149
<jupyter_text>The `grad` returned from the function above is right now all zero. Derive and implement the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.
To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:<jupyter_code># Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 1e2)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad)<jupyter_output>numerical: 18.417712 analytic: 18.417712, relative error: 4.457261e-12
numerical: -16.534115 analytic: -16.534115, relative error: 5.023271e-12
numerical: -2.202000 analytic: -2.202000, relative error: 4.689941e-11
numerical: -11.534667 analytic: -11.534667, relative error: 7.051501e-12
numerical: 3.368000 analytic: 3.368000, relative error: 5.880180e-11
numerical: -1.642571 analytic: -1.642571, relative error: 1.002080e-10
numerical: -7.102000 analytic: -7.102000, relative error: 6.482815e-12
numerical: -10.267025 analytic: -10.267025, relative error: 8.164838e-11
numerical: 5.299514 analytic: 5.307441, relative error: 7.473903e-04
numerical: 13.338367 analytic: 13.338367, relative error: 3.328613e-12
numerical: 6.005752 analytic: 6.005752, relative error: 3.796395e-11
numerical: 26.579851 analytic: 26.579851, relative error: 6.075335e-12
numerical: -12.514187 analytic: -12.514187, relative error: 5.010390e-12
numerical: -4.862713 analytic: -4.862713, relative error: 7.052339e-11
nume[...]<jupyter_text>### Inline Question 1:
It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable*
**Your Answer:** *fill this in.*<jupyter_code># Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# The losses should match but your vectorized implementation should be much faster.
print 'difference: %f' % (loss_naive - loss_vectorized)
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss and gradient: computed in %fs' % (toc - tic)
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss and gradient: computed in %fs' % (toc - tic)
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'difference: %f' % difference<jupyter_output>Naive loss and gradient: computed in 0.126139s
Vectorized loss and gradient: computed in 0.009491s
difference: 0.000000
<jupyter_text>### Stochastic Gradient Descent
We now have vectorized and efficient expressions for the loss and the gradient, and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.<jupyter_code># In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=5e4,
num_iters=1500, verbose=True)
toc = time.time()
print 'That took %fs' % (toc - tic)
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print 'training accuracy: %f' % (np.mean(y_train == y_train_pred), )
y_val_pred = svm.predict(X_val)
print 'validation accuracy: %f' % (np.mean(y_val == y_val_pred), )
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
for lr in learning_rates:
for reg in regularization_strengths:
svm = LinearSVM()
svm.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=1500)
acc_train = np.mean(y_train == svm.predict(X_train))
acc_val = np.mean(y_val == svm.predict(X_val))
if acc_val > best_val:
best_val = acc_val
best_svm = svm
results[(lr, reg)] = (acc_train, acc_val)
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])<jupyter_output><empty_output>
|
permissive
|
/assignment1/svm.ipynb
|
kamikat/cs231n
| 6 |
<jupyter_start><jupyter_text>### Loading data into pandas and do exploratoin<jupyter_code># load data
test_data = pd.read_csv("/Users/yijia/Documents/current_work/BigData/Assign/Final/data/test.csv")
train_data = pd.read_csv('/Users/yijia/Documents/current_work/BigData/Assign/Final/data/train.csv')
attributes = pd.read_csv("/Users/yijia/Documents/current_work/BigData/Assign/Final/data/attributes.csv")
product_description = pd.read_csv("/Users/yijia/Documents/current_work/BigData/Assign/Final/data/product_descriptions.csv")<jupyter_output><empty_output><jupyter_text>### Problem statement:
The training data contains id, product_uid, product title, and search term, with the relevance given as the label. We can also use the attributes and product descriptions as additional training information, by joining on product_uid.
Our task is to predict the relevance for data in test set given id and product_uid. <jupyter_code>train_data.head()
train_data.info()
test_data.head()
attributes.head()
attributes.info()
product_description.head()<jupyter_output><empty_output><jupyter_text>### Corpus exploration:
For this task, the corpus can be seen as the combination of the product description file, which contains a product_uid and the product description for that unique product_uid, and the attributes file, which maps each product_uid to attribute names and values.
To explore the corpus, we can calculate each word's term frequency as well as its inverse document frequency over the product description file, treating each single description as a document in the corpus. We can also look into the attributes of a product and see what relationship they have to the query. <jupyter_code># terms in product description
terms = dict()
uids = product_description['product_uid']
docs = product_description['product_description']
for i in range(0, len(product_description)):
terms[uids[i]] = re.split("\W+", docs[i])
<jupyter_output><empty_output><jupyter_text>#### Calculate tf and idf of each term; the score can be represented as tf*idf<jupyter_code># explore product description
# calculate tf and idf for each term in a single product's description.
import re
def calculateTf(uid, t):
# cur_doc = product_description[product_description['product_uid'] == uid]['product_description'][0]
# terms = set(re.split("\W+", cur_doc))
term = terms[uid]
# tf = dict()
# for t in term:
# tf[t] = cur_doc.count(t)
tf = term.count(t)
return tf
def calculateIdf(uid, t):
# cur_doc = product_description[product_description['product_uid'] == uid]['product_description'][0]
# terms = set(re.split("\W+", cur_doc))
term = terms[uid]
# idf = dict()
N = len(product_description)
# for t in terms:
# idf[t] = np.log(N/(getDf(t) + 1))
idf = np.log(N/(getDf(t) + 1))
return idf
def getDf(term):
cnt = 0
for t in terms.values():
if term in t:
cnt += 1
return cnt
# scores can be represent as tf*idf
def getScore(uid):
return calculateIdf(uid)*calculateTf(uid)<jupyter_output><empty_output><jupyter_text>#### Explore attributes, using information about brands, material, etc<jupyter_code># explore attributes
attributes = attributes[attributes['value'] != 'No']
brands = attributes[attributes['name'] == "MFG Brand Name"]
material = attributes[attributes['name'] == "Material"]
material.head()
brands.head()<jupyter_output><empty_output><jupyter_text>#### Finding:
The attributes available for each product_uid may differ. For example, some products do not have information about their brand or material, but all products have a product description from which we can retrieve some information. The title can also be used to predict relevance: products whose titles are related to the query are more likely to be relevant. So basically we use the description's tf-idf score, brand, title and material as features to decide whether a product is relevant to the query. The data set is very large, so when processing the descriptions we only calculate scores for terms that appear in the search query, but it still takes time to compute the tf and idf. #### Approach to solve the problem:
After deciding which features to use and doing the feature engineering, we can decide which model to use to make the prediction. Since the target is a number in the range [0, 3], a regression model is a natural baseline: we can quantify the features of the train and test data, use integers to represent string features, and then apply a linear regression model to make the prediction. After the baseline, a model such as random forest is worth trying, since RF can handle both classification and regression and works well with both discrete and continuous features, so I may train an RF model on the training set and evaluate it on the test set to see how it works. Other models like CNNs may also be used. Since there are several terms in each query, I intend to combine them with two different operators, "OR" and "AND", and then compare the resulting performance. And since we have different fields such as title, description and attributes, we can try to assign different weights to each field and use a weighted operator like "WAND" to compute the relevance.<jupyter_code># Feature engineering
# merge train and test with description
train = train_data.merge(product_description, on = "product_uid", how = 'left')
test = test_data.merge(product_description, on = "product_uid", how = 'left')
train.head()
test.head()
# merge train and test with brand and material
train = train.merge(brands, on = "product_uid", how = 'left')
test = test.merge(brands, on = "product_uid", how = 'left')
train = train.merge(material,on = "product_uid", how = 'left')
test = test.merge(material, on = "product_uid", how = 'left')
train.head()
test.head()<jupyter_output><empty_output><jupyter_text>### Calculate AND and OR operator's result of each term and save<jupyter_code># calculate AND score, OR score and add new column to train
search_terms = []
for st in train['search_term']:
search_terms.append(re.split("\W+", st))
AND_score = []
OR_score = []
idx = 0
for ts in search_terms:
cur_uid = train['product_uid'][idx]
cur_and = 1 # and , multiply all terms' score
cur_or = 0 # or, find max one
for t in ts:
cur_tf = calculateTf(cur_uid, t)
cur_idf = calculateIdf(cur_uid, t)
cur_and = cur_and * cur_tf*cur_idf
cur_or = max(cur_or, cur_tf*cur_idf)
AND_score.append(cur_and)
OR_score.append(cur_or)
idx += 1
train['AND_score'] = AND_score
train['OR_score'] = OR_score
# calculate AND score, OR score and add new column to test
search_terms_t = []
for st in test['search_term']:
search_terms_t.append(re.split("\W+", st))
AND_score_t = []
OR_score_t = []
idx = 0
for ts in search_terms_t:
cur_uid = test['product_uid'][idx]
cur_and = 1 # and , multiply all terms' score
cur_or = 0 # or, find max one
for t in ts:
cur_tf = calculateTf(cur_uid, t)
cur_idf = calculateIdf(cur_uid, t)
cur_and = cur_and * cur_tf*cur_idf
cur_or = max(cur_or, cur_tf*cur_idf)
AND_score_t.append(cur_and)
OR_score_t.append(cur_or)
idx += 1
test['AND_score'] = AND_score_t
test['OR_score'] = OR_score_t
# process with product title, the percent of terms that appear in title
train_title = []
test_title = []
idx = 0
for ts in train['search_term']:
cur_terms = set(re.split("\W+", ts))
cur_title = set(re.split("\W+", train['product_title'][idx]))
train_title.append(float(len(cur_terms.intersection(cur_title)))/len(cur_title))
idx += 1
train['title_contain'] = train_title
idx = 0
for ts in test['search_term']:
cur_terms = set(re.split("\W+", ts))
cur_title = set(re.split("\W+", test['product_title'][idx]))
test_title.append(float(len(cur_terms.intersection(cur_title)))/len(cur_title))
idx += 1
test['title_contain'] = test_title
train.head()
test.head()
train.head()<jupyter_output><empty_output>
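Following the modelling plan sketched in the approach section, a hedged baseline could look like the snippet below (not part of the original notebook; it assumes the `relevance` label from train.csv is still present in `train` and uses only the engineered numeric features):

```
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

features = ['AND_score', 'OR_score', 'title_contain']
X = train[features].fillna(0)
y = train['relevance']                      # assumed label column carried over from train.csv

X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

rf = RandomForestRegressor(n_estimators=100, random_state=42)
rf.fit(X_tr, y_tr)

preds = rf.predict(X_val)
print(np.sqrt(mean_squared_error(y_val, preds)))   # RMSE on a held-out split
```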
|
no_license
|
/FinalPrj.ipynb
|
YijiaJin/BigDataAssign
| 7 |
<jupyter_start><jupyter_text>1. Write a Python program to find those numbers which are divisible by 7 and multiples of 11, between 1000 and 2000 (both included).<jupyter_code>
for i in range (1000,2001):
if(i%7==0 and i%11==0):
print(i)<jupyter_output>1001
1078
1155
1232
1309
1386
1463
1540
1617
1694
1771
1848
1925
<jupyter_text>2. Write a program to print this pattern
#
##
###<jupyter_code>a=" "
b="#"
for i in range (4):
d=b*i
for j in range(4):
if(i==j):
c=a*(4-j)
e=c+d
print(e)<jupyter_output>
#
##
###
<jupyter_text>3. list=[apple, orange, apple, banana]
. Replace all the apple in the list with grapes using for loop<jupyter_code>list=["apple", "orange", "apple", "banana"]
for i in range(len(list)):
if(list[i])==("apple"):
list[i]="grapes"
print(list)<jupyter_output>['grapes', 'orange', 'grapes', 'banana']
|
no_license
|
/TASK_6.ipynb
|
abnv16/learn_py
| 3 |
<jupyter_start><jupyter_text>
# CIS024C - Spring 2018 - Monday 5:30-9:25pm
## Homework 7
Homework 7 covers exercises that involve Exception Handling
The below sites have some interesting and useful information on working with files
* Errors and Exceptions: https://docs.python.org/2/tutorial/errors.html
* Built-in Exceptions: https://docs.python.org/2/library/exceptions.html
You will need to download this notebook and use this as a starting point for your homework. You will just need to fill in the content of each code-block (cell) and execute. Once you have completed all the exercises, you will need to save and upload this to your github repository under a folder called hw7.
Note also that the exercises build on top of one another, so you might not be able to do the next exercise if you have not completed the previous exercise.
Post any questions you have on our Slack at **cis-024c1.slack.com**
** Slides ** for Week 7 can be found at
https://docs.google.com/presentation/d/1HcWIuVciM0_L935Umi5rPgQHJYIv1bNfYnErfOelbtw/edit?usp=sharing
**Please refer back to hw1 and slack for instructions on how to setup your computer for developing using Python.**### Helpful Jupyter Commands
Below are some useful commands to know when using Jupyter
1. You can add a new cell by clicking on the "+" icon on top.
2. You can delete a cell by selecting that cell and clicking on the "scissors" icon on top.
3. You can execute a cell by either pressing shift+enter or selecting the "play" button on top.
4. You can create a new file in Jupyter via the File menu->New Notebook option. Make sure to select Python 2 when creating your notebook.
5. Also, for your code blocks make sure that Code is selected instead of another option like Markdown.
6. Use the Enter key to go to the next line in a cell to enter the next statement.
7. You can clear results by clicking on the Cell menu item and selecting Current Output->Clear or All Output->Clear depending on whether you are trying to just clear the output for one cell or for all cells.
8. In case your program has crashed for some reason (infinite loop, for example), you can restart your Python session by select Kernel in the menu and selecting Restart.
#### Check Python Version<jupyter_code>!python --version<jupyter_output>Python 2.7.12 :: Continuum Analytics, Inc.
<jupyter_text>#### Sample Exercises with Exception Handling
Week 7 Class Work can be found here https://github.com/cis024c/spring2018classwork/blob/master/week7/week7_classwork.ipynb**Exercise 1 - Testing your knowledge of Exceptions **
Answer the below questions
1. What is the difference between a Syntax Error and a Runtime Error?
2. Are exceptions Syntax Errors or Runtime Errors?
3. How do exceptions help a programmer?
4. Which exception is raised when the user tries to use a variable that has not been defined?1. A syntax error makes the program unexecutable; we have to correct the error before running it. A runtime error, by contrast, occurs during program execution under certain circumstances; it is a dynamic error and cannot be detected at compile time.
2. Exceptions are runtime errors.
3. Exception handling is an effective way of dealing with exceptional conditions. It decouples the detection and handling of these conditions and automates the propagation of the exception from the point of detection to the point of handling. As a result, the code can be much cleaner, easier to write correctly, and easier to maintain.
4. NameError will be raised when using a variable that has not been defined.#### Exercise 2 - Raising Exceptions
Write python programs that raise the following exceptions.
1. ValueError
2. TypeError
3. IndexError
4. KeyError<jupyter_code>### YOUR CODE GOES
# ValueError
print "---------------------------"
try:
x1 = raw_input("Enter an Integer:")
x1 = int(x1)
print x1
except ValueError:
print "Oops - ValueError"
# TypeError
print "---------------------------"
try:
x2 = raw_input("Enter an Integer:")
x2 = x2 + 3
print x2
except TypeError:
print "Oops - TypeError"
# IndexError
print "---------------------------"
x3 = [1, 2, 3]
try:
for i in range(len(x3)+1):
print x3[i]
except IndexError:
print "Oops - IndexError"
# KeyError
print "---------------------------"
x4 = {"A":1, "B":2}
try:
print x4["A"]
print x4["C"]
except KeyError:
print "Oops - KeyError"
### END CODE<jupyter_output>---------------------------
Enter an Integer:abc
Oops - ValueError
---------------------------
Enter an Integer:23
Oops - TypeError
---------------------------
1
2
3
Oops - IndexError
---------------------------
1
Oops - KeyError
<jupyter_text>#### Exercise 3 - Handling Exceptions
Write a Python program that asks the user to enter a GPA (integer values - 0,1,2,3 or 4). Convert the input from the user into an integer. Write an exception handler to handle the ValueError exception and display the message "ValueError occurred. Please try again". If the value entered by the user is not compatible with integer values the program will raise a ValueError exception and display the message from within the exception handler.<jupyter_code>### YOUR CODE GOES BELOW
input_gpa = raw_input("Please Enter GPA(Integer values - 0,1,2,3 or 4):")
try:
gpa = int(input_gpa)
if gpa not in (0, 1, 2, 3, 4):
raise ValueError
except ValueError:
print "ValueError occurred. Please try again"
### END CODE<jupyter_output>Please Enter GPA(Integer values - 0,1,2,3 or 4):S3
ValueError occurred. Please try again
<jupyter_text>#### Exercise 4 - Displaying the error description in an Exception
Create a list of **5** items in a grocery cart. For example, your list can be something like the below
**Example of a list with 2 items:**
groceryList = ["suger","rice"]
Write an exception handler to handle an IndexError exception and store the details of the exception in a variable called **details**. If the exception occurs, print out a message saying "Exception Occurred" along with the details (from the **details** variable).
In your program attempt to access the 6th item in the list.
Since there are only 5 elements, the exception handler should be triggered and the message printed inside the exception should be printed.<jupyter_code>### YOUR CODE GOES BELOW
groceryList = ["suger","rice","pepper","Soy Sause","salt"]
try:
for i in range(0, len(groceryList)+1):
print "Grocery - %s: %s" % (i, groceryList[i])
except Exception as details:
print "Oops! Exception Occurred:", details
### END CODE<jupyter_output>Grocery - 0: suger
Grocery - 1: rice
Grocery - 2: pepper
Grocery - 3: Soy Sause
Grocery - 4: salt
Oops! Exception Occurred: list index out of range
<jupyter_text>#### Exercise 5 - Using loops to wait for a user to enter a valid value
**This is a partial repeat of problem 1. You should be able to reuse that code**
Write a Python program that asks the user to enter a GPA (integer values - 0,1,2,3 or 4). Convert the input from the user into an integer. Write an exception handler to handle the ValueError exception and display the message "ValueError occurred. Please try again". If the value entered by the user is not compatible with integer values the program will raise a ValueError exception and display the message from within the exception handler.
Place the above code to get user input inside a **while** loop. As long as the user is entering an invalid numeric value, the program should **continue** to prompt the user to enter the GPA. When the user enters a correct value, the program should **break** out of the while loop and print the GPA.
**Please see classwork for similar examples**<jupyter_code>### YOUR CODE GOES BELOW
while True:
input_gpa = raw_input("Please Enter GPA(Integer values - 0,1,2,3 or 4):")
try:
gpa = int(input_gpa)
if gpa not in (0, 1, 2, 3, 4):
raise ValueError
break
except ValueError:
print "ValueError occurred. Please try again"
print "Good job, you input GPA: ", gpa
### END CODE<jupyter_output>Please Enter GPA(Integer values - 0,1,2,3 or 4):S3
ValueError occurred. Please try again
Please Enter GPA(Integer values - 0,1,2,3 or 4):12
ValueError occurred. Please try again
Please Enter GPA(Integer values - 0,1,2,3 or 4):-1
ValueError occurred. Please try again
Please Enter GPA(Integer values - 0,1,2,3 or 4):3
Good job, you input GPA: 3
<jupyter_text>## OPTIONAL EXERCISES
Below is a set of optional exercises. These will not be graded but the solutions will be posted. I would strongly encourage you to try these out if you are done with the mandatory homework exercises to improve your understanding of python.#### Exercise 6
Write a python program to build a simple chatbot. The chatbot will accept input from the user, check the input against a dictionary and display the corresponding value.
Below is the dictionary that the chatbot will use.
```
conversationsDict = {
'how are you':'I am fine, thank you.How are you today',
'i am well':'Cool! How can I help you today',
'can you tell me what the weather is like today':'Sure. It looks mostly sunny with a high of 80 degrees',
'thank you very much':'You are welcome. Have a nice day'
}
```
Note that the dictionary **key** is the value entered by the user and the **value** is the response from the chatbot. The chatbot must use the user input to look up the dictionary for a response. If a response is found, then the chatbot must print the response. If the key is Invalid, then a KeyError exception handler must handle the exception displaying the messsage "Sorry, I did not get that". It must then allow the user to repeat the question.<jupyter_code>### YOUR CODE GOES BELOW
conversationsDict = {
'how are you':'I am fine, thank you.How are you today',
'i am well':'Cool! How can I help you today',
'can you tell me what the weather is like today':'Sure. It looks mostly sunny with a high of 80 degrees',
'thank you very much':'You are welcome. Have a nice day'
}
while True:
chat = raw_input("Let's Chat ('Stop' to Exit) ----> ")
try:
if chat.lower() == "stop":
break
print conversationsDict[chat]
except KeyError:
print "Sorry, I did not get that"
### END CODE<jupyter_output>Let's Chat ('Stop' to Exit) ----> how are you
I am fine, thank you.How are you today
Let's Chat ('Stop' to Exit) ----> thank you very much
You are welcome. Have a nice day
Let's Chat ('Stop' to Exit) ----> really
Sorry, I did not get that
Let's Chat ('Stop' to Exit) ----> are you crazy
Sorry, I did not get that
Let's Chat ('Stop' to Exit) ----> i am well
Cool! How can I help you today
Let's Chat ('Stop' to Exit) ----> Stop
<jupyter_text>#### Exercise 7
Write a Python function to compute the average of a list of numbers. The input to the function is a Python **List**. The output is the average. The function must have an exception handler to handle the **ZeroDivisionError exception**.
Verify that the ZeroDivisionError handler works by passing in an empty list.<jupyter_code>### YOUR CODE GOES BELOW
def avg(list1):
try:
print "Your input is: %s, and their Average is:" % (list1)
return sum(list1)/len(list1)
except ZeroDivisionError:
return "Oops, your List is empty. I cannot calculate it!"
print avg([1,2,3,4,5])
print "--------------------------------------------------------"
print avg([])
print "--------------------------------------------------------"
print avg([4, 8])
### END CODE<jupyter_output>Your input is: [1, 2, 3, 4, 5], and their Average is:
3
--------------------------------------------------------
Your input is: [], and their Average is:
Oops, your List is empty. I cannot calculate it!
--------------------------------------------------------
Your input is: [4, 8], and their Average is:
6
<jupyter_text>#### Exercise 8
Write a python program that accepts a sequence of words, some of them numbers. Attempt to perform an integer add on all the words that were entered. If a word that is not a number is encountered, use the ValueError handler to skip that word. Display the resulting sum of all the numbers.<jupyter_code>### YOUR CODE GOES BELOW
s = 0
while True:
n = raw_input("Enter an Integer ('Stop' to exit): ")
if n.lower() == "stop":
break
try:
s = s + int(n)
except ValueError:
print "Sorry, You Must Enter an Ingeter. Try again"
print '=========================================='
print "The Summary of all Integers are: ", s
### END CODE<jupyter_output>Enter an Integer ('Stop' to exit): 1
Enter an Integer ('Stop' to exit): s
Sorry, You Must Enter an Integer. Try again
Enter an Integer ('Stop' to exit): 2
Enter an Integer ('Stop' to exit): yes
Sorry, You Must Enter an Integer. Try again
Enter an Integer ('Stop' to exit): ?
Sorry, You Must Enter an Integer. Try again
Enter an Integer ('Stop' to exit): 3
Enter an Integer ('Stop' to exit): 4
Enter an Integer ('Stop' to exit): t
Sorry, You Must Enter an Integer. Try again
Enter an Integer ('Stop' to exit): 5
Enter an Integer ('Stop' to exit): stop
==========================================
The Summary of all Integers are: 15
|
non_permissive
|
/hw7_qi_jing.ipynb
|
jingqi8/CIS-024C
| 8 |
<jupyter_start><jupyter_text># Bacterial Ensemble Language Model
This notebook trains a language model on the ensemble of bacterial genomes assembled in the [Bacterial Ensemble 0 Data Processing](https://github.com/kheyer/Genomic-ULMFiT/blob/master/Bacteria/Bacterial%20Ensemble/Bacterial%20Ensemble%200%20Data%20Processing.ipynb) notebook. The language model trained is based on the AWD-LSTM architecture. the genomic input information is split into 5-mers with a stride of 2 bases between each 5-mer. The model is trained to take an input sequence of 5-mers and predict the next 5-mer. This allows us to train a model that learns the structure of genomic information in a totally unsupervised way.
The base of the language model (token embedding + LSTM layers) will then be used to initialize a classification model.
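As a toy illustration of that tokenization scheme (a sketch added for clarity; the notebook itself relies on the repository's `GenomicTokenizer`), a sequence can be broken into 5-mers taken every 2 bases:

```
def kmer_tokenize(seq, k=5, stride=2):
    # split a nucleotide string into k-mers, moving `stride` bases between tokens
    return [seq[i:i + k] for i in range(0, len(seq) - k + 1, stride)]

print(kmer_tokenize('ATGCGTACGTTA'))
# ['ATGCG', 'GCGTA', 'GTACG', 'ACGTT']
```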
For more detail on how genomic data is processed and how these language models are trained, see the following notebooks:
[E. coli 1 Naive Model](https://github.com/kheyer/Genomic-ULMFiT/blob/master/Bacteria/E.%20Coli/E.%20coli%201%20Naive%20Model.ipynb)
[E. coli 2 Genomic Pretraining](https://github.com/kheyer/Genomic-ULMFiT/blob/master/Bacteria/E.%20Coli/E.%20coli%202%20Genomic%20Pretraining.ipynb)<jupyter_code>%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai import *
from fastai.text import *
from Bio import Seq
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation, CompoundLocation
import networkx as nx
sys.path.append("../../..")
from utils import *
path = Path('F:/genome/bacterial genomes/')
df = pd.read_csv(path/'bacterial_data.csv')
df.head()
# 10% of the data used for validation
train_df, valid_df = split_data(df, 0.9)
train_df.shape, valid_df.shape
tok = Tokenizer(GenomicTokenizer, n_cpus=1, pre_rules=[], post_rules=[], special_cases=['xxpad'])
data = GenomicTextLMDataBunch.from_df(path, train_df, valid_df, bs=428, tokenizer=tok, text_cols=0, label_cols=1)
# Model vocabulary - 1025 tokens. 1024 5-mer nucleotide combinations plus one padding token
len(data.vocab.itos)
# Save model vocabulary - this will be important later
np.save(path/'bact_vocab.npy', data.vocab.itos)
config = dict(emb_sz=400, n_hid=1150, n_layers=3, pad_token=0, qrnn=False, output_p=0.25,
hidden_p=0.1, input_p=0.2, embed_p=0.02, weight_p=0.15, tie_weights=True, out_bias=True)
drop_mult = 0.25
learn = get_model_LM(data, drop_mult, config)
learn.model
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, 1e-2, moms=(0.8,0.7))
learn.save('b1')
learn.fit_one_cycle(5, 5e-3, moms=(0.8,0.7))
learn.save('b2')
learn.save_encoder('b2_enc')<jupyter_output><empty_output>
|
no_license
|
/Bacteria/Bacterial Ensemble/Genomic Language Models/Bacterial Ensemble LM 1 5-mer Language Model.ipynb
|
bharatr21/Genomic-ULMFiT
| 1 |
<jupyter_start><jupyter_text># Seaborn Exercises - Solutions
Time to practice your new seaborn skills! Try to recreate the plots below (don't worry about color schemes, just the plot itself).## The Data
We will be working with a famous titanic data set for these exercises. Later on in the Machine Learning section of the course, we will revisit this data, and use it to predict survival rates of passengers. For now, we'll just focus on the visualization of the data with seaborn:<jupyter_code>import seaborn as sns
%matplotlib inline
sns.set(rc={'figure.figsize': [7, 7]}, font_scale=1.2)
df = sns.load_dataset('titanic')
df.head()
df.info()
df.head()
sns.distplot(df['age'], kde=False, bins=30, color='m')
sns.kdeplot(df['age'], shade=True, color='m')
sns.distplot(df['fare'], kde=False, bins=30, color='m')
sns.kdeplot(df['fare'], shade=True, color='m')
sns.jointplot(x='age', y='fare', data=df, color='m')
df.head()
sns.countplot(x='pclass', data=df, hue='survived', palette='rocket')
sns.countplot(x='class', data=df, hue='sex', palette='BuPu')
sns.countplot(x='class', data=df, hue='who', palette='Set2')
sns.countplot(x='class', data=df, hue='alone', palette='Set2')
sns.countplot(x='embark_town', data=df)
sns.countplot(x='class', data=df, hue='embark_town', palette='viridis')
df.head()
sns.boxplot(x='survived', y='age', data=df)
sns.boxplot(x='class', y='age', data=df)
sns.violinplot(x='class', y='age', data=df, hue='survived', split=True)
sns.stripplot(x='embark_town', y='age', data=df, hue='survived', dodge=True)
sns.violinplot(x='who', y='fare', data=df, hue='survived', split=True)
sns.barplot(x='class', y='fare', data=df, estimator=sum, hue='embark_town')
df.head()
new_df = df.pivot_table(index='embark_town', columns='class', values='survived', aggfunc='count')
new_df
sns.heatmap(new_df, cmap='viridis', linecolor='k', linewidths=2)<jupyter_output><empty_output>
|
no_license
|
/08-Data Visualization with Matplotlib & Seaborn/code/01-Seaborn/.ipynb_checkpoints/06 - Seaborn Exercise-checkpoint.ipynb
|
Terminator98/Singularity-Data-Science
| 1 |
<jupyter_start><jupyter_text># Saving and Loading Models
In this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.<jupyter_code>%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import fc_model
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)<jupyter_output>Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz
Processing...
Done!
<jupyter_text>Here we can see one of the images.<jupyter_code>image, label = next(iter(trainloader))
helper.imshow(image[0,:]);<jupyter_output><empty_output><jupyter_text># Train a network
To make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.<jupyter_code># Create the network, define the criterion and optimizer
model = fc_model.Network(784, 10, [512, 256, 128])
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)<jupyter_output>Epoch: 1/2.. Training Loss: 1.745.. Test Loss: 1.018.. Test Accuracy: 0.631
Epoch: 1/2.. Training Loss: 1.027.. Test Loss: 0.772.. Test Accuracy: 0.717
Epoch: 1/2.. Training Loss: 0.898.. Test Loss: 0.706.. Test Accuracy: 0.743
Epoch: 1/2.. Training Loss: 0.768.. Test Loss: 0.639.. Test Accuracy: 0.755
Epoch: 1/2.. Training Loss: 0.760.. Test Loss: 0.606.. Test Accuracy: 0.769
Epoch: 1/2.. Training Loss: 0.714.. Test Loss: 0.593.. Test Accuracy: 0.782
Epoch: 1/2.. Training Loss: 0.647.. Test Loss: 0.571.. Test Accuracy: 0.788
Epoch: 1/2.. Training Loss: 0.700.. Test Loss: 0.564.. Test Accuracy: 0.798
Epoch: 1/2.. Training Loss: 0.678.. Test Loss: 0.549.. Test Accuracy: 0.803
Epoch: 1/2.. Training Loss: 0.645.. Test Loss: 0.544.. Test Accuracy: 0.791
Epoch: 1/2.. Training Loss: 0.610.. Test Loss: 0.524.. Test Accuracy: 0.811
Epoch: 1/2.. Training Loss: 0.613.. Test Loss: 0.543.. Test Accuracy: 0.798
Epoch: 1/2.. Training Loss: 0.630.. Test Loss: 0.5[...]<jupyter_text>## Saving and loading networks
As you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.
The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.<jupyter_code>print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())<jupyter_output><empty_output><jupyter_text>The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.<jupyter_code>torch.save(model.state_dict(), 'checkpoint.pth')<jupyter_output><empty_output><jupyter_text>Then we can load the state dict with `torch.load`.<jupyter_code>state_dict = torch.load('checkpoint.pth')
print(state_dict.keys())<jupyter_output><empty_output><jupyter_text>And to load the state dict in to the network, you do `model.load_state_dict(state_dict)`.<jupyter_code>model.load_state_dict(state_dict)<jupyter_output><empty_output><jupyter_text>Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.<jupyter_code># Try this
model = fc_model.Network(784, 10, [400, 200, 100])
# This will throw an error because the tensor sizes are wrong!
model.load_state_dict(state_dict)<jupyter_output><empty_output><jupyter_text>This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to compeletely rebuild the model.<jupyter_code>checkpoint = {'input_size': 784,
'output_size': 10,
'hidden_layers': [each.out_features for each in model.hidden_layers],
'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')<jupyter_output><empty_output><jupyter_text>Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints. <jupyter_code>def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
model = fc_model.Network(checkpoint['input_size'],
checkpoint['output_size'],
checkpoint['hidden_layers'])
model.load_state_dict(checkpoint['state_dict'])
return model
model = load_checkpoint('checkpoint.pth')
print(model)<jupyter_output><empty_output>
|
no_license
|
/intro-to-pytorch/Part 6 - Saving and Loading Models.ipynb
|
shashankdeshpande/deep-learning-with-pytorch
| 10 |
<jupyter_start><jupyter_text># instance variables and class variables<jupyter_code># instance variables are attributes that are specific to each instance, like the balance in an Account
# class variables are specific to the class; a change made through the class affects all instances
class Myclass():
a = 10
b = 20
def __init__(self):
self.c = 100
self.d = 200
o1 = Myclass()
o2 = Myclass()
print dir(o1)
print dir(o2)
class Pen():
def __init__(self, c):
self.ink_level = 100
self.color = c
p1 = Pen("blue")
p2 = Pen("red")
print dir(p1)
print dir(p2)
print p1.color
print p2.color
class Pen():
def __init__(self, c):
print self
self.ink_level = 100
self.color = c
def getColor(self):
print self
return self.color
p1 = Pen("blue")
p2 = Pen("red")
print "p1 is ", p1
print "p2 is ", p2
print p1.getColor()
print p2.getColor()
class Pen():
def __init__(self, c):
print self
self.ink_level = 100
self.color = c
def getColor(xyz):
print xyz
return xyz.color
p1 = Pen("blue")
p2 = Pen("red")
print "p1 is ", p1
print "p2 is ", p2
print p1.getColor()
class Myclass():
a = 10
b = 20
def __init__(self, c, d):
self.c = d
self.d = c
e =100
o1 = Myclass(35, 45)
o2 = Myclass(40, 50)
print o1.c
print o2.c
class Myclass():
a = 10
b = 20
def __init__(self, c, d):
self.c = d
self.d = c
e =100
def readValues(self):
print self.c, self.d, self.e
o1 = Myclass(35, 45)
o2 = Myclass(40, 50)
print o1.readValues()
class Myclass():
a = 10
b = 20
def __init__(self, c, d):
self.c = d
self.d = c
e =100
def readValues(self):
print self.c, self.d
o1 = Myclass(35, 45)
o2 = Myclass(40, 50)
o1.readValues()
o2.readValues()
class Myclass():
a = 10
b = 20
def __init__(self, c, d):
self.c = d
self.d = c
self.e =100
def readValues(self):
print self.c, self.d, self.e
o1 = Myclass(35, 45)
o2 = Myclass(40, 50)
o1.readValues()
o2.readValues()
#class variables can be modified/read/create by using classNames
#class variables can also be read by using instance/object
#instance variables can be modified/read/create by using instance
class Myclass():
a = 10
b = 20
def __init__(self, c, d):
self.c = d
self.d = c
self.e =100
def readValues(self):
print self.c, self.d, self.e
o1 = Myclass(35, 45)
o2 = Myclass(40, 50)
print o1.a, o1.b, o1.c, o1.d, o1.e
print o2.a, o2.b, o2.c, o2.d, o2.e
o1.c = 150
print o2.c
print o1.c
o2.d = 200
print o2.d, o1.d
Myclass.a = 250
print o1.a, o2.a, o1.b, o2.b
o3 = Myclass(12, 25)
print o3.a, o3.b, o3.c, o3.d
o3.a = 210
print o3.a, o2.a, o1.a
print 3/2
print str(3)+'/'+str(2)
class Fraction(object):
def __init__(self, num, denom):
self.num = num
self.denom = denom
def display(self):
return str(self.num)+'/'+str(self.denom)
f1 = Fraction(8, 5)
print f1
print f1.display()
class Fraction(object):
def __init__(self, num, denom):
self.num = num
self.denom = denom
def __str__(self):
return str(self.num)+'/'+str(self.denom)
f1 = Fraction(8, 5)
print f1
print f1.num
print f1.denom
print f1
class Fraction(object):
def __init__(self, num, denom):
self.num = num
self.denom = denom
def __str__(self):
return str(self.num)+'/'+str(self.denom)
def addi(self, other):
num = (self.num*other.denom)+(self.denom*other.num)
denom = self.denom*other.denom
return str(num)+'/'+str(denom)
f1 = Fraction(8, 5)
f2 = Fraction(8, 4)
print f1
print f2
print f1.addi(f2)
class Fraction(object):
def __init__(self, num, denom):
self.num = num
self.denom = denom
def __str__(self):
return str(self.num)+'/'+str(self.denom)
def __add__(self, other):
num = (self.num*other.denom)+(self.denom*other.num)
denom = self.denom*other.denom
return str(num)+'/'+str(denom)
f1 = Fraction(8, 5)
f2 = Fraction(8, 4)
print f1
print f2
print f1+f2
print f1.__add__(f2)
print dir(5)
class parent(object):
def myfun(self):
return "I am from parent"
class Child(parent):
pass
o1 = Child()
print dir(o1)
print o1.myfun()
class parent(object):
def __init__(self):
self.a = 10
self.b = 20
def myfun(self):
return "I am from parent"
class Child(parent):
pass
o1 = Child()
print dir(o1)
print o1.a, o1.b
class parent(object):
def __init__(self):
self.a = 10
self.b = 20
def myfun(self):
return "I am from parent"
class Child(parent):
def __init__(self):
self.c = 10
self.d = 20
o1 = Child()
print dir(o1)
class parent(object):
def __init__(self):
self.a = 10
self.b = 20
def myfun(self):
return "I am from parent"
class Child(parent):
def __init__(self):
self.c = 10
self.d = 20
super(Child, self).__init__()
o1 = Child()
print dir(o1)
class parent(object):
def __init__(self):
self.a = 10
self.b = 20
def myfun(self):
return "I am from parent"
class Child(parent):
def __init__(self):
self.c = 10
self.d = 20
print self.myfun()
super(Child, self).__init__()
o1 = Child()
print dir(o1)
class parent(object):
def __init__(self):
self.a = 10
self.b = 20
def myfun(self):
return "I am from parent"
class Child(parent):
def __init__(self):
self.c = 10
self.d = 20
super(Child, self).__init__()
def myfun(self):
print super(Child, self).myfun()
print "I am from child"
o1 = Child()
o1.myfun()
class Myclass(object):
def __init__(self):
self.a = 100
def myfun(self):
self.c = 200
return self.b
class Child(Myclass):
def myfun(self):
self.b = 20
print super(Child, self).myfun()
print self.c
o1 = Child()
o1.myfun()
class Myclass(object):
def __init__(self):
self.a = 100
def myfun(self):
self.c = 200
return self.b
class Child(Myclass):
def myfun(self):
print super(Child, self).myfun()
print self.c
self.b = 20
o1 = Child()
o1.myfun()
class Myclass(object):
def __init__(self):
self.a = 100
def myfun(self):
self.c = 200
return self.b
class Child(Myclass):
def myfun(self):
print self.c
print super(Child, self).myfun()
self.b = 20
o1 = Child()
o1.myfun()
# Multiple inheritance
class Myclass(object):
def myfun(self):
return "I am from Myclass"
class Myclass1(object):
def myfun1(self):
return "I am from class1"
class Child(Myclass, Myclass1):
pass
o1 = Child()
print dir(o1)
class Myclass(object):
def myfun(self):
return "I am from Myclass"
class Myclass1(object):
def myfun(self):
return "I am from class1"
class Child(Myclass, Myclass1):
pass
o1 = Child()
print dir(o1)
print o1.myfun()
class Myclass(object):
def myfun(self):
return "I am from Myclass"
class Myclass1(object):
def myfun(self):
return "I am from class1"
class Child(Myclass1, Myclass):
pass
o1 = Child()
print dir(o1)
print o1.myfun()
class Myclass(object):
def myfun(self):
return "I am from Myclass"
class Myclass1(object):
def myfun(self):
return "I am from class1"
class Child(Myclass1, Myclass):
def myfun(self):
print Myclass.myfun(self)
print Myclass1.myfun(self)
o1 = Child()
print dir(o1)
print o1.myfun()
class Myclass(object):
def myfun(self):
return "I am from Myclass"
class Myclass1(object):
def myfun(self):
return "I am from class1"
class Child(object, Myclass1, Myclass):
def myfun(self):
print Myclass.myfun(self)
print Myclass1.myfun(self)
o1 = Child()
print dir(o1)
print o1.myfun()<jupyter_output><empty_output>
|
non_permissive
|
/.ipynb_checkpoints/oops-checkpoint.ipynb
|
pythontrainernag/batch7
| 1 |
<jupyter_start><jupyter_text># K-Means Clustering## Importing the libraries<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
import pandas as pd<jupyter_output><empty_output><jupyter_text>## Importing the dataset<jupyter_code>dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
#using only certain columns of data
#columns 3 and 4
#chosen only two columns so that we can visualise the clusters in 2d<jupyter_output><empty_output><jupyter_text>## Using the elbow method to find the optimal number of clusters<jupyter_code>from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
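# The curve bends (the "elbow") around 5 clusters, which is why
# n_clusters=5 is used when training the model below.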
"""
from my first course
wcss = []
cl_num = 8
for i in range(1,7):
kmeans = KMeans(i)
kmeans.fit(X)
wcss_iter = kmeans.inertia_ #using inertia method
wcss.append(wcss_iter)
wcss
#plotting
number_clusters = range(1,7)
plt.plot(number_clusters, wcss)
plt.title("The Elbow Method")
plt.xlabel("Number of clusters")
plt.ylabel("Within Cluster Sum of Squares")
"""<jupyter_output><empty_output><jupyter_text>## Training the K-Means model on the dataset<jupyter_code>from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=5, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(X) #or y_pred instead of y_kmeans
#fit_predict = it fits and predicts
#Clustering is about creating a dependent variable in such a way that each of the values of this future dependent variable is actually
#a class (cluster label) of that dependent variable
print(y_kmeans)<jupyter_output>[3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3 0 3
0 3 0 3 0 3 1 3 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 2 4 2 1 2 4 2 4 2 1 2 4 2 4 2 4 2 4 2 1 2 4 2 4 2
4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4 2 4
2 4 2 4 2 4 2 4 2 4 2 4 2 4 2]
<jupyter_text>## Visualising the clusters<jupyter_code>plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
#we need to interpret the graph
#target those in cluster 3
#don't target those in Cluster 1 too much [ethics]
#target also those in Cluster 5 to make them spend more. Make them more loyal customers
#KMeans can be used for more than two variables
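# (Added sketch; the income/score values below are made-up examples.)
# A fitted KMeans model can also assign a cluster to a new, unseen customer:
new_customer = [[60, 50]]            # [Annual Income (k$), Spending Score]
print(kmeans.predict(new_customer))  # index of the closest centroid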
<jupyter_output><empty_output>
|
no_license
|
/Clustering/k_means_clustering.ipynb
|
ckdarko/Machine_Learning_Tasks
| 5 |
<jupyter_start><jupyter_text><jupyter_code>import pandas as pd
from google.colab import files
import matplotlib.pyplot as plt
from matplotlib import style
import datetime as dt
import collections
upper = dt.datetime(2021,1,31)
lower = dt.datetime(2020,9,2)
files.upload()
df = pd.read_csv('ElonMusk.csv', parse_dates= ['date'], dayfirst= True)
df.set_index('date', inplace= True)
df['Smooth_polarity'] = df['polarity'].rolling(window = 100).mean()
df.dropna(inplace = True)
df = df.resample('D').mean()
df.dropna(inplace= True)
import pandas_datareader as web
start = dt.datetime(2020, 8, 20)
end = dt.datetime(2021,2,21)
df2 = web.DataReader('TSLA', start= start, end= end, data_source= 'yahoo')
df2['Smooth_Adj Close'] = df2['Adj Close'].rolling(window = 10).mean()
df2.dropna(inplace= True)
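# The next lines trim both dataframes to a common date window: rows of df2
# after `upper` and rows of df before `lower` or after `upper` are dropped,
# so the sentiment and price series cover the same period.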
x = collections.Counter(df2.index > upper)
df2.drop(df2.tail(x.get(True)).index, inplace= True)
df.drop(df.head((collections.Counter(df.index<lower)).get(True)).index, inplace = True)
df.drop(df.tail((collections.Counter(df.index>upper)).get(True)).index, inplace = True)
style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,15))
ax1 = plt.subplot2grid((8,1),(0,0), rowspan= 5, colspan= 1)
ax2 = plt.subplot2grid((8,1),(5,0), rowspan= 3, colspan= 1)
ax1.plot(df.index, df['Smooth_polarity'], linewidth= 0.8, color= 'blue')
ax2.plot(df2.index,df2['Smooth_Adj Close'], linewidth= 0.8, color= 'blue')
ax1.set_xlabel('Time', fontsize= 14)
ax1.set_ylabel('Polarity',fontsize= 14)
ax2.set_xlabel('Time', fontsize= 14)
ax2.set_ylabel('Price', fontsize= 14)
fig.suptitle('ELON MUSK TWITTER SENTIMENT VS TESLA STOCKS', fontsize= 16)
fig.tight_layout()
fig.subplots_adjust(top = 0.88)
<jupyter_output><empty_output>
|
no_license
|
/Sentiment_vs_Stock.ipynb
|
ShazamZX/Tesla-Stock
| 1 |
<jupyter_start><jupyter_text># Read the CSV and Perform Basic Data Cleaning<jupyter_code>import pandas as pd  # needed for read_csv below
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()<jupyter_output><empty_output><jupyter_text># Select your features (columns)<jupyter_code># Set features. This will also be used as your x values.
selected_features = df[['names', 'of', 'selected', 'features', 'here']]<jupyter_output><empty_output><jupyter_text># Create a Train Test Split
Use `koi_disposition` for the y values<jupyter_code>X_train.head()<jupyter_output><empty_output><jupyter_text># Pre-processing
Scale the data using the MinMaxScaler and perform some feature selection<jupyter_code># Scale your data<jupyter_output><empty_output><jupyter_text># Train the Model
<jupyter_code>print(f"Training Data Score: {model2.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {model2.score(X_test_scaled, y_test)}")<jupyter_output><empty_output><jupyter_text># Hyperparameter Tuning
Use `GridSearchCV` to tune the model's parameters<jupyter_code># Create the GridSearchCV model
# Train the model with GridSearch
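# (Added sketch for the missing GridSearch step, not the assignment's solution.
#  `model2`, `X_train_scaled` and `y_train` are names this notebook already
#  assumes; the parameter grid below is only an illustration for an SVM-style
#  estimator and would need to match whatever model2 actually is.)
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [1, 5, 10], 'gamma': [0.0001, 0.001, 0.01]}
grid2 = GridSearchCV(model2, param_grid, cv=5, verbose=3)
grid2.fit(X_train_scaled, y_train)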
print(grid2.best_params_)
print(grid2.best_score_)<jupyter_output><empty_output><jupyter_text># Save the Model<jupyter_code># save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'your_name.sav'
joblib.dump(your_model, filename)<jupyter_output><empty_output>
|
no_license
|
/code/.ipynb_checkpoints/model_1-checkpoint.ipynb
|
hannah1lee/machine-learning-challenge
| 7 |
<jupyter_start><jupyter_text><jupyter_code>#importing the library
import pandas as pd
#Reading the files
df1 = pd.read_excel("/content/mg.xlsx")
df2 = pd.read_excel("/content/rj.xlsx")
df3 = pd.read_excel("/content/sp.xlsx")
df = pd.concat([df1,df2,df3])
df.head()
df.tail()
df.sample(5)
df.dtypes
df["Valor"] = df["Valor"].astype("object")
df["Valor"] = df["Valor"].astype("int64")
df.dtypes
df.head()
df.isnull().sum()
df.sample(15)
df["Valor"].mean()
#Checking the data type of each column
df.dtypes
#converting the date column to datetime
df["data"] = pd.to_datetime(df["data"])
df.dtypes
df["Ano_Venda"] = df["data"].dt.year
df.sample(5)
df["mes_venda"], df["dia_venda"] = (df["data"].dt.month, df["data"].dt.day)
df.sample(10)
df["data"].min()
df["diferenca_dias"] = df["data"] - df["data"].min()
df.sample(30)
df["trimestre_venda"] = df["data"].dt.quarter
df.sample()
vendas_novembro = df.loc[(df["data"].dt.year == 2020) & (df["data"].dt.month == 11)]
vendas_novembro
vendas_novembro.sample(5)
df["Vendedor"].value_counts(ascending=False)
#Bar chart
df["Valor"].value_counts(ascending=False).plot.bar()
df["Valor"].value_counts(ascending=True).plot.barh();
df.groupby(df["data"].dt.year)["trimestre_venda"].sum().plot.pie();
#adding a title and changing the axis labels
import matplotlib.pyplot as plt
df["Cidades"].value_counts().plot.bar(title="Total de vendas por cidade")
plt.xlabel("Cidades")
plt.ylabel("Total vendas");
#Changing the color
import matplotlib.pyplot as plt
df["Cidades"].value_counts().plot.bar(title="Total de vendas por cidade", color="green")
plt.xlabel("Cidades")
plt.ylabel("Total vendas");
#changing the plot style
plt.style.use("ggplot")
#Changing the color
import matplotlib.pyplot as plt
df["Cidades"].value_counts().plot(title="Total de vendas por cidade", color="green")
plt.xlabel("Cidades")
plt.ylabel("Total vendas");
plt.legend()
plt.hist(df["Valor"], color="red");
plt.scatter(x=df["dia_venda"], y = df["Valor"])
<jupyter_output><empty_output>
|
no_license
|
/Untitled0.ipynb
|
joaovc800/Analise_de_dados
| 1 |
<jupyter_start><jupyter_text>Deep Learning
-------------
*Assignment 1*
The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing
later. This notebook uses the notMNIST dataset to be used with python experiments. This dataset is designed to look like the classic MNIST dataset,
while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.<jupyter_code># These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Configure the matplotlib backend for inline plotting in IPython
%matplotlib inline<jupyter_output><empty_output><jupyter_text>First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine.<jupyter_code>url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 1% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)<jupyter_output>notMNIST_large already present - Skipping extraction of notMNIST_large.tar.gz.
['notMNIST_large/A', 'notMNIST_large/B', 'notMNIST_large/C', 'notMNIST_large/D', 'notMNIST_large/E', 'notMNIST_large/F', 'notMNIST_large/G', 'notMNIST_large/H', 'notMNIST_large/I', 'notMNIST_large/J']
notMNIST_small already present - Skipping extraction of notMNIST_small.tar.gz.
['notMNIST_small/A', 'notMNIST_small/B', 'notMNIST_small/C', 'notMNIST_small/D', 'notMNIST_small/E', 'notMNIST_small/F', 'notMNIST_small/G', 'notMNIST_small/H', 'notMNIST_small/I', 'notMNIST_small/J']
<jupyter_text>**Problem 1**
Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a
different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
----------------------------------------------------------------------------------------------------------------------------------------------------
Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll
load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of
manageable size.
We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and
standard deviation ~0.5 to make training easier down the road.
A few images might not be readable, we'll just skip them.<jupyter_code>image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
#Problem 1: Sanities
from IPython.display import Image
print (len(test_datasets))
print (len(train_datasets))
A = pickle.load( open(train_datasets[0], "rb") )
print( type(A) )
Image(filename="notMNIST_small/A/MDEtMDEtMDAudHRm.png")
# Plot from array
print(A.shape)
A3 = A[2]
plt.imshow(A3)
print(A3.shape)<jupyter_output><empty_output><jupyter_text>**Problem 3**
Another check: we expect the data to be balanced across classes. Verify that.<jupyter_code>letters=[]
for letter in train_datasets:
letters.append(pickle.load( open(letter, "rb") ))
for l in letters:
print (l.shape)
np.std([
52909,
52911,
52912,
52911,
52912,
52912,
52912,
52912,
52912,
52911])<jupyter_output><empty_output><jupyter_text>-----------------------------------------------------------------------------
Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune
train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Also create a validation dataset for hyperparameter tuning.<jupyter_code>def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)<jupyter_output>Training: (200000, 28, 28) (200000,)
Validation: (10000, 28, 28) (10000,)
Testing: (10000, 28, 28) (10000,)
<jupyter_text>Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.<jupyter_code>def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)<jupyter_output><empty_output><jupyter_text>**Problem 4**
Convince yourself that the data is still good after shuffling!
-------------------------------------------------------------
Finally, let's save the data for later reuse:<jupyter_code>pickle_file = './pickled/notMNIST.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
###############################################
#Loading train, valid, and test, dataset split#
###############################################
pickle_file = './pickled/notMNIST.pickle'
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
try:
f = open(pickle_file, 'rb')
pcklData = pickle.load(f)
except Exception as e:
print('Unable to load data from', pickle_file, ':', e)
raise
# Loading to original variable names
train_dataset = pcklData['train_dataset']
train_labels = pcklData['train_labels' ]
valid_dataset = pcklData['valid_dataset']
valid_labels = pcklData['valid_labels' ]
test_dataset = pcklData['test_dataset' ]
test_labels = pcklData['test_labels' ]
<jupyter_output><empty_output><jupyter_text>**Problem 5**
By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test
set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but
are actually ok if you expect to see training samples recur when you use it. Measure how much overlap there is between training, validation and
test samples.
Optional questions:
* What about near duplicates between datasets? (images that are almost identical)
* Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.
<jupyter_code>######################
# unflatten function
def unflatten(x):
n = int(np.sqrt(len(x)))
xX = np.array([ [0.0 for i in xrange(n)] for j in xrange(n)] )
for i in xrange(n):
for j in xrange(n):
xX[i,j] = x[i*n + j]
return xX
######################
# Pickling function
def Ipckl(file_name, opr,Data=None):
try:
f = open(file_name, opr)
if opr == 'wb':
pickle.dump(Data, f, pickle.HIGHEST_PROTOCOL)
elif opr == 'rb':
return pickle.load(f)
f.close()
except Exception as e:
print('Unable to '+ opr[0] + ' data. file:', file_name, ':', e)
raise
####################
#Overlap exploration
####################
from time import time
# Flatten two dimensional image arrays into n*n dimension vectors
X_train = np.array([image.flatten() for image in train_dataset])
X_valid = np.array([image.flatten() for image in valid_dataset])
X_test = np.array([image.flatten() for image in test_dataset])
X_train.shape
# Reduce dimension size to n_components
from sklearn.decomposition import RandomizedPCA
n_components = 120
print ("Extracting the top {} eigenvectors from {} letters".format(n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print ("done in {}".format(time() - t0) )
# Applying pca projection to data
X_train_pca = pca.transform(X_train)
X_valid_pca = pca.transform(X_valid)
X_test_pca = pca.transform(X_test)
print( X_train.shape, X_train_pca.shape)
#######Saving PCA transformation data##########
pca_data = {
'X_train_pca' : X_train_pca,
'X_valid_pca' : X_valid_pca,
'X_test_pca' : X_test_pca ,
}
Ipckl('./pickled/pca_data.pickle', 'wb', pca_data)
H_pca = unflatten(X_train_pca[0])
plt.imshow(H_pca)
############Load PCA transformed data#############
pca_data = Ipckl('./pickled/pca_data.pickle', 'rb')
X_train_pca = pca_data['X_train_pca']
X_valid_pca = pca_data['X_valid_pca']
X_test_pca = pca_data['X_test_pca' ]
print( X_train_pca.shape, X_valid_pca.shape, X_test_pca.shape )
########################
#Clustering exploration#
########################
from sklearn.cluster import DBSCAN
dbscn = DBSCAN(eps=0.0005, min_samples=2)
all_data = np.concatenate( (X_train_pca , X_valid_pca, X_test_pca), axis=0 )
############################
start = time()
dblabels = dbscn.fit_predict( all_data )
cores = dbscn.components_
core_labels = dbscn.core_sample_indices_
stop = time() - start
############################
print ('done in {} s'.format(stop))
#######Saving cluster and core labels##########
dbscan_labels = {
'dblabels' : dblabels,
'core_labels' : core_labels,
'cores': cores,
}
Ipckl('./pickled/dbscan_labels.pickle', 'wb', dbscan_labels)
#######Loading cluster and core labels##########
dbscan_labels = Ipckl('./pickled/dbscan_labels.pickle', 'rb')
dblabels = dbscan_labels['dblabels']
core_labels = dbscan_labels['core_labels']
cores = dbscan_labels['cores' ]
<jupyter_output><empty_output><jupyter_text>My attempted approach is to identify repeated data samples by finding really 'tight' clusters using DBSCAN (i.e. I set eps=0.0005), the reasoning
being that the limiting case of closeness is equality. <jupyter_code>#########Removing duplicates (or approximate duplicates)###########
print( "DBSCAN found: {} types of 'tight' groups".format( max(dblabels) - min(dblabels)) )
###
all_data = np.concatenate( (X_train_pca , X_valid_pca, X_test_pca), axis=0 )
all_labels = np.concatenate( (train_labels, valid_labels, test_labels), axis=0 )
unique_data = []
u_targets = []
cluster_no = np.zeros( max(dblabels+1) ) # The cluster labels are sequential from -1 to the largest number in dblabels
for i, x in enumerate(all_data):
if dblabels[i] == -1: # clusters of one (already unique)
unique_data.append(x)
u_targets.append(all_labels[i])
elif cluster_no[dblabels[i]] == 0: # grab one representative of each cluster of more than one member
cluster_no[dblabels[i]] +=1
unique_data.append(x)
u_targets.append(all_labels[i])
unique_data = np.array(unique_data)
u_targets = np.array(u_targets)
print(all_data.shape, unique_data.shape)
##################################################################
#Reconstructing train, valid and test sets after duplicate removal
from sklearn.cross_validation import train_test_split
uX_train_pca, Xtemp, uy_train_pca, ytemp = train_test_split(unique_data, u_targets, test_size=0.10, random_state=0)
uX_valid_pca, uX_test_pca, uy_valid_pca, uy_test_pca = train_test_split(Xtemp, ytemp, test_size=0.50, random_state=0)
######Saving data after duplicate removal##########
upca_data = {
'uX_train_pca' : uX_train_pca,
'uX_valid_pca' : uX_valid_pca,
'uX_test_pca' : uX_test_pca ,
'uy_train_pca' : uy_train_pca,
'uy_valid_pca' : uy_valid_pca,
'uy_test_pca' : uy_test_pca ,
}
Ipckl('./pickled/upca_data.pickle', 'wb', upca_data)
#######Loading data after duplicate removal##########
upca_data = Ipckl('./pickled/upca_data.pickle', 'rb')
uX_train_pca = upca_data['uX_train_pca']
uX_valid_pca = upca_data['uX_valid_pca']
uX_test_pca = upca_data['uX_test_pca' ]
uy_train_pca = upca_data['uy_train_pca']
uy_valid_pca = upca_data['uy_valid_pca']
uy_test_pca = upca_data['uy_test_pca' ]
#########################
# Linear-model Training #
#########################
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression()
k = 50
start = time()#####################
logr.fit(X_train_pca, train_labels)
stop = time()#####################
print(logr.score(X_valid_pca, valid_labels), stop - start)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression()
k = 50
start = time()#####################
logr.fit(uX_train_pca, uy_train_pca)
stop = time()#####################
print(logr.score(uX_valid_pca, uy_valid_pca), stop - start)<jupyter_output>0.823575331772 32.9561469555
|
no_license
|
/Assig1.ipynb
|
rortms/DeepLearn
| 9 |
<jupyter_start><jupyter_text># Level 1 header
## Level 2 header
* list item
* another list item
```sqlite
select * from kittens where fur='soft'
```
Now some Pythagoras:
$$a^2 + b^2 = c^2$$
Inline code: `print("Hallo")`<jupyter_code>import numpy as np
from numpy import sin            # sin is used below without the np. prefix
import matplotlib.pyplot as plt  # needed for plt.plot below
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y, c='r')
for a in x[10:50:5]:
print(sin(a))
#[sin(a) for a in x]
y = []
for a in x:
y.append(sin(a))
#is not such a nice, declarative, way of defining y
y = [sin(a) for a in x] #is already a clearer, cleaner way
y = np.sin(x) #is perhaps the nicest way to do it
def sinc(x):
return sin(x) / x<jupyter_output><empty_output>
|
permissive
|
/analysis/Introduction.ipynb
|
samnooij/reproducible_science_workshop-20200211
| 1 |
<jupyter_start><jupyter_text># Anna KaRNNa
In this notebook, we'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [Sherjil Ozair](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.
<jupyter_code>import time
from collections import namedtuple
import numpy as np
import tensorflow as tf<jupyter_output><empty_output><jupyter_text>First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.<jupyter_code>with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)<jupyter_output><empty_output><jupyter_text>Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.<jupyter_code>text[:100]<jupyter_output><empty_output><jupyter_text>And we can see the characters encoded as integers.<jupyter_code>encoded[:100]<jupyter_output><empty_output><jupyter_text>Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.<jupyter_code>len(vocab)<jupyter_output><empty_output><jupyter_text>## Making training mini-batches
Here is where we'll make our mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
We have our text encoded as integers as one long array in `encoded`. Let's create a function that will give us an iterator for our batches. I like using [generator functions](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/) to do this. Then we can pass `encoded` into this function and get our batch generator.
The first thing we need to do is discard some of the text so we only have completely full batches. Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences) and $M$ is the number of steps. Then, to get the number of batches we can make from some array `arr`, you divide the length of `arr` by the batch size. Once you know the number of batches and the batch size, you can get the total number of characters to keep.
After that, we need to split `arr` into $N$ sequences. You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences (`n_seqs` below), let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$ where $K$ is the number of batches.
Now that we have this array, we can iterate through it to get our batches. The idea is each batch is a $N \times M$ window on the array. For each subsequent batch, the window moves over by `n_steps`. We also want to create both the input and target arrays. Remember that the targets are the inputs shifted over one character. You'll usually see the first input character used as the last target character, so something like this:
```python
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
```
where `x` is the input batch and `y` is the target batch.
The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of steps in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `n_steps` wide.
> **Exercise:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**<jupyter_code>def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the batch size and number of batches we can make
batch_size = n_steps * n_seqs
n_batches = len(arr) // batch_size
# Keep only enough characters to make full batches
arr = arr[:n_batches * batch_size]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n+n_steps]
# The targets, shifted by one
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y<jupyter_output><empty_output><jupyter_text>Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.<jupyter_code>batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])<jupyter_output>x
[[50 57 81 37 69 79 59 11 76 17]
[11 81 62 11 68 60 69 11 38 60]
[80 1 68 64 17 17 0 46 79 3]
[68 11 74 47 59 1 68 38 11 57]
[11 1 69 11 1 3 58 11 3 1]
[11 71 69 11 40 81 3 17 60 68]
[57 79 68 11 7 60 62 79 11 56]
[34 11 51 47 69 11 68 60 40 11]
[69 11 1 3 68 14 69 64 11 18]
[11 3 81 1 74 11 69 60 11 57]]
y
[[57 81 37 69 79 59 11 76 17 17]
[81 62 11 68 60 69 11 38 60 1]
[ 1 68 64 17 17 0 46 79 3 58]
[11 74 47 59 1 68 38 11 57 1]
[ 1 69 11 1 3 58 11 3 1 59]
[71 69 11 40 81 3 17 60 68 72]
[79 68 11 7 60 62 79 11 56 60]
[11 51 47 69 11 68 60 40 11 3]
[11 1 3 68 14 69 64 11 18 57]
[ 3 81 1 74 11 69 60 11 57 79]]
<jupyter_text>If you implemented `get_batches` correctly, the above output should look something like
```
x
[[55 63 69 22 6 76 45 5 16 35]
[ 5 69 1 5 12 52 6 5 56 52]
[48 29 12 61 35 35 8 64 76 78]
[12 5 24 39 45 29 12 56 5 63]
[ 5 29 6 5 29 78 28 5 78 29]
[ 5 13 6 5 36 69 78 35 52 12]
[63 76 12 5 18 52 1 76 5 58]
[34 5 73 39 6 5 12 52 36 5]
[ 6 5 29 78 12 79 6 61 5 59]
[ 5 78 69 29 24 5 6 52 5 63]]
y
[[63 69 22 6 76 45 5 16 35 35]
[69 1 5 12 52 6 5 56 52 29]
[29 12 61 35 35 8 64 76 78 28]
[ 5 24 39 45 29 12 56 5 63 29]
[29 6 5 29 78 28 5 78 29 45]
[13 6 5 36 69 78 35 52 12 43]
[76 12 5 18 52 1 76 5 58 52]
[ 5 73 39 6 5 12 52 36 5 78]
[ 5 29 78 12 79 6 61 5 59 63]
[78 69 29 24 5 6 52 5 63 76]]
```
although the exact numbers will be different. Check to make sure the data is shifted over one step for `y`.## Building the model
Below is where you'll build the network. We'll break it up into parts so it's easier to reason about each bit. Then we can connect them up into the whole network.
### Inputs
First off we'll create our input placeholders. As usual we need placeholders for the training data and the targets. We'll also create a placeholder for dropout layers called `keep_prob`. This will be a scalar, that is a 0-D tensor. To make a scalar, you create a placeholder without giving it a size.
> **Exercise:** Create the input placeholders in the function below.<jupyter_code>def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob<jupyter_output><empty_output><jupyter_text>### LSTM Cell
Here we will create the LSTM cell we'll use in the hidden layer. We'll use this cell as a building block for the RNN. So we aren't actually defining the RNN here, just the type of cell we'll use in the hidden layer.
We first create a basic LSTM cell with
```python
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
```
where `num_units` is the number of units in the hidden layers in the cell. Then we can add dropout by wrapping it with
```python
tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
```
You pass in a cell and it will automatically add dropout to the inputs or outputs. Finally, we can stack up the LSTM cells into layers with [`tf.contrib.rnn.MultiRNNCell`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/rnn/MultiRNNCell). With this, you pass in a list of cells and it will send the output of one cell into the next cell. For example,
```python
tf.contrib.rnn.MultiRNNCell([cell]*num_layers)
```
This might look a little weird if you know Python well because this will create a list of the same `cell` object. However, TensorFlow will create different weight matrices for all `cell` objects. Even though this is actually multiple LSTM cells stacked on each other, you can treat the multiple layers as one cell.
We also need to create an initial cell state of all zeros. This can be done like so
```python
initial_state = cell.zero_state(batch_size, tf.float32)
```
> **Exercise:** Below, implement the `build_lstm` function to create these LSTM cells and the initial state.<jupyter_code>def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell outputs
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop]*num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state<jupyter_output><empty_output><jupyter_text>### RNN Output
Here we'll create the output layer. We need to connect the output of the RNN cells to a fully connected layer with a softmax output. The softmax output gives us a probability distribution we can use to predict the next character, so we want this layer to have size $C$, the number of classes/characters we have in our text.
If our input has batch size $N$, number of steps $M$, and the hidden layer has $L$ hidden units, then the output is a 3D tensor with size $N \times M \times L$. The output of each LSTM cell has size $L$, we have $M$ of them, one for each sequence step, and we have $N$ sequences. So the total size is $N \times M \times L$.
We are using the same fully connected layer, the same weights, for each of the outputs. Then, to make things easier, we should reshape the outputs into a 2D tensor with shape $(M * N) \times L$. That is, one row for each sequence and step, where the values of each row are the output from the LSTM cells. We get the LSTM output as a list, `lstm_output`. First we need to concatenate this whole list into one array with [`tf.concat`](https://www.tensorflow.org/api_docs/python/tf/concat). Then, reshape it (with `tf.reshape`) to size $(M * N) \times L$.
Once we have the outputs reshaped, we can do the matrix multiplication with the weights. We need to wrap the weight and bias variables in a variable scope with `tf.variable_scope(scope_name)` because there are weights being created in the LSTM cells. TensorFlow will throw an error if the weights created here have the same names as the weights created in the LSTM cells, which they will be by default. To avoid this, we wrap the variables in a variable scope so we can give them unique names.
> **Exercise:** Implement the output layer in the function below.<jupyter_code>def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
seq_output = tf.concat(lstm_output, 1)
# Reshape seq_output to a 2D tensor with lstm_size columns
x = tf.reshape(seq_output, [-1, lstm_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits<jupyter_output><empty_output><jupyter_text>### Training loss
Next up is the training loss. We get the logits and targets and calculate the softmax cross-entropy loss. First we need to one-hot encode the targets, we're getting them as encoded characters. Then, reshape the one-hot targets so it's a 2D tensor with size $(M*N) \times C$ where $C$ is the number of classes/characters we have. Remember that we reshaped the LSTM outputs and ran them through a fully connected layer with $C$ units. So our logits will also have size $(M*N) \times C$.
Then we run the logits and targets through `tf.nn.softmax_cross_entropy_with_logits` and find the mean to get the loss.
>**Exercise:** Implement the loss calculation in the function below.<jupyter_code>def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss<jupyter_output><empty_output><jupyter_text>### Optimizer
Here we build the optimizer. Normal RNNs have issues with gradients exploding and disappearing. LSTMs fix the disappearance problem, but the gradients can still grow without bound. To fix this, we can clip the gradients above some threshold. That is, if a gradient is larger than that threshold, we set it to the threshold. This will ensure the gradients never grow overly large. Then we use an AdamOptimizer for the learning step.<jupyter_code>def build_optimizer(loss, learning_rate, grad_clip):
''' Build optimizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer<jupyter_output><empty_output><jupyter_text>### Build the network
Now we can put all the pieces together and build a class for the network. To actually run data through the LSTM cells, we will use [`tf.nn.dynamic_rnn`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/nn/dynamic_rnn). This function will pass the hidden and cell states across LSTM cells appropriately for us. It returns the outputs for each LSTM cell at each step for each sequence in the mini-batch. It also gives us the final LSTM state. We want to save this state as `final_state` so we can pass it to the first LSTM cell in the next mini-batch run. For `tf.nn.dynamic_rnn`, we pass in the cell and initial state we get from `build_lstm`, as well as our input sequences. Also, we need to one-hot encode the inputs before going into the RNN.
> **Exercise:** Use the functions you've implemented previously and `tf.nn.dynamic_rnn` to build the network.<jupyter_code>class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
with tf.variable_scope('rnn1'):
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)<jupyter_output><empty_output><jupyter_text>## Hyperparameters
Here are the hyperparameters for the network.
* `batch_size` - Number of sequences running through the network in one pass.
* `num_steps` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
* `lstm_size` - The number of units in the hidden layers.
* `num_layers` - Number of hidden LSTM layers to use
* `learning_rate` - Learning rate for training
* `keep_prob` - The dropout keep probability when training. If your network is overfitting, try decreasing this.
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
> ## Tips and Tricks
>### Monitoring Validation Loss vs. Training Loss
>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
> ### Approximate number of parameters
> The two most important parameters that control the model are `lstm_size` and `num_layers`. I would advise that you always use `num_layers` of either 2/3. The `lstm_size` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
> - The number of parameters in your model. This is printed when you start training.
> - The size of your dataset. 1MB file is approximately 1 million characters.
>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `lstm_size` larger.
> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
> ### Best models strategy
>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.<jupyter_code>batch_size = 10 # Sequences per batch
num_steps = 50 # Number of sequence steps per batch
lstm_size = 128 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.01 # Learning rate
keep_prob = 0.5 # Dropout keep probability<jupyter_output><empty_output><jupyter_text>## Time for training
This is typical training code, passing inputs and targets into the network, then running the optimizer. Here we also get back the final LSTM state for the mini-batch. Then, we pass that state back into the network so the next batch can continue the state from the previous batch. And every so often (set by `save_every_n`) I save a checkpoint.
Here I'm saving checkpoints with the format
`i{iteration number}_l{# hidden layer units}.ckpt`
> **Exercise:** Set the hyperparameters above to train the network. Watch the training loss, it should be consistently dropping. Also, I highly advise running this on a GPU.<jupyter_code>epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
if counter % 500 == 0:
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))<jupyter_output>Epoch: 1/20... Training Step: 500... Training loss: 2.3225... 0.0552 sec/batch
Epoch: 1/20... Training Step: 1000... Training loss: 2.0291... 0.0542 sec/batch
Epoch: 1/20... Training Step: 1500... Training loss: 1.9428... 0.0619 sec/batch
Epoch: 1/20... Training Step: 2000... Training loss: 1.9263... 0.0598 sec/batch
Epoch: 1/20... Training Step: 2500... Training loss: 1.8028... 0.0627 sec/batch
Epoch: 1/20... Training Step: 3000... Training loss: 1.8476... 0.0538 sec/batch
Epoch: 1/20... Training Step: 3500... Training loss: 1.8136... 0.0561 sec/batch
Epoch: 2/20... Training Step: 4000... Training loss: 1.7746... 0.0573 sec/batch
Epoch: 2/20... Training Step: 4500... Training loss: 1.7926... 0.0608 sec/batch
Epoch: 2/20... Training Step: 5000... Training loss: 1.7459... 0.0614 sec/batch
Epoch: 2/20... Training Step: 5500... Training loss: 1.8202... 0.0574 sec/batch
Epoch: 2/20... Training Step: 6000... Training loss: 1.6451... 0.0589 sec/batch
Epoch[...]<jupyter_text>#### Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables<jupyter_code>import tensorflow as tf
tf.train.get_checkpoint_state('checkpoints')<jupyter_output><empty_output><jupyter_text>## Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
<jupyter_code>def pick_top_n(preds, vocab_size, top_n=5):
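# Keep only the top_n most likely characters: zero out the other
# probabilities, renormalize so they sum to 1, then sample one index.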
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)<jupyter_output><empty_output><jupyter_text>Here, pass in the path to a checkpoint and sample from the network.<jupyter_code>tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i75200_l128.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i72200_l128.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i71800_l128.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)<jupyter_output>Fard,
where he walked about to the meaning when he heard in the same word at the sound of the sand we was nice of the could be because the man arounest the convarcation and was suddenly that having the children to himself, was at a
prepertion well the cantached with his same of
alone.
"Ah, to see it," he said. "It has between how
having all he could be nothing the ceasants of the dangers.... What would not bees my thing the
delices of misting and were all the same at itself. And I mave them of, as though is to talk to so too, I said him to his straight of that the cares of the mind in the compalling," he added, standing in the same or stray her with his peasants and sermant, which there can seem, and seemed out in his side was all and the mescemating a passent of hand to the place of her ball, the country, whom she was a sense, seened
a sister, and saying he had to brood him
to him as with that what they walked to all his
head to the carm of
that heart, as though already was still too[...]
<jupyter_start><jupyter_text># COGS 108 - Final Project: What Makes a Kickstarter Successful?
# Overview
In our project, we looked at Kickstarters and whether or not they reached their initial monetary goal. By observing the six characteristics (variables): “Goal”, “Duration”, “Friends”, “Projects”, “Images”, and “Words”, and the strength of their influence on Kickstarters’ success, we were able to determine which variable was the most important in a Kickstarter’s success.
# Names
- Jessyca Beksa
- David Yang
- Atul Antil
- Elizabeth Chang
- Camron Chermak
- Temesgen Teklemariam
# Research Question
Out of the six variables: “Goal”, “Duration”, “Friends”, “Projects”, “Images”, and “Words”, which variable contributes most to a Kickstarter’s success? In other words, if we removed one of these variables from the Kickstarter, which one would cause the biggest loss of support? Analysis of successful and failed Kickstarters and their characteristics may show us this information.
## Background and Prior Work
In today’s society, the Internet and social media have become a popular pastime filling a large portion of most individuals’ days. Whether it’s portrayed in movies or through news reports, it’s safe to say that the internet is the largest available socio-technological system, connecting millions of individuals around the world. USA Today reports that individuals spend an estimated 23.6 hours a week on the internet, whether through a laptop, mobile phone, or tablet. One advantage of our society’s oversaturated media use is its ability to usher new young entrepreneurs into the workforce via a trend known as crowdfunding. Websites such as GoFundMe, Crowd Supply, and Kickstarter allow individuals or organizations to create a fundraising page complete with a monetary goal and a pitch to garner donations. Crowdfunding allows individuals to bypass traditional fundraising models and source their startup income straight from the general public. While people’s intentions for creating a crowdfunding page inevitably vary, the goal of gaining monetary support for their idea is fundamentally the same: one adult may be raising money to support a non-profit of choice, while a group of people may be raising money to cover medical treatment costs. Among the variety of reasons why people create crowdfunding pages, there are two perceived outcomes: some individuals reach their initial proposed goal, receiving well beyond the monetary value they asked for, while others fall short. With a plethora of options to choose from, Kickstarter has been one of the more favored crowdfunding websites as well as one of the longest standing, having launched in 2009. Due to its popularity, it has also become one of the most heavily analyzed crowdfunding sites (Alvaris, 8). Through analysis of the components that make up a successful Kickstarter, researchers can gain knowledge and insight into how to create successful Kickstarters, making the site more effective for future users.
References (include links):
- 1) https://thehustle.co/archive/02102019d/
- 2) https://towardsdatascience.com/predicting-the-success-of-kickstarter-campaigns-3f4a976419b9
One analysis done by “The Hustle” noted that the most successful Kickstarters are those proposing a technological idea or company.
Another individual examined classification models that predicted a Kickstarter’s success or failure using three different methods, the K-Nearest Neighbors algorithm, Random Forest, and Linear Regression, and concluded that the Random Forest was the most accurate predictor. While both of these highlight aspects of successful Kickstarters and their components, our analysis will focus solely on the variables stated above to narrow down the one specific characteristic that is most useful in our prediction model.
# Hypothesis
We hypothesize that the most important characteristic in a Kickstarter’s success is “Friends”. In other words, if we take away “Friends” from a Kickstarter, it will lose more support compared to taking away any of the other characteristics: “Goal”, “Duration”, “Projects”, “Images”, or “Words”. We chose this hypothesis because the world is more connected to the Internet than ever before and the presence of social media is prevalent in this day and age. As such, we feel that the more Facebook friends people have, the more connections they have to help raise funds, leading to successful Kickstarters that reach their goal from the donations and additional shares they receive.
# Dataset(s)
- Dataset Name: Kickstarter
- Link to the dataset: https://www.kaggle.com/tayoaki/kickstarter-dataset
- Number of observations: 18,142
We used data from Kaggle, specifically the “Kickstarter Dataset”, which contains 18,142 observations. It includes a variety of factors such as campaign duration, Facebook friends, words and images used in descriptions, and the initial goal set for the Kickstarter, which we use to predict whether the project was a success. The dataset consists of 18,142 rows and 35 columns, but we reduced it to 9978 rows and 8 columns for a more meaningful use. “State” is defined as whether the Kickstarter was successful or not, “Currency” is defined as the type of currency required, “Goal” is defined as the amount of money a Kickstarter needs to reach, “Duration” is defined as the number of days the Kickstarter is open for donations, “Friends” is defined as the number of Facebook friends the Kickstarter’s founder has, “Projects” is defined as the number of previous projects by the founder, “Images” is defined as the number of images included in the Kickstarter description, and “Words” is defined as the number of words included in the Kickstarter description.
# Setup
To set up for our project, we have the following imports for our code:<jupyter_code># Needed imports for the program
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import RandomForestRegressor # For using Random Forests<jupyter_output><empty_output><jupyter_text>Next, we can configure our libraries to prepare the data and graphs, such as making them look nicer or rounding the numbers in the data to be read:<jupyter_code># Configure libraries
# The seaborn library makes plots look nicer
sns.set()
sns.set_context('talk')
# Don't display too many rows/cols of DataFrames
pd.options.display.max_rows = 7
pd.options.display.max_columns = 8
# Round decimals when displaying DataFrames
pd.set_option('precision', 2)<jupyter_output><empty_output><jupyter_text>Now, we can read our data from a file into a dataframe and use it for our data cleaning, visualization, and analysis:<jupyter_code># Reading in the original dataset
data_initial = pd.read_csv('18k_Projects.csv', low_memory=False)
# Printing out the initial columns
data_initial.columns
# Printing out the initial data
data_initial<jupyter_output><empty_output><jupyter_text># Data CleaningBecause our dataset was very large, we cleaned our data using multiple steps and methods. First, we dropped all columns that we did not choose to analyze from our dataset. This was so that we could focus on the columns we wanted to analyze. Next, we cleaned the data by setting the success or failed column, “State”, with values of 1 and 0, respectively. We did this so that we could read our dataset more efficiently. Afterwards, we went through the data and made sure all monetary values, or the “Currency” column, were all in one currency (USD). We dropped all rows with “Currency” that did not have “USD” to make our dataset more relevant and easy to compare. Following this, we dropped all rows that contained a null value because we cannot use them in our analysis. And lastly, we renamed some columns to shorter phrases for readability.<jupyter_code># Removing all of the extra columns
data_initial.drop(labels=['Id', 'Name', 'Url', 'Category', \
'Creator', 'Location'], axis=1, inplace=True)
data_initial.drop(labels=['Updates', 'Start', 'End', \
'# FAQs', 'Start Timestamp (UTC)'], axis=1, inplace=True)
data_initial.drop(labels=['End Timestamp (UTC)', 'Latitude', \
'Longitude'], axis=1, inplace=True)
data_initial.drop(labels=['Comments', 'Rewards', 'Pledged', \
'Backers' ], axis=1, inplace=True)
data_initial.drop(labels=['Creator Bio', 'Creator Website', \
'Creator - # Projects Backed'], axis=1, inplace=True)
data_initial.drop(labels=['# Videos', 'Facebook Connected', \
'Facebook Shares', 'Has Video'], axis=1, inplace=True)
data_initial.drop(labels=['# Words (Risks and Challenges)', \
'Top Category'], axis=1, inplace=True)
# Printing out the remaining columns
data_initial.columns
# Replacing successful and failed with 1 and 0, respectively
data_initial.loc[data_initial['State'] == 'successful', 'State'] = 1
data_initial.loc[data_initial['State'] == 'failed', 'State'] = 0
# Making sure they all have the same currency in USD and then removing column
data_initial = data_initial[data_initial['Currency'] == 'USD']
# Removing all of the null data
data = data_initial.dropna()
# Renaming the columns
data.columns = ['State', 'Currency', 'Goal', 'Duration', 'Friends', \
'Projects', 'Images', 'Words']
# Printing out the final data
data<jupyter_output><empty_output><jupyter_text># Data Analysis & ResultsAfter cleaning the data, we created scatterplots and bar graphs that show the correlation between the different variables we are comparing. Below are said scatterplots and bar graphs.<jupyter_code># Plotting out the correlation matrix of the variables we are comparing
f1 = pd.plotting.scatter_matrix(data[['Duration', 'Friends', 'Goal' , \
                                     'Words', 'Images', 'Projects']], figsize=[10,10])<jupyter_output><empty_output><jupyter_text>This is a matrix of correlations between all of the characteristics we chose to analyze. Notice that the correlation behavior in the “Goal” column and row looks odd, with the points falling close to either a vertical or a horizontal line. We will examine these more closely with a smaller axis range to see the actual correlation without outliers affecting the scaling of the graphs.<jupyter_code># Plotting total number of successes and failures in dataset
f2 = data['State'].value_counts().plot('bar')
plt.xlabel("Successful or Failed")
plt.ylabel("Number of projects")
plt.title("Number of projects that failed and succeeded")<jupyter_output><empty_output><jupyter_text>This bar graph shows the number of “Successful” and “Failed” Kickstarter projects and their label as either a “1” or a “0”.
Next we plotted the relationships between “Words” and “Images”, “Goal” and “Words”, and “Words” and “State”. <jupyter_code># Expanding plot that looked most correlated, Words and Images
plt.scatter(data['Words'], data['Images'], s=.4)
plt.xlabel('# Words')
plt.ylabel('# Images')
plt.xlim(0, 3000)
plt.ylim(0,75)
plt.title('Correlation of Words and Images')<jupyter_output><empty_output><jupyter_text>We plotted “Words” against “Images” because in our scatterplot above, all the goal columns and rows seem to be uncorrelated, where most of the data seemingly represents a horizontal line. Upon looking more closely at this graph and setting the axis to smaller maximum values, we can see that “Words” and “Images” are actually correlated. When examined more closely, the correlation does appear to be low between words and images, meaning both will most likely be needed to capture the data, instead of being able to leave one or the other out with minimal effects.<jupyter_code># Looking at one of the goal plots which appear the strangest
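# As a quick numeric check of the claim above (a supplementary sketch, not part of the
# original notebook), the Pearson correlation between Words and Images can be printed
# directly from the cleaned DataFrame:
print('Pearson correlation of Words and Images:', data['Words'].corr(data['Images']))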
plt.scatter(data['Words'], data['Goal'], s=.8)
plt.xlabel('# Words')
plt.ylabel('Goal ($)')
plt.ylim(0, 200000)
plt.title('Correlation of Goal and Words')<jupyter_output><empty_output><jupyter_text>We plotted “Goal” against “Words” because this plot looked the strangest. The goal values appear stuck at 0 in every plot, but this is because a few campaigns asked for very high goal amounts, so autofitting the plot to include all points distorts the scaling; when a more appropriate scale that captures most of the data is used, the plot becomes more similar to the others.<jupyter_code># Plotting State vs Words
plt.scatter(data['Words'], data['State'], s=.5)
plt.title('Correlation of Words and State')
plt.xlabel('# Words')
plt.ylabel('Whether Succeeded')<jupyter_output><empty_output><jupyter_text>We plotted “Words” against “State” to see if we can predict “State” using just the category “Words”. As can be seen, there is not a strong correlation between Words and State, meaning there is not a simple way to predict State using just the number of words. This means we need a more complex model to take into account the different attributes we have in order to predict whether or not a startup will be funded.<jupyter_code># Getting array of each of attributes of data
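# Supplementary sketch (not part of the original notebook): since State is binary, a
# point-biserial correlation puts a number on how weak the Words/State relationship is.
r_pb, p_val = stats.pointbiserialr(data['State'].astype(int), data['Words'])
print('Point-biserial correlation of Words and State:', r_pb, 'p-value:', p_val)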
State_Data = data['State'].values
Goal_Data = data['Goal'].values
Duration_Data = data['Duration'].values
Friends_Data = data['Friends'].values
Projects_Data = data['Projects'].values
Images_Data = data['Images'].values
Words_Data = data['Words'].values
# Creating dictionary to hold accuracy values
accuracy_rf = {}<jupyter_output><empty_output><jupyter_text>The project analyzed the factors behind the failure and success of Kickstarter projects. We used a data set of 9978 projects and 6 features to differentiate successful and failed projects. When we started, the data set had more than 20 categories or columns, but for simplicity we measure only six of the attributes to predict the failure or success of the projects, dropping all the other columns. We also made sure our data contained a single currency, USD. The data set contains more successful projects (about 5200) than failed projects (about 4900). The prediction is based on the 6 features we have: Goal, Duration, Friends, Projects, Images, and Words. As shown in the “Accuracy for each of Leave One Out Analysis” bar chart at the end, we explored the importance of each attribute by leaving it out and seeing which omission hurts the most. To do this we first train on 0.75 of the data and test on the remaining 0.25, leaving out each attribute in turn and comparing the predictions with the actual data to see which attribute is the most and least important predictor.
# Working with all attributes<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Goal_Data[0:int(len(Goal_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Friends_Data[0:int(len(Friends_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Duration_Data[0:int(len(Duration_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Projects_Data[0:int(len(Projects_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Images_Data[0:int(len(Images_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Words_Data[0:int(len(Words_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Goal_Data[int(len(Goal_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Friends_Data[int(len(Friends_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Duration_Data[int(len(Duration_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Projects_Data[int(len(Projects_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Images_Data[int(len(Images_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Words_Data[int(len(Words_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['All'] = Accuracy*100<jupyter_output><empty_output><jupyter_text># Leaving out Goal<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Friends_Data[0:int(len(Friends_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Duration_Data[0:int(len(Duration_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Projects_Data[0:int(len(Projects_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Images_Data[0:int(len(Images_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Words_Data[0:int(len(Words_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Friends_Data[int(len(Friends_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Duration_Data[int(len(Duration_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Projects_Data[int(len(Projects_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Images_Data[int(len(Images_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Words_Data[int(len(Words_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['W/O Goal'] = Accuracy*100<jupyter_output><empty_output><jupyter_text># Leaving out duration<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Goal_Data[0:int(len(Goal_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Friends_Data[0:int(len(Friends_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Projects_Data[0:int(len(Projects_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Images_Data[0:int(len(Images_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Words_Data[0:int(len(Words_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Goal_Data[int(len(Goal_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Friends_Data[int(len(Friends_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Projects_Data[int(len(Projects_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Images_Data[int(len(Images_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Words_Data[int(len(Words_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['W/O Duration'] = Accuracy*100<jupyter_output><empty_output><jupyter_text># Leaving out friends<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Goal_Data[0:int(len(Goal_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Duration_Data[0:int(len(Duration_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Projects_Data[0:int(len(Projects_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Images_Data[0:int(len(Images_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Words_Data[0:int(len(Words_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Goal_Data[int(len(Goal_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Duration_Data[int(len(Duration_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Projects_Data[int(len(Projects_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Images_Data[int(len(Images_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Words_Data[int(len(Words_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['W/O Friends'] = Accuracy*100<jupyter_output><empty_output><jupyter_text># Leaving out projects<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Goal_Data[0:int(len(Goal_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Duration_Data[0:int(len(Duration_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Friends_Data[0:int(len(Friends_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Images_Data[0:int(len(Images_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Words_Data[0:int(len(Words_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Goal_Data[int(len(Goal_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Duration_Data[int(len(Duration_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Friends_Data[int(len(Friends_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Images_Data[int(len(Images_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Words_Data[int(len(Words_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['W/O Projects'] = Accuracy*100<jupyter_output><empty_output><jupyter_text># Leaving out images<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Goal_Data[0:int(len(Goal_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Duration_Data[0:int(len(Duration_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Friends_Data[0:int(len(Friends_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Projects_Data[0:int(len(Projects_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Words_Data[0:int(len(Words_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Goal_Data[int(len(Goal_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Duration_Data[int(len(Duration_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Friends_Data[int(len(Friends_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Projects_Data[int(len(Projects_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Words_Data[int(len(Words_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['W/O Images'] = Accuracy*100<jupyter_output><empty_output><jupyter_text># Leaving out words<jupyter_code>##############################
# Getting the training set #
##############################
# Getting the X or predictor variable set
Train_X = Goal_Data[0:int(len(Goal_Data) * .75)]
Train_X = np.expand_dims(Train_X, axis=1)
Train_X = np.append(Train_X, np.expand_dims(Duration_Data[0:int(len(Duration_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Friends_Data[0:int(len(Friends_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Projects_Data[0:int(len(Projects_Data) \
* .75)], axis=1), axis=1)
Train_X = np.append(Train_X, np.expand_dims(Images_Data[0:int(len(Images_Data) \
* .75)], axis=1), axis=1)
# Getting the Y or prediction variable set
Train_Y = State_Data[0:int(len(State_Data) * .75)]
#############################
# Getting the testing set #
#############################
# Getting the X or predictor variable set
Test_X = Goal_Data[int(len(Goal_Data) * .75):]
Test_X = np.expand_dims(Test_X, axis=1)
Test_X = np.append(Test_X, np.expand_dims(Duration_Data[int(len(Duration_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Friends_Data[int(len(Friends_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Projects_Data[int(len(Projects_Data) \
* .75):], axis=1), axis=1)
Test_X = np.append(Test_X, np.expand_dims(Images_Data[int(len(Images_Data) \
* .75):], axis=1), axis=1)
# Getting the Y or prediction variable set
Test_Y = State_Data[int(len(State_Data) * .75):]
#################
# Random Forest #
#################
clf = RandomForestRegressor(random_state=0)
clf.fit(Train_X, Train_Y)
# Predicting using the random forest
predictions = clf.predict(Test_X)
predictions = predictions >= 0.5
Test_Y = (Test_Y == 1)
# Checking correct or not
correct = (predictions == Test_Y)
# Getting and printing accuracy on valid set
Accuracy = sum(correct) / len(Test_Y)
accuracy_rf['W/O Words'] = Accuracy*100
# Printing out the accuracies for rf
accuracy_rf
# Plotting out the accuracies
f, ax = plt.subplots(figsize=(18,7))
plt.bar(*zip(*accuracy_rf.items()))
plt.ylabel('Accuracy %')
plt.title('Accuracy for each of Leave One Out Analysis')
plt.ylim(50,70)
plt.show()<jupyter_output><empty_output>
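The six near-identical train/test cells above can also be collapsed into one loop. The sketch below assumes the cleaned "data" DataFrame and the RandomForestRegressor import from earlier in the notebook; it is a compact restatement of the same leave-one-out analysis, not a replacement for it:
feature_cols = ['Goal', 'Duration', 'Friends', 'Projects', 'Images', 'Words']
split = int(len(data) * .75)
compact_accuracy = {}
for left_out in [None] + feature_cols:
    cols = [c for c in feature_cols if c != left_out]
    X = data[cols].values
    y = (data['State'].values == 1)
    model = RandomForestRegressor(random_state=0)
    model.fit(X[:split], y[:split])
    preds = model.predict(X[split:]) >= 0.5
    label = 'All' if left_out is None else 'W/O ' + left_out
    compact_accuracy[label] = (preds == y[split:]).mean() * 100
print(compact_accuracy)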
<jupyter_start><jupyter_text># `Digital Image Processing Assignment`Question:
The following is an image combining a blurry part and a high contrast part. Find an image like this from internet.
You need to apply both the smoothing filter and the sharpening filter in spatial domain. Display the output and explain the effect.
Demonstrate the same result with low pass and high pass filter in the frequency domain. Attach the code as well. (Marks: 20)
_____________________________________________________
Submitted by:
Md. Al Siam,
Roll No: 1603008# `Picking the Image and Displaying`<jupyter_code># Getting the image
! wget https://cdn.jhmrad.com/wp-content/uploads/beautiful-small-house-designs_756083-670x400.jpg
# display the image
import cv2
img = cv2.imread("/content/beautiful-small-house-designs_756083-670x400.jpg")
img = img[:,:,:1]
from google.colab.patches import cv2_imshow
cv2_imshow(img)<jupyter_output><empty_output><jupyter_text># `Smoothing Filter: Average Filter`<jupyter_code>import numpy as np
# Using smoothing filter (average filter)
def averageFilter(img, kernel_size):
mid_x = int(kernel_size//2)
mid_y = int(kernel_size//2)
dx = np.zeros((kernel_size, kernel_size))
dy = np.zeros((kernel_size, kernel_size))
kernel = np.ones((kernel_size, kernel_size))/(kernel_size*kernel_size)
for i in range(kernel_size):
for j in range(kernel_size):
dx[i][j] = int(i-mid_x)
dy[i][j] = int(j-mid_y)
final_image = np.zeros((img.shape[0], img.shape[1], 1))
print(mid_x, mid_y, "Kernel = ", kernel, "\n", dx, dy)
for i in range(int(mid_x), img.shape[0]-(kernel_size//2)):
# print("\n>", i, " :", end="")
for j in range(int(mid_y), img.shape[1]-(kernel_size//2)):
# print(f" {j}", end="")
sum_making_mean = 0.00
for k_i in range(kernel_size):
for k_j in range(kernel_size):
sum_making_mean += img[i+int(dx[k_i][k_j])][j+int(dy[k_i][k_j])][0]*kernel[k_i][k_j]
final_image[i][j][0] = sum_making_mean
return final_image
# apply the smoothing (average) filter defined above; the 5x5 kernel size is an assumption
avg_filtered_image = averageFilter(img, 5)
cv2_imshow(avg_filtered_image)<jupyter_output><empty_output><jupyter_text># `Displaying Original and Blurred Image`<jupyter_code>original_and_blurred = np.hstack((img, avg_filtered_image))
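# Optional sanity check (not part of the original assignment): OpenCV's built-in box
# filter should give a visually similar smoothing to the hand-rolled average filter.
builtin_blur = cv2.blur(img, (5, 5)).reshape(img.shape[0], img.shape[1], 1)
cv2_imshow(builtin_blur)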
cv2_imshow(original_and_blurred)<jupyter_output><empty_output><jupyter_text># `Sharpening Image: Sobel Filter`<jupyter_code>def sobelFilter(img):
kernel_size = 3
mid_x = int(kernel_size//2)
mid_y = int(kernel_size//2)
dx = np.zeros((kernel_size, kernel_size))
dy = np.zeros((kernel_size, kernel_size))
kernel = [
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]
]
for i in range(kernel_size):
for j in range(kernel_size):
dx[i][j] = int(i-mid_x)
dy[i][j] = int(j-mid_y)
final_image = np.zeros((img.shape[0], img.shape[1], 1))
print(mid_x, mid_y, "Kernel = ", kernel, "\n", dx, dy)
for i in range(int(mid_x), img.shape[0]-(kernel_size//2)):
# print("\n>", i, " :", end="")
for j in range(int(mid_y), img.shape[1]-(kernel_size//2)):
# print(f" {j}", end="")
sum_making_mean = 0.00
for k_i in range(kernel_size):
for k_j in range(kernel_size):
sum_making_mean += img[i+int(dx[k_i][k_j])][j+int(dy[k_i][k_j])][0]*kernel[k_i][k_j]
final_image[i][j][0] = sum_making_mean
return final_image
sobel_filtered_image = sobelFilter(img)
cv2_imshow(sobel_filtered_image)<jupyter_output><empty_output><jupyter_text># `Sharpening Image: Laplacian Filter`<jupyter_code># Using sharpening filter (Laplacian filter)
def lapalcianFilter(img):
kernel_size = 3
mid_x = int(kernel_size//2)
mid_y = int(kernel_size//2)
dx = np.zeros((kernel_size, kernel_size))
dy = np.zeros((kernel_size, kernel_size))
kernel = [
[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]
]
for i in range(kernel_size):
for j in range(kernel_size):
dx[i][j] = int(i-mid_x)
dy[i][j] = int(j-mid_y)
final_image = np.zeros((img.shape[0], img.shape[1], 1))
print(mid_x, mid_y, "Kernel = ", kernel, "\n", dx, dy)
for i in range(int(mid_x), img.shape[0]-(kernel_size//2)):
# print("\n>", i, " :", end="")
for j in range(int(mid_y), img.shape[1]-(kernel_size//2)):
# print(f" {j}", end="")
sum_making_mean = 0.00
for k_i in range(kernel_size):
for k_j in range(kernel_size):
sum_making_mean += img[i+int(dx[k_i][k_j])][j+int(dy[k_i][k_j])][0]*kernel[k_i][k_j]
final_image[i][j][0] = sum_making_mean
return final_image
laplacian_filtered_image = lapalcianFilter(img)
cv2_imshow(laplacian_filtered_image)<jupyter_output><empty_output><jupyter_text># `Displaying Sharpened Images`<jupyter_code>original_sobel_laplacian = np.hstack((img, sobel_filtered_image, laplacian_filtered_image))
cv2_imshow(original_sobel_laplacian)<jupyter_output><empty_output><jupyter_text># `Explaining the Effects`
Mean filtering smooths or blurs the image. It reduces the intensity variation between adjacent pixels by replacing each pixel value with the average value of its neighbours, so high-variance differences between adjacent pixels are removed. As a result, the variance of the pixel values decreases and the image gets smoother; the larger the kernel size, the stronger the smoothing.
The sharpening filters are used for edge detection. There are some widely used state-of-the-art operators or kernels for sharpening an image. Among them, the Sobel filter and the Laplacian filter have been used and displayed in this notebook. We see in the output that the quality of edge detection differs between the two filters, because their kernels are different.# `Making the Histogram`<jupyter_code>histogram = [0 for i in range(256)]
for i in range(img.shape[0]):
for j in range(img.shape[1]):
histogram[img[i][j][0]] += 1
for i, val in enumerate(histogram):
print(i, ">", val)
import matplotlib.pyplot as plt
plt.figure(figsize=(21,14))
plt.plot([i for i in range(256)], histogram)
plt.show()
plt.close()<jupyter_output><empty_output><jupyter_text># `Low Pass Filter`<jupyter_code>threshold = 145
low_passed_image = np.zeros((img.shape[0], img.shape[1], 1))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
if img[i][j][0] < threshold:
low_passed_image[i][j][0] = img[i][j][0]
cv2_imshow(low_passed_image)<jupyter_output><empty_output><jupyter_text># `High Pass Filter`<jupyter_code>high_passed_image = np.zeros((img.shape[0], img.shape[1], 1))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
if img[i][j][0] > threshold:
high_passed_image[i][j][0] = img[i][j][0]
cv2_imshow(high_passed_image)<jupyter_output><empty_output><jupyter_text># `Gaussian Low Pass Filter`<jupyter_code>def get_value(u, v):
try:
# D = math.sqrt(((u-img.shape[0]//2)*(v-img.shape[0]//2))+((v-img.shape[1]//2)*(v-img.shape[1]//2)))
        D = np.sqrt((u-img.shape[0]/2)**2 + (v-img.shape[1]/2)**2)  # np.sqrt is used since the math module is never imported
except:
D = 1
print(D)
sigma = 80
return np.exp((-1*(D*D))/(2*sigma*sigma))
def make_kernel():
kernel = np.zeros((img.shape[0], img.shape[1], 1))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
kernel[i][j] = get_value(i, j)
return kernel
kernel = make_kernel()
gray_fft = np.fft.fft2(img)
gray_fftshift = np.fft.fftshift(gray_fft)
dst = np.zeros_like(gray_fftshift)
dst_filtered = kernel * gray_fftshift
dst_ifftshift = np.fft.ifftshift(dst_filtered)
dst_ifft = np.fft.ifft2(dst_ifftshift)
dst = np.abs(np.real(dst_ifft))
dst = np.clip(dst,0,255)
cv2_imshow(dst)<jupyter_output><empty_output><jupyter_text># `Gaussian High Pass Filter`<jupyter_code>def get_value(u, v):
try:
# D = math.sqrt(((u-img.shape[0]//2)*(v-img.shape[0]//2))+((v-img.shape[1]//2)*(v-img.shape[1]//2)))
        D = np.sqrt((u-img.shape[0]/2)**2 + (v-img.shape[1]/2)**2)  # np.sqrt is used since the math module is never imported
except:
D = 1
print(D)
sigma = 80
return 1 - np.exp((-1*(D*D))/(2*sigma*sigma))
def make_kernel():
kernel = np.zeros((img.shape[0], img.shape[1], 1))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
kernel[i][j] = get_value(i, j)
return kernel
kernel = make_kernel()
gray_fft = np.fft.fft2(img)
gray_fftshift = np.fft.fftshift(gray_fft)
dst = np.zeros_like(gray_fftshift)
dst_filtered = kernel * gray_fftshift
dst_ifftshift = np.fft.ifftshift(dst_filtered)
dst_ifft = np.fft.ifft2(dst_ifftshift)
dst = np.abs(np.real(dst_ifft))
dst = np.clip(dst,0,255)
cv2_imshow(dst)<jupyter_output><empty_output>
<jupyter_start><jupyter_text>## model<jupyter_code>class ModelParameter:
def __init__(self, dataset="train",
num_classes=28,
image_rows=512,
image_cols=512,
batch_size=200,
n_channels=1,
row_scale_factor=4,
col_scale_factor=4,
shuffle=False,
n_epochs=1):
self.dataset = dataset
self.basepath = f"{INPUT_FILE}{dataset}/"
self.num_classes = num_classes
self.image_rows = image_rows
self.image_cols = image_cols
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.row_scale_factor = row_scale_factor
self.col_scale_factor = col_scale_factor
self.scaled_row_dim = np.int(self.image_rows / self.row_scale_factor)
self.scaled_col_dim = np.int(self.image_cols / self.col_scale_factor)
self.n_epochs = n_epochs
parameter = ModelParameter()
class ImagePreprocess:
def __init__(self, modelparameter):
self.parameter = modelparameter
        self.basepath = self.parameter.basepath
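    # The checkpoint ends mid-class here; a minimal continuation (purely hypothetical,
    # assuming scikit-image is installed) could downscale images to the configured size:
    def preprocess(self, image):
        from skimage.transform import resize
        return resize(image, (self.parameter.scaled_row_dim, self.parameter.scaled_col_dim))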
?a.resize<jupyter_output><empty_output>
<jupyter_start><jupyter_text>### [Link](https://colab.research.google.com/drive/1mguVQuMEn2mIfISPCf4I9P6rvjNAK2ub?usp=sharing) to the notebook itself### Introduction##### Import of the libraries used in the code<jupyter_code>from keras.models import Model
from keras.layers import Dense, Input, Dropout, LSTM, Activation
from keras.layers.embeddings import Embedding
from sklearn.metrics import classification_report, f1_score, log_loss, precision_score, recall_score
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
np.random.seed(1)<jupyter_output><empty_output><jupyter_text>##### Downloading the GloVe word-representation vectors<jupyter_code>!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip glove*.zip<jupyter_output>--2021-06-08 18:03:20-- http://nlp.stanford.edu/data/glove.6B.zip
Resolving nlp.stanford.edu (nlp.stanford.edu)... 171.64.67.140
Connecting to nlp.stanford.edu (nlp.stanford.edu)|171.64.67.140|:80... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://nlp.stanford.edu/data/glove.6B.zip [following]
--2021-06-08 18:03:20-- https://nlp.stanford.edu/data/glove.6B.zip
Connecting to nlp.stanford.edu (nlp.stanford.edu)|171.64.67.140|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip [following]
--2021-06-08 18:03:20-- http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip
Resolving downloads.cs.stanford.edu (downloads.cs.stanford.edu)... 171.64.64.22
Connecting to downloads.cs.stanford.edu (downloads.cs.stanford.edu)|171.64.64.22|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 862182613 (822M) [application/zip]
Saving to: ‘glove.6B.zip’
glove.6[...]<jupyter_text>### Helper functions##### Reading the GloVe file of vectors<jupyter_code>def read_glove_vecs(glove_file):
with open(glove_file, 'r', encoding="utf8") as f:
words = set()
word_to_vec_map = {}
for line in f:
line = line.strip().split()
curr_word = line[0]
words.add(curr_word)
word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)
i = 1
words_to_index = {}
index_to_words = {}
for w in sorted(words):
words_to_index[w] = i
index_to_words[i] = w
i = i + 1
    return words_to_index, index_to_words, word_to_vec_map<jupyter_output><empty_output><jupyter_text>##### The softmax function<jupyter_code>def softmax(x):
e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()<jupyter_output><empty_output><jupyter_text>##### Reading the corresponding values from the train and test CSV files<jupyter_code>def read_csv(filename):
phrase = []
emoji_ = []
with open(filename) as csvDataFile:
csv_reader = csv.reader(csvDataFile)
for row in csv_reader:
phrase.append(row[0])
emoji_.append(row[1])
x = np.asarray(phrase)
y = np.asarray(emoji_, dtype=int)
return x, y
<jupyter_output><empty_output><jupyter_text>##### Converting the outputs (numbers) into one-hot vectors<jupyter_code>def convert_to_one_hot(y, c):
y = np.eye(c)[y.reshape(-1)]
    return y<jupyter_output><empty_output><jupyter_text>##### Predicting the outputs for given input elements<jupyter_code>def predict(X, Y, W, b, word_to_vec_map):
m = X.shape[0]
pred = np.zeros((m, 1))
for j in range(m):
words = X[j].lower().split()
avg = np.zeros((50,))
for w in words:
avg += word_to_vec_map[w]
avg = avg / len(words)
z = np.dot(W, avg) + b
a = softmax(z)
pred[j] = np.argmax(a)
print("Accuracy: " + str(np.mean((pred[:] == Y.reshape(Y.shape[0], 1)[:]))))
    return pred<jupyter_output><empty_output><jupyter_text>##### Converting input sentences into matrices of index values<jupyter_code>def sentences_to_indices(X, word_to_index, max_len):
m = X.shape[0]
x_indices = np.zeros((m, max_len))
for i in range(m):
sentence_words = (X[i].lower()).split()
j = 0
for w in sentence_words:
            # the i-th value is the index of the sentence, the j-th is the position
            # of the word within it; the value that gets stored is the index of
            # the corresponding word
x_indices[i, j] = word_to_index[w]
j = j + 1
    return x_indices<jupyter_output><empty_output><jupyter_text>##### Creating a pre-trained Embedding layer from the GloVe vectors<jupyter_code>def pretrained_embedding_layer(word_to_vec_map, word_to_index):
vocab_len = len(word_to_index) + 1
emb_dim = word_to_vec_map["cucumber"].shape[0]
emb_matrix = np.zeros((vocab_len, emb_dim))
for word, index in word_to_index.items():
emb_matrix[index, :] = word_to_vec_map[word]
embedding_layer = Embedding(vocab_len, emb_dim)
embedding_layer.build((None,))
embedding_layer.set_weights([emb_matrix])
    return embedding_layer<jupyter_output><empty_output><jupyter_text>##### Creating the model with suitable predefined values<jupyter_code>def sentiment_analysis(input_shape, word_to_vec_map, word_to_index):
sentence_indices = Input(shape=input_shape, dtype=np.int32)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
embeddings = embedding_layer(sentence_indices)
    # LSTM layer with a 128-dimensional hidden state
    X = LSTM(128, return_sequences=True)(embeddings)
    # Dropout with rate 0.5
    X = Dropout(0.5)(X)
    # Another LSTM layer with a 128-dimensional hidden state
X = LSTM(128)(X)
X = Dropout(0.5)(X)
X = Dense(5, activation='softmax')(X)
X = Activation('softmax')(X)
model = Model(sentence_indices, X)
    return model<jupyter_output><empty_output><jupyter_text># Main part##### Reading the train and test data, converting the outputs to one-hot vectors, and reading the word-representation vectors<jupyter_code>X_train, Y_train = read_csv('train_set.csv')
X_test, Y_test = read_csv('test_set.csv')
maxLen = len(max(X_train, key=len).split())
Y_oh_train = convert_to_one_hot(Y_train, 5)
Y_oh_test = convert_to_one_hot(Y_test, 5)
word_to_index_, index_to_word, word_to_vec_map_ = read_glove_vecs('glove.6B.50d.txt')<jupyter_output><empty_output><jupyter_text>##### Creating and training the model<jupyter_code>model = sentiment_analysis((maxLen,), word_to_vec_map_, word_to_index_)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
X_train_indices = sentences_to_indices(X_train, word_to_index_, maxLen)
Y_train_oh = convert_to_one_hot(Y_train, 5)
model.fit(X_train_indices, Y_train_oh, epochs=100, batch_size=32, shuffle=True)<jupyter_output>Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 10)] 0
_________________________________________________________________
embedding (Embedding) (None, 10, 50) 20000050
_________________________________________________________________
lstm (LSTM) (None, 10, 128) 91648
_________________________________________________________________
dropout (Dropout) (None, 10, 128) 0
_________________________________________________________________
lstm_1 (LSTM) (None, 128) 131584
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_____________________________________________________________[...]<jupyter_text>##### Testing the model on the test data<jupyter_code>X_test_indices = sentences_to_indices(X_test, word_to_index_, max_len=maxLen)
Y_test_oh = convert_to_one_hot(Y_test, 5)
loss, acc = model.evaluate(X_test_indices, Y_test_oh)
print()
print("Accuracy ", acc)<jupyter_output>2/2 [==============================] - 1s 16ms/step - loss: 1.1351 - accuracy: 0.7679
Accuracy 0.7678571343421936
<jupyter_text>##### Reviewing the incorrectly predicted inputs and additional evaluation metrics<jupyter_code>y_test_oh = np.eye(5)[Y_test.reshape(-1)]
X_test_indices = sentences_to_indices(X_test, word_to_index_, maxLen)
pred = model.predict(X_test_indices)
actual = []
predicted = []
for i in range(len(X_test)):
x = X_test_indices
num = np.argmax(pred[i])
actual.append(Y_test[i])
predicted.append(num)
if num != Y_test[i]:
print('Input: ' + str(X_test[i]))
print('Expected class: ' + str(Y_test[i]))
print('Predicted class: ' + str(num) + '\n')
precision = precision_score(actual, predicted, average='macro')
recall = recall_score(actual, predicted, average='macro')
f1_score = f1_score(actual, predicted, average='macro')
loss = log_loss(actual, pred, eps=1e-15)
matrix = classification_report(actual, predicted, labels=[0, 1, 2, 3, 4])<jupyter_output>Input: he got a very nice raise
Expected class: 2
Predicted class: 0
Input: she got me a nice present
Expected class: 2
Predicted class: 0
Input: he is a good friend
Expected class: 2
Predicted class: 0
Input: Stop making this joke ha ha ha
Expected class: 2
Predicted class: 3
Input: any suggestions for dinner
Expected class: 4
Predicted class: 2
Input: you brighten my day
Expected class: 2
Predicted class: 0
Input: she is a bully
Expected class: 3
Predicted class: 0
Input: will you be my valentine
Expected class: 2
Predicted class: 3
Input: he can pitch really well
Expected class: 1
Predicted class: 3
Input: See you at the restaurant
Expected class: 4
Predicted class: 1
Input: What you did was awesome
Expected class: 2
Predicted class: 3
Input: go away
Expected class: 3
Predicted class: 1
Input: I did not have breakfast
Expected class: 4
Predicted class: 0
<jupyter_text>##### Testing the model with user-provided inputs<jupyter_code>x_test = np.array(['very happy'])
X_test_indices = sentences_to_indices(x_test, word_to_index_, maxLen)
print('Input: ' + x_test[0])
print('Predicted class: ' + str(np.argmax(model.predict(X_test_indices))) + '\n')
x_test = np.array(['very sad'])
X_test_indices = sentences_to_indices(x_test, word_to_index_, maxLen)
print('Input: ' + x_test[0])
print('Predicted class: ' + str(np.argmax(model.predict(X_test_indices))) + '\n')
x_test = np.array(['i am starving'])
X_test_indices = sentences_to_indices(x_test, word_to_index_, maxLen)
print('Input: ' + x_test[0])
print('Predicted class: ' + str(np.argmax(model.predict(X_test_indices))) + '\n')
x_test = np.array(['I have met the love of my life'])
X_test_indices = sentences_to_indices(x_test, word_to_index_, maxLen)
print('Input: ' + x_test[0])
print('Predicted class: ' + str(np.argmax(model.predict(X_test_indices))) + '\n')<jupyter_output>Input: I have met the love of my life
Predicted class: 0
<jupyter_text>##### Displaying the model's performance metrics<jupyter_code>print('Accuracy: {0}'.format(str(acc)))
print('Log loss: {0}'.format(loss))
print('Precision: {0}'.format(precision))
print('Recall: {0}'.format(recall))
print('F1 score: {0}'.format(f1_score))
print('Classification report: \n{0}'.format(matrix))<jupyter_output>Classification report:
precision recall f1-score support
0 0.54 1.00 0.70 7
1 0.78 0.88 0.82 8
2 0.92 0.61 0.73 18
3 0.78 0.88 0.82 16
4 1.00 0.57 0.73 7
accuracy 0.77 56
macro avg 0.80 0.79 0.76 56
weighted avg 0.82 0.77 0.77 56
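As a small optional extension of the evaluation above (not in the original notebook), a confusion matrix makes the per-class errors behind the classification report easier to read:
from sklearn.metrics import confusion_matrix
print(confusion_matrix(actual, predicted, labels=[0, 1, 2, 3, 4]))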
<jupyter_start><jupyter_text># Weekly shooting numbers
We need to run the script below to get the numbers to update [this page](http://www.chicagotribune.com/news/data/ct-shooting-victims-map-charts-htmlstory.html). When this script run from the same directory as wherever you download the shootings csv, the numbers you need to update the shootings page should be printed in your command line.
You should follow along with [this ticket](https://tribune.unfuddle.com/a#/projects/46/tickets/by_number/1262) to fully understand what we're doing here.### Step 1: download shootings csv from newsroomdb. Make sure it is saved on your Desktop (or just in the same directory as wherever this script will live).### Step 2: Let's make sure we can take a look at the data.<jupyter_code># Below, you can import into this file the python libraries needed to do this analysis.
import pandas as pd
import numpy as np
# This assigns the variable 'shootings' to the appropriate csv, which you should have downloaded already.
# You can edit the following line to make sure it points to wherever the shootings csv lives on your machine.
shootings = pd.read_csv('../Desktop/shootings.csv')
# This allows you to look at the first 3 rows of the data.
shootings[:3]
# Let's look just at the first 10 rows of the 'Date' column.
shootings['Date'][:10]<jupyter_output><empty_output><jupyter_text>### Step 3: Let's see how many rows we have in the 'Date' column.<jupyter_code># The 'count' method will tell us how many rows are in this entire dataset.
# Our analyses for the purposes of the shootings page will focus on dates, so let's look at how many Date rows there are.
shootings['Date'].count()<jupyter_output><empty_output><jupyter_text>### Step 4: Let's check out total shootings in 2016.<jupyter_code># Let's focus in on shootings from 2016. The 'startswith' method allows us to zero in on rows that start with
# the year we want, and assign them to the variable 'shootings_2016'.
shootings_2016 = shootings[shootings['Date'].str.startswith('2016', na=False)]
# Now, let's count how many rows there are of shootings with 2016 dates.
print "There were", shootings_2016['Date'].count(), "shootings in 2016."<jupyter_output><empty_output><jupyter_text>### Step 5: Let's check out total shootings so far in 2017.<jupyter_code># Now let's do the same thing for shootings in 2017.
shootings_2017 = shootings[shootings['Date'].str.startswith('2017', na=False)]
print "There have been", shootings_2017['Date'].count(), "shootings in 2017 so far."<jupyter_output><empty_output><jupyter_text>### For the shootings page, we want to focus in on how many shootings _within a certain time frame_ (until present day), which we now have for 2017.
### Step 6: Let's try to find the shootings in each year before 2017 only from 1/1 to present day.<jupyter_code># Everytime you update this data, you will need to change the ending date from '2016-05-30' to the current date.
shootings_in_range_2016 = shootings_2016[(shootings['Date'] > '2016-01-01') & (shootings['Date'] <= '2016-06-19')]
shootings_in_range_2015 = shootings[(shootings['Date'] > '2015-01-01') & (shootings['Date'] <= '2015-06-19')]
shootings_in_range_2014 = shootings[(shootings['Date'] > '2014-01-01') & (shootings['Date'] <= '2014-06-19')]
shootings_in_range_2013 = shootings[(shootings['Date'] > '2013-01-01') & (shootings['Date'] <= '2013-06-19')]
shootings_in_range_2012 = shootings[(shootings['Date'] > '2012-01-01') & (shootings['Date'] <= '2012-06-19')]
print ("There were", shootings_in_range_2016['Date'].count(), "shootings between January 1 and present day in 2016.")
print ("There were", shootings_in_range_2015['Date'].count(), "shootings between January 1 and present day in 2015.")
print ("There were", shootings_in_range_2014['Date'].count(), "shootings between January 1 and present day in 2014.")
print ("There were", shootings_in_range_2013['Date'].count(), "shootings between January 1 and present day in 2013.")
print ("There were", shootings_in_range_2012['Date'].count(), "shootings between January 1 and present day in 2012.")<jupyter_output><empty_output><jupyter_text>### Step 7: The next thing we want to do is find out how many shootings there were _per month_ in 2017. <jupyter_code># "Date" need some formatting. Some dates are 'None' or 'NaN' -- the try/except accounts for those.
try:
shootings['Date'] = pd.to_datetime(shootings['Date'], errors='coerce')
except:
pass
# The next thing we need for the shootings page is an updated shootings breakdown by month for 2017.
# One way to check if this is right, is to go to the current shootings page (linked at the top of this notebook),
# and see if the numbers for January through May match, since those were done manually.
shootings_by_month_2017 = shootings_2017.groupby([shootings['Date'].dt.year, shootings['Date'].dt.month])['Date'].count()
# Below is how we change from numerals to months. As we progress through the months of 2017, you should add more months.
shootings_by_month_2017.index = ['January','February','March','April','May','June']
print "Here are 2017 shootings by month:", shootings_by_month_2017<jupyter_output><empty_output>
<jupyter_start><jupyter_text>## Building a recommendation system with word2vec<jupyter_code>from gensim.models import Word2Vec
import pandas as pd
from collections import defaultdict
import numpy as np
import mykmeans as kmeans
## string to int, ignoring non-digit input
def toint(s,default=0):
converted = default
try :
converted = int(s)
except:
#print('string convert exception "{0}" (default={1})"'.format(s , default))
pass
return converted
class m2v_recsys:
model = None
home_dir = '/Users/goodvc/Documents/data-analytics/movie-recommendation/'
w2v_env = { 'min_count':5, 'size':100, 'window':5 }
nn_func = None
## initialize
def __init__(self, ds_type='ml-latest-small'):
self.ds_type = ds_type
self.movieLensDataLoad()
## dataset load
def movieLensDataLoad(self, ds_type=None):
if ds_type != None:
self.ds_type = ds_type
        ## user movie rating data
self.ratings = pd.read_csv('{home}/{type}/{name}'.format( home=self.home_dir, type=self.ds_type, name='ratings.csv'))
        ## movie meta (title, genre) data
self.movies = pd.read_csv('{home}/{type}/{name}'.format( home=self.home_dir, type=self.ds_type, name='movies.csv'))
## split title and release year . ex) Nixon (1995) => Nixon , 1995
self.movies['year'] = self.movies['title'].apply(lambda x: toint(x.strip()[-5:-1], 1950))
self.movies['title'] = self.movies['title'].apply(lambda x: x[:-7].strip())
        ## data of tags that users added to movies
self.tags = pd.read_csv('{home}/{type}/{name}'.format( home=self.home_dir, type=self.ds_type, name='tags.csv'))
## join ratings and movies by movieId
self.ratings_movie = pd.merge( self.ratings, self.movies, on='movieId' )
def load_model(self, model_path):
self.model = Word2Vec.load(model_path)
print('model loaded', model_path)
def save_model(self, model_path):
self.model.save(model_path)
def training(self):
if self.model == None :
            self.sentences = self.make_sentences()
            self.model = Word2Vec(self.sentences,
min_count = self.w2v_env['min_count'],
size = self.w2v_env['size'],
window = self.w2v_env['window'])
print('model trained')
def nearest_neighbors_by_m2v(self, target, topn=10):
if str == type(target):
target=[target]
result = []
try:
result = self.model.most_similar(positive=target, topn=topn)
except:
pass
return result
def user_interests(self, userId):
return self.ratings_movie[self.ratings_movie.userId==userId][['title','rating']].values.tolist()
def cal_score(self, title, sim, rating, pre_score):
score = np.linalg.norm([sim, rating/5])
score = score+pre_score
return score
def recommend_movies(self, userId, topn=10):
movies = defaultdict(float)
interests = self.user_interests(userId)
seenMovies = set( m for m,s in interests )
for title, rating in interests:
neighbors = self.nearest_neighbors_by_m2v(title)
for (title2, sim) in neighbors:
if title2 in seenMovies:
continue
pre = movies.get(title2, 0.0)
movies[title2] = self.cal_score( title2, sim, rating, pre )
return {'recommended' : sorted(movies.items(), key=lambda x: x[1], reverse=True)[:topn]
,'ratings':interests}
####################################
## clustering methods
def clustering(self, k=100, delta=0.00000001, maxiter=100):
self.movie_vec = self.model.syn0
centres, index2cid, dist = kmeans.kmeanssample(self.movie_vec, k,
metric = 'cosine',
delta = delta,
nsample = 0, maxiter = maxiter,)
self.clustered_ds = pd.DataFrame( [ (a, b, c) for a, b, c in zip(self.model.index2word, index2cid, dist )],
columns=['title', 'cid', 'dist'] ).sort(['cid','dist'], ascending=True)
self.movie2cid = { movie:cid for movie,cid in zip(self.model.index2word, index2cid) }
return (centres, index2cid, dist)
def clusterMembersByCId(self, cid):
return self.clustered_ds[self.clustered_ds.cid==cid]
def clusterMembersByTitle(self, title):
cid = self.clustered_ds[self.clustered_ds.title==title]['cid'].values[0]
return self.clusterMembersByCId(cid)
def clusterTags(self, cid):
ds = pd.merge(self.tags
, self.clusterMembersByCId(cid)
, on='title')
ds['dist'] = 1 - ds['dist']
cid_tags = ds.groupby(['tag'])['dist'].sum().sort(inplace=False,ascending=False)[:5]
return cid_tags
######################################
def user_mcate_interests(self, uid, v=False):
interests = self.user_interests(uid)
cid_interests = defaultdict(int)
total = 0
for title, rating in interests:
cid_interests[self.movie2cid.get(title, -1)] += rating/5
total += (rating/5)
        interests_ds = pd.DataFrame.from_dict({'score':cid_interests}, orient='columns')
interests_ds['score'] = round(interests_ds['score'] / total,5)
interests_ds.sort(['score'], ascending=False, inplace=True)
if True==v:
for cid, score in interests_ds.iterrows():
print(cid,score.values[0],cid,self.clusterTags(cid).index.values[:3])
return interests_ds
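    ## NOTE (added sketch): training() calls self.make_sentences(), which is not defined
    ## anywhere in this cell. A minimal possible implementation -- an assumption, not the
    ## author's original code -- treats each user's rated movie titles as one "sentence".
    def make_sentences(self):
        grouped = self.ratings_movie.groupby('userId')['title']
        return [titles.tolist() for _, titles in grouped]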
## 20m dataset
m2v_rs_ml20 = m2v_recsys('ml-20m')
m2v_rs_ml20.load_model('./resource/m2v_rs.ml20.model')
m2v_rs_ml20.load_model('./resource/m2v_rs.ml20.model')
(centres, index2cid, dist ) = m2v_rs_ml20.clustering(k=500, maxiter=200, delta=1e-20)
m2v_rs_ml20.clusterMembersByCId(100)
m2v_rs_ml20.clusterMembersByTitle('Pretty Woman')
m2v_rs_ml20.clusterMembersByTitle('Nixon')
for cid in range(100,130):
print('cid :',cid,m2v_rs_ml20.clusterTags(cid).index.values[:3])
m2v_rs_ml20.clusterMembersByCId(90)
m2v_rs_ml20.clusterMembersByTitle('Matrix, The')
m2v_rs_ml20.clustering()
m2v_rs_ml20.clusterTags(81)
interests = m2v_rs_ml20.user_interests(10)
cid_interests = defaultdict(int)
total = 0
for title, rating in interests:
cid_interests[m2v_rs_ml20.movie2cid.get(title, -1)] += rating/5
total += (rating/5)
interests_ds = pd.DataFrame.from_dict({'score':cid_interests}, orient='columns')
interests_ds['score'] = (interests_ds['score'] / total)
interests_ds.sort(['score'], ascending=False, inplace=True)
#for cid in cid_interests.keys():
# cid_interests[cid] = round(cid_interests[cid]/total,5)
m2v_rs_ml20.user_mcate_interests(1)
interests = m2v_rs_ml20.user_mcate_interests(100, v=True)
for cid, score in interests.iterrows():
print(cid,score.values[0],cid,m2v_rs_ml20.clusterTags(cid).index.values[:3])
interests<jupyter_output><empty_output>
|
no_license
|
/week4/05-m2v-recsys-clustering.ipynb
|
geniuskch/fc-recsys-lecture
| 1 |
<jupyter_start><jupyter_text># Libraries
https://www.cienciadedatos.net/documentos/py17-regresion-logistica-python.html<jupyter_code># Data handling
# ==============================================================================
import pandas as pd
import numpy as np
# Plots
# ==============================================================================
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
# Preprocessing and modeling
# ==============================================================================
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.weightstats import ttest_ind
# # Matplotlib configuration
# # ==============================================================================
# plt.rcParams['image.cmap'] = "bwr"
# #plt.rcParams['figure.dpi'] = "100"
# plt.rcParams['savefig.bbox'] = "tight"
# style.use('ggplot') or plt.style.use('ggplot')
# # Warnings configuration
# # ==============================================================================
# import warnings
# warnings.filterwarnings('ignore')<jupyter_output><empty_output><jupyter_text># Modified data<jupyter_code>datos=pd.read_csv('data/datos_entrenar.csv')
datos=datos.set_index('Fecha')
datos
# Splitting the data into train and test sets
# ==============================================================================
X = datos[['26055100','26055120','26085160','26085170']]
y = datos['Inundacion']
X_train, X_test, y_train, y_test = train_test_split(
X.values.reshape(-1,4),
y.values.reshape(-1,1),
train_size = 0.8,
random_state = 1234,
shuffle = False
)
# Model creation
# ==============================================================================
# To avoid including any kind of regularization in the model, we specify
# penalty='none'
modelo = LogisticRegression(penalty='none')
modelo.fit(X = X_train.reshape(-1, 4), y = y_train)
# Model information
# ==============================================================================
print("Intercept:", modelo.intercept_)
print("Coeficiente:", list(zip(X.columns, modelo.coef_.flatten(), )))
print("Accuracy de entrenamiento:", modelo.score(X, y))
# Probabilistic predictions
# ==============================================================================
# With .predict_proba() we obtain, for each observation, the predicted probability
# of belonging to each of the two classes.
predicciones = modelo.predict_proba(X = X_test)
predicciones = pd.DataFrame(predicciones, columns = modelo.classes_)
predicciones.head(3)
# Predictions with final classification
# ==============================================================================
# With .predict() we obtain, for each observation, the classification predicted by
# the model. This classification corresponds to the class with the highest probability.
predicciones = modelo.predict(X = X_test)
predicciones
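# Accuracy on the test set (an added check, using the accuracy_score imported above)
print("Test accuracy:", accuracy_score(y_test.ravel(), predicciones))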
def make_confusion_matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
'''
This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.
Arguments
---------
cf: confusion matrix to be passed in
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
count: If True, show the raw number in the confusion matrix. Default is True.
percent:       If True, show the proportions for each category. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names)==cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten()/np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels,group_counts,group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
#Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
#if it is a binary confusion matrix, show some more stats
if len(cf)==2:
#Metrics for Binary Confusion Matrices
precision = cf[1,1] / sum(cf[:,1])
recall = cf[1,1] / sum(cf[1,:])
f1_score = 2*precision*recall / (precision + recall)
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
accuracy,precision,recall,f1_score)
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
if figsize==None:
#Get default figure size if not set
figsize = plt.rcParams.get('figure.figsize')
if xyticks==False:
#Do not show categories if xyticks is False
categories=False
# MAKE THE HEATMAP VISUALIZATION
plt.figure(figsize=figsize)
sns.heatmap(cf,annot=box_labels,fmt="",cmap=cmap,cbar=cbar,xticklabels=categories,yticklabels=categories)
if xyplotlabels:
plt.ylabel('True label')
plt.xlabel('Predicted label' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title)
labels = ['True 0','False 1','False 0','True 1']
categories = ['0', '1']
make_confusion_matrix(confusion_matrix(y_test,predicciones),cmap='Blues',group_names=labels,categories=categories)
plt.show()<jupyter_output><empty_output>
|
no_license
|
/Modeling/logistic regression.ipynb
|
groloboy/Tesis-ETL-Modelo
| 2 |
<jupyter_start><jupyter_text># RSAtoolbox
* clone it from https://github.com/rsagroup/rsatoolbox
  - forked version: https://github.com/eyshin05/rsatoolbox
* run a MATLAB function through the MATLAB engine for Python
### Description
* Our dataset is LSS beta values (processed by afni `3dLSS`)
* I'll perform ROI-based multivariate connectivity analysis on the data with Linear Discriminant Contrast (LDC)
- LDC <- That's why I use this toolbox
* According to the toolbox comments, if you prewhiten your dataset before running the function, the result is the crossnobis distance
  - i.e. the cross-validated Mahalanobis distance
### Goal
* Get the distances
* Make connectivity matrix<jupyter_code>%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import glob
import matlab.engine
import matplotlib.pyplot as plt
import nilearn.image
import numpy as np
import pandas as pd
import scipy.io, scipy.stats, scipy.spatial.distance
import seaborn as sns
%config InlineBackend.figure_format = 'retina'
# start matlab engine
eng = matlab.engine.start_matlab()
toolboxRoot = '/Users/eyshin/Documents/MATLAB/rsatoolbox-develop'
eng.addpath(eng.genpath(toolboxRoot));
# initialize directory path
data_dir = '/Volumes/clmnlab/GA/MVPA/LSS_pb02_MO_short_duration/data/'
# initialize ROIs (we use AAL2)
roi_dir = '/Volumes/clmnlab/GA/fmri_data/masks/AALnew/'
roi_path = sorted(glob.glob(roi_dir + 'AALnew_ROI_resam*.nii'))
roi_imgs = []
for path in roi_path:
roi_imgs.append(nilearn.image.load_img(path))
# our subject list
subj_list = [
'GA01', 'GA02', 'GA05', 'GA07', 'GA08', 'GA11', 'GA12', 'GA13', 'GA14', 'GA15',
'GA18', 'GA19', 'GA20', 'GA21', 'GA23', 'GA26', 'GA27', 'GA28', 'GA29', 'GA30',
'GA31', 'GA32', 'GA33', 'GA34', 'GA35', 'GA36', 'GA37', 'GA38', 'GA42', 'GA44',
'GB01', 'GB02', 'GB05', 'GB07', 'GB08', 'GB11', 'GB12', 'GB13', 'GB14', 'GB15',
'GB18', 'GB19', 'GB20', 'GB21', 'GB23', 'GB26', 'GB27', 'GB28', 'GB29', 'GB30',
'GB31', 'GB32', 'GB33', 'GB34', 'GB35', 'GB36', 'GB37', 'GB38', 'GB42', 'GB44'
]
# data loading
data = {}
for subj in subj_list:
for run in range(1, 7):
data[subj, run] = nilearn.image.load_img(data_dir + 'betasLSS.MO.shortdur.%s.r%02d.nii.gz' % (subj, run))
print(subj, end='\r')
# we don't use the first trial of each run
for key, value in data.items():
data[key] = nilearn.image.index_img(value, np.arange(1, 97))
# loading target - same in each run
cond_map = {1:1, 5:2, 21:3, 25:4}
target_pos = []
with open('/Volumes/clmnlab/GA/MVPA/LSS_pb02/behaviors/targetID.txt') as file:
for line in file:
target_pos.append(cond_map[int(line.strip())])
target_pos = target_pos[1:97]
# you can use nilearn.masking.apply_mask instead, but I think that function is a little slow...
def fast_masking(img, roi):
img_data = img.get_data()
roi_mask = roi.get_data().astype(bool)
if img_data.shape[:3] != roi_mask.shape:
raise ValueError('different shape while masking! img=%s and roi=%s' % (img_data.shape, roi_mask.shape))
return img_data[roi_mask, :].T
# this function is where the matlab engine instance is used - 1) save vectors in the matlab workspace and 2) run the function
def get_crossnobis_distance(X, target):
B = np.concatenate(X)
partition = [a+1 for a, x in enumerate(X) for i in range(len(x))]
    conditionVec = list(target) * len(X)  # repeat the condition labels once per run
eng.workspace['B'] = matlab.double(B.tolist())
eng.workspace['partition'] = matlab.double(partition)
eng.workspace['conditionVec'] = matlab.double(conditionVec)
(d, Sig) = eng.eval("rsa.distanceLDC(B, partition', conditionVec')", nargout=2)
return d, Sig
# our experimental design constructed with Practiced Mapping Runs (1, 2, 3) & Unpracticed Mapping Runs (4, 5, 6)
results = {}
for subj in subj_list:
for roi_index, roi in enumerate(roi_imgs, 1):
X = [
fast_masking(roi=roi, img=data[subj, 1]),
fast_masking(roi=roi, img=data[subj, 2]),
fast_masking(roi=roi, img=data[subj, 3]),
]
results['practiced', subj, roi_index] = get_crossnobis_distance(X, target_pos)
X = [
fast_masking(roi=roi, img=data[subj, 4]),
fast_masking(roi=roi, img=data[subj, 5]),
fast_masking(roi=roi, img=data[subj, 6]),
]
results['unpracticed', subj, roi_index] = get_crossnobis_distance(X, target_pos)
print(subj, roi_index, end='\r')
# just visualizing a RDM - averaged RDMs in practiced mapping condition
dist = np.mean([results['practiced', 'GA01', i][0] for i in range(1, 121)], axis=0)[0]
dist_mat = np.zeros((4, 4))
dist_mat[1, 0] = dist[0]
dist_mat[2, 0] = dist[1]
dist_mat[3, 0] = dist[2]
dist_mat[2, 1] = dist[3]
dist_mat[3, 1] = dist[4]
dist_mat[3, 2] = dist[5]
plt.matshow(dist_mat)
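# Equivalent construction (added): scipy's squareform expands the 6-element condensed
# distance vector into the full symmetric 4x4 matrix, assuming the pair ordering
# (0,1),(0,2),(0,3),(1,2),(1,3),(2,3) used in the manual filling above.
dist_mat_full = scipy.spatial.distance.squareform(dist)
plt.matshow(dist_mat_full)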
# averaged RDMs in unpracticed mapping condition
dist = np.mean([results['unpracticed', 'GA01', i][0] for i in range(1, 121)], axis=0)[0]
dist_mat = np.zeros((4, 4))
dist_mat[1, 0] = dist[0]
dist_mat[2, 0] = dist[1]
dist_mat[3, 0] = dist[2]
dist_mat[2, 1] = dist[3]
dist_mat[3, 1] = dist[4]
dist_mat[3, 2] = dist[5]
plt.matshow(dist_mat)
# align results
conn_results = []
for i in range(1, 121):
for j in range(i+1, 121):
corr = []
for subj in subj_list:
conn_results.append({
'subj': subj,
'late': 'B' in subj,
'practiced': True,
'from_roi': i,
'to_roi': j,
'correlation': np.corrcoef(results['practiced', subj, i][0], results['practiced', subj, j][0])[0, 1],
'cosine': scipy.spatial.distance.cosine(results['practiced', subj, i][0], results['practiced', subj, j][0])
})
conn_results.append({
'subj': subj,
'late': 'B' in subj,
'practiced': False,
'from_roi': i,
'to_roi': j,
'correlation': np.corrcoef(results['unpracticed', subj, i][0], results['unpracticed', subj, j][0])[0, 1],
'cosine': scipy.spatial.distance.cosine(results['unpracticed', subj, i][0], results['unpracticed', subj, j][0])
})
conn_df = pd.DataFrame(conn_results)
conn_df.head()
# helper function for calc mean measure
def mean_score(from_roi, to_roi, practiced, late, measure):
from_roi, to_roi = sorted((from_roi, to_roi))
sub_df = conn_df[(conn_df['from_roi'] == from_roi) & (conn_df['to_roi'] == to_roi) &
(conn_df['practiced'] == practiced) & (conn_df['late'] == late)]
return sub_df[measure].mean()
# name of regions
aal_names = {
1: 'Precentral_L', 2: 'Precentral_R', 3: 'Frontal_Sup_2_L', 4: 'Frontal_Sup_2_R',
5: 'Frontal_Mid_2_L', 6: 'Frontal_Mid_2_R', 7: 'Frontal_Inf_Oper_L', 8: 'Frontal_Inf_Oper_R',
9: 'Frontal_Inf_Tri_L', 10: 'Frontal_Inf_Tri_R', 11: 'Frontal_Inf_Orb_2_L', 12: 'Frontal_Inf_Orb_2_R',
13: 'Rolandic_Oper_L', 14: 'Rolandic_Oper_R', 15: 'Supp_Motor_Area_L', 16: 'Supp_Motor_Area_R',
17: 'Olfactory_L', 18: 'Olfactory_R', 19: 'Frontal_Sup_Medial_L', 20: 'Frontal_Sup_Medial_R',
21: 'Frontal_Med_Orb_L', 22: 'Frontal_Med_Orb_R', 23: 'Rectus_L', 24: 'Rectus_R',
25: 'OFCmed_L', 26: 'OFCmed_R', 27: 'OFCant_L', 28: 'OFCant_R', 29: 'OFCpost_L',
30: 'OFCpost_R', 31: 'OFClat_L', 32: 'OFClat_R', 33: 'Insula_L', 34: 'Insula_R',
35: 'Cingulate_Ant_L', 36: 'Cingulate_Ant_R', 37: 'Cingulate_Mid_L', 38: 'Cingulate_Mid_R',
39: 'Cingulate_Post_L', 40: 'Cingulate_Post_R', 41: 'Hippocampus_L', 42: 'Hippocampus_R',
43: 'ParaHippocampal_L', 44: 'ParaHippocampal_R', 45: 'Amygdala_L', 46: 'Amygdala_R',
47: 'Calcarine_L', 48: 'Calcarine_R', 49: 'Cuneus_L', 50: 'Cuneus_R',
51: 'Lingual_L', 52: 'Lingual_R', 53: 'Occipital_Sup_L', 54: 'Occipital_Sup_R',
55: 'Occipital_Mid_L', 56: 'Occipital_Mid_R', 57: 'Occipital_Inf_L', 58: 'Occipital_Inf_R',
59: 'Fusiform_L', 60: 'Fusiform_R', 61: 'Postcentral_L', 62: 'Postcentral_R',
63: 'Parietal_Sup_L', 64: 'Parietal_Sup_R', 65: 'Parietal_Inf_L', 66: 'Parietal_Inf_R',
67: 'SupraMarginal_L', 68: 'SupraMarginal_R', 69: 'Angular_L', 70: 'Angular_R',
71: 'Precuneus_L', 72: 'Precuneus_R', 73: 'Paracentral_Lobule_L', 74: 'Paracentral_Lobule_R',
75: 'Caudate_L', 76: 'Caudate_R', 77: 'Putamen_L', 78: 'Putamen_R',
79: 'Pallidum_L', 80: 'Pallidum_R', 81: 'Thalamus_L', 82: 'Thalamus_R',
83: 'Heschl_L', 84: 'Heschl_R', 85: 'Temporal_Sup_L', 86: 'Temporal_Sup_R',
87: 'Temporal_Pole_Sup_L', 88: 'Temporal_Pole_Sup_R', 89: 'Temporal_Mid_L',
90: 'Temporal_Mid_R', 91: 'Temporal_Pole_Mid_L', 92: 'Temporal_Pole_Mid_R',
93: 'Temporal_Inf_L', 94: 'Temporal_Inf_R', 95: 'Cerebelum_Crus1_L', 96: 'Cerebelum_Crus1_R',
97: 'Cerebelum_Crus2_L', 98: 'Cerebelum_Crus2_R', 99: 'Cerebelum_3_L', 100: 'Cerebelum_3_R',
101: 'Cerebelum_4_5_L', 102: 'Cerebelum_4_5_R', 103: 'Cerebelum_6_L', 104: 'Cerebelum_6_R',
105: 'Cerebelum_7b_L', 106: 'Cerebelum_7b_R', 107: 'Cerebelum_8_L', 108: 'Cerebelum_8_R',
109: 'Cerebelum_9_L', 110: 'Cerebelum_9_R', 111: 'Cerebelum_10_L', 112: 'Cerebelum_10_R',
113: 'Vermis_1_2', 114: 'Vermis_3', 115: 'Vermis_4_5', 116: 'Vermis_6',
117: 'Vermis_7', 118: 'Vermis_8', 119: 'Vermis_9', 120: 'Vermis_10'
}
# AAL2 modules ordering for visualization
motor = [1, 2, 15, 16, 61, 62]
visual = [47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60]
visuomotor = [63, 64, 65, 66]
basal = [75, 76, 77, 78, 81, 82]
hippo = [41, 42, 43, 44]
cerebellum = [95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120]
roi_order = motor + visual + visuomotor + basal + hippo + cerebellum
roi_modules = [(motor, 'Motor'),
(visual, 'Visual'),
(visuomotor, 'Visuomotor'),
(basal, 'Basal Ganglia'),
(hippo, 'Hippocampus'),
(cerebellum, 'Cerebellum')]
# xticks represent module information
xticks = [-0.5]
for xtick in [len(motor), len(visual), len(visuomotor), len(basal), len(hippo), len(cerebellum)]:
xticks.append(xticks[-1] + xtick)
xticks
# visualization (cosine similarity)
p_early_conn_mat = np.zeros((len(roi_order), len(roi_order)))
for i, from_roi in enumerate(roi_order):
for j, to_roi in enumerate(roi_order[i+1:]):
score = mean_score(from_roi, to_roi, True, False, 'cosine')
p_early_conn_mat[i, i+j+1] = score
p_early_conn_mat[i+j+1, i] = score
p_late_conn_mat = np.zeros((len(roi_order), len(roi_order)))
for i, from_roi in enumerate(roi_order):
for j, to_roi in enumerate(roi_order[i+1:]):
score = mean_score(from_roi, to_roi, True, True, 'cosine')
p_late_conn_mat[i, i+j+1] = score
p_late_conn_mat[i+j+1, i] = score
diff_conn_mat = p_early_conn_mat - p_late_conn_mat
plt.figure(figsize=(30,20))
ax = plt.subplot(2, 3, 1)
plt.matshow(p_early_conn_mat, fignum=False, vmax=1.0, cmap='viridis_r')
plt.xticks(xticks, [b for _, b in roi_modules], rotation=20, ha='left', fontsize=8)
plt.yticks(np.arange(0, len(roi_order)), [aal_names[i] for i in roi_order], fontsize=6)
plt.title('Prac Early\n\n')
plt.colorbar()
plt.subplot(2, 3, 2)
plt.matshow(p_late_conn_mat, fignum=False, vmax=1.0, cmap='viridis_r')
plt.xticks(xticks, [b for _, b in roi_modules], rotation=20, ha='left', fontsize=8)
plt.yticks(np.arange(0, len(roi_order)), [aal_names[i] for i in roi_order], fontsize=6)
plt.title('Prac Late\n\n')
plt.colorbar()
plt.subplot(2, 3, 3)
plt.matshow(diff_conn_mat, fignum=False, vmin=-0.4, vmax=0.4, cmap='RdBu')
plt.xticks(xticks, [b for _, b in roi_modules], rotation=20, ha='left', fontsize=8)
plt.yticks(np.arange(0, len(roi_order)), [aal_names[i] for i in roi_order], fontsize=6)
plt.title('Prac Early-Prac Late\n\n')
plt.colorbar()
up_early_conn_mat = np.zeros((len(roi_order), len(roi_order)))
for i, from_roi in enumerate(roi_order):
for j, to_roi in enumerate(roi_order[i+1:]):
score = mean_score(from_roi, to_roi, False, False, 'cosine')
up_early_conn_mat[i, i+j+1] = score
up_early_conn_mat[i+j+1, i] = score
up_late_conn_mat = np.zeros((len(roi_order), len(roi_order)))
for i, from_roi in enumerate(roi_order):
for j, to_roi in enumerate(roi_order[i+1:]):
score = mean_score(from_roi, to_roi, False, True, 'cosine')
up_late_conn_mat[i, i+j+1] = score
up_late_conn_mat[i+j+1, i] = score
diff_conn_mat = up_early_conn_mat - up_late_conn_mat
ax = plt.subplot(2, 3, 4)
plt.matshow(up_early_conn_mat, fignum=False, vmax=1.0, cmap='viridis_r')
plt.xticks(xticks, [b for _, b in roi_modules], rotation=20, ha='left', fontsize=8)
plt.yticks(np.arange(0, len(roi_order)), [aal_names[i] for i in roi_order], fontsize=6)
plt.title('Unprac Early\n\n')
plt.colorbar()
plt.subplot(2, 3, 5)
plt.matshow(up_late_conn_mat, fignum=False, vmax=1.0, cmap='viridis_r')
plt.xticks(xticks, [b for _, b in roi_modules], rotation=20, ha='left', fontsize=8)
plt.yticks(np.arange(0, len(roi_order)), [aal_names[i] for i in roi_order], fontsize=6)
plt.title('Unprac Late\n\n')
plt.colorbar()
plt.subplot(2, 3, 6)
plt.matshow(diff_conn_mat, fignum=False, vmin=-0.4, vmax=0.4, cmap='RdBu')
plt.xticks(xticks, [b for _, b in roi_modules], rotation=20, ha='left', fontsize=8)
plt.yticks(np.arange(0, len(roi_order)), [aal_names[i] for i in roi_order], fontsize=6)
plt.title('Unprac Early-Unprac Late\n\n')
plt.colorbar()
plt.show()<jupyter_output><empty_output>
|
permissive
|
/MVPA/20190708_Multivariate-connectivity_using_RSAtoolbox.ipynb
|
eyshin05/neuro-imaging-tutorials
| 1 |
<jupyter_start><jupyter_text># Implement KNN model <jupyter_code># create KNN model and train
from sklearn.neighbors import KNeighborsClassifier
K=KNeighborsClassifier(n_neighbors=7)
# train the model on the training dataset
K.fit(X_train,Y_train)
#test the model
Y_pred_knn=K.predict(X_test)
# Find accuracy of KNN
from sklearn.metrics import accuracy_score
acc_knn=accuracy_score(Y_test,Y_pred_knn)
acc_knn= round(acc_knn*100,2)
print("accuracy score in KNN is",acc_knn,"%")<jupyter_output>accuracy score in KNN is 88.16 %
<jupyter_text># Implement Logistic Regression<jupyter_code>from sklearn.linear_model import LogisticRegression
L=LogisticRegression()
L.fit(X_train,Y_train)
#test the model
Y_pred_lg=L.predict(X_test)
# find accuracy in Logistic regression
from sklearn.metrics import accuracy_score
acc_lg=accuracy_score(Y_test,Y_pred_lg)
acc_lg=round(acc_lg*100,2)
print("accuracy in logistic regression is",acc_lg,"%")<jupyter_output>accuracy in logistic regression is 58.68 %
<jupyter_text># Conclusion<jupyter_code># In this project we used KNN and Logistic Regression.
# Accuracy of KNN is 88.16%
# Accuracy of Logistic Regression is 58.68%
# Hence we should select KNN for this problem.<jupyter_output><empty_output>
|
no_license
|
/subhransu_sekhar_khuntia_200104108.ipynb
|
sekhar10/ML_task
| 3 |
<jupyter_start><jupyter_text>### Without dropout<jupyter_code>%run nmt_translate.py
_= compute_dev_pplx()
_ = compute_dev_bleu()<jupyter_output>100%|██████████| 100/100 [00:04<00:00, 22.27it/s]<jupyter_text>### With dropout - embedding + 1st and rest layer<jupyter_code>%run nmt_translate.py
_= compute_dev_pplx()
_= compute_dev_bleu()
%run nmt_translate.py
_ = compute_dev_pplx()
_ = compute_dev_bleu()<jupyter_output>100%|██████████| 100/100 [00:04<00:00, 20.38it/s]
|
no_license
|
/Dropout.ipynb
|
JeffreyJosanne/MT_I
| 2 |
<jupyter_start><jupyter_text># Parameter Management
Once we have chosen an architecture
and set our hyperparameters,
we proceed to the training loop,
where our goal is to find parameter values
that minimize our loss function.
After training, we will need these parameters
in order to make future predictions.
Additionally, we will sometimes wish
to extract the parameters
either to reuse them in some other context,
to save our model to disk so that
it may be executed in other software,
or for examination in the hope of
gaining scientific understanding.
Most of the time, we will be able
to ignore the nitty-gritty details
of how parameters are declared
and manipulated, relying on deep learning frameworks
to do the heavy lifting.
However, when we move away from
stacked architectures with standard layers,
we will sometimes need to get into the weeds
of declaring and manipulating parameters.
In this section, we cover the following:
* Accessing parameters for debugging, diagnostics, and visualizations.
* Parameter initialization.
* Sharing parameters across different model components.
We start by focusing on an MLP with one hidden layer.
<jupyter_code>import tensorflow as tf
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu),
tf.keras.layers.Dense(1),
])
X = tf.random.uniform((2, 4))
net(X)<jupyter_output><empty_output><jupyter_text>## Parameter Access
Let us start with how to access parameters
from the models that you already know.
When a model is defined via the `Sequential` class,
we can first access any layer by indexing
into the model as though it were a list.
Each layer's parameters are conveniently
located in its attribute.
We can inspect the parameters of the second fully-connected layer as follows.
<jupyter_code>print(net.layers[2].weights)<jupyter_output>[<tf.Variable 'dense_1/kernel:0' shape=(4, 1) dtype=float32, numpy=
array([[-0.70263696],
[-0.22071278],
[ 0.70914674],
[ 0.6964009 ]], dtype=float32)>, <tf.Variable 'dense_1/bias:0' shape=(1,) dtype=float32, numpy=array([0.], dtype=float32)>]
<jupyter_text>The output tells us a few important things.
First, this fully-connected layer
contains two parameters,
corresponding to that layer's
weights and biases, respectively.
Both are stored as single precision floats (float32).
Note that the names of the parameters
allow us to uniquely identify
each layer's parameters,
even in a network containing hundreds of layers.
### Targeted Parameters
Note that each parameter is represented
as an instance of the parameter class.
To do anything useful with the parameters,
we first need to access the underlying numerical values.
There are several ways to do this.
Some are simpler while others are more general.
The following code extracts the bias
from the second neural network layer, which returns a parameter class instance, and
further accesses that parameter's value.
<jupyter_code>print(type(net.layers[2].weights[1]))
print(net.layers[2].weights[1])
print(tf.convert_to_tensor(net.layers[2].weights[1]))<jupyter_output><class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<tf.Variable 'dense_1/bias:0' shape=(1,) dtype=float32, numpy=array([0.], dtype=float32)>
tf.Tensor([0.], shape=(1,), dtype=float32)
<jupyter_text>### All Parameters at Once
When we need to perform operations on all parameters,
accessing them one-by-one can grow tedious.
The situation can grow especially unwieldy
when we work with more complex blocks (e.g., nested blocks),
since we would need to recurse
through the entire tree to extract
each sub-block's parameters. Below we demonstrate accessing the parameters of the first fully-connected layer vs. accessing all layers.
<jupyter_code>print(net.layers[1].weights)
print(net.get_weights())<jupyter_output>[<tf.Variable 'dense/kernel:0' shape=(4, 4) dtype=float32, numpy=
array([[ 0.7081526 , -0.01244313, 0.55799276, -0.6492791 ],
[ 0.22770828, -0.770811 , -0.54992485, -0.34173292],
[-0.5914638 , 0.4005019 , -0.13743204, 0.28752023],
[-0.7954682 , 0.18603677, 0.1703785 , -0.03160411]],
dtype=float32)>, <tf.Variable 'dense/bias:0' shape=(4,) dtype=float32, numpy=array([0., 0., 0., 0.], dtype=float32)>]
[array([[ 0.7081526 , -0.01244313, 0.55799276, -0.6492791 ],
[ 0.22770828, -0.770811 , -0.54992485, -0.34173292],
[-0.5914638 , 0.4005019 , -0.13743204, 0.28752023],
[-0.7954682 , 0.18603677, 0.1703785 , -0.03160411]],
dtype=float32), array([0., 0., 0., 0.], dtype=float32), array([[-0.70263696],
[-0.22071278],
[ 0.70914674],
[ 0.6964009 ]], dtype=float32), array([0.], dtype=float32)]
<jupyter_text>This provides us with another way of accessing the parameters of the network as follows.
<jupyter_code>net.get_weights()[1]<jupyter_output><empty_output><jupyter_text>### Collecting Parameters from Nested Blocks
Let us see how the parameter naming conventions work
if we nest multiple blocks inside each other.
For that we first define a function that produces blocks
(a block factory, so to speak) and then
combine these inside yet larger blocks.
<jupyter_code>def block1(name):
return tf.keras.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu)],
name=name)
def block2():
net = tf.keras.Sequential()
for i in range(4):
# Nested here
net.add(block1(name=f'block-{i}'))
return net
rgnet = tf.keras.Sequential()
rgnet.add(block2())
rgnet.add(tf.keras.layers.Dense(1))
rgnet(X)<jupyter_output><empty_output><jupyter_text>Now that we have designed the network,
let us see how it is organized.
<jupyter_code>print(rgnet.summary())<jupyter_output>Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
sequential_2 (Sequential) (2, 4) 80
_________________________________________________________________
dense_6 (Dense) (2, 1) 5
=================================================================
Total params: 85
Trainable params: 85
Non-trainable params: 0
_________________________________________________________________
None
<jupyter_text>Since the layers are hierarchically nested,
we can also access them as though
indexing through nested lists.
For instance, we can access the first major block,
within it the second sub-block,
and within that the bias of the first layer,
with as follows.
<jupyter_code>rgnet.layers[0].layers[1].layers[1].weights[1]<jupyter_output><empty_output><jupyter_text>## Parameter Initialization
Now that we know how to access the parameters,
let us look at how to initialize them properly.
We discussed the need for proper initialization in :numref:`sec_numerical_stability`.
The deep learning framework provides default random initializations to its layers.
However, we often want to initialize our weights
according to various other protocols. The framework provides most commonly
used protocols, and also allows us to create a custom initializer.
By default, Keras initializes weight matrices uniformly by drawing from a range that is computed according to the input and output dimension, and the bias parameters are all set to zero.
TensorFlow provides a variety of initialization methods both in the root module and the `keras.initializers` module.
### Built-in Initialization
Let us begin by calling on built-in initializers.
The code below initializes all weight parameters
as Gaussian random variables
with standard deviation 0.01, while bias parameters are cleared to zero.
<jupyter_code>net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
4, activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01),
bias_initializer=tf.zeros_initializer()),
tf.keras.layers.Dense(1)])
net(X)
net.weights[0], net.weights[1]<jupyter_output><empty_output><jupyter_text>We can also initialize all the parameters
to a given constant value (say, 1).
<jupyter_code>net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
4, activation=tf.nn.relu,
kernel_initializer=tf.keras.initializers.Constant(1),
bias_initializer=tf.zeros_initializer()),
tf.keras.layers.Dense(1),
])
net(X)
net.weights[0], net.weights[1]<jupyter_output><empty_output><jupyter_text>We can also apply different initializers for certain blocks.
For example, below we initialize the first layer
with the Xavier initializer
and initialize the second layer
to a constant value of 42.
<jupyter_code>net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
4,
activation=tf.nn.relu,
kernel_initializer=tf.keras.initializers.GlorotUniform()),
tf.keras.layers.Dense(
1, kernel_initializer=tf.keras.initializers.Constant(1)),
])
net(X)
print(net.layers[1].weights[0])
print(net.layers[2].weights[0])<jupyter_output><tf.Variable 'dense_11/kernel:0' shape=(4, 4) dtype=float32, numpy=
array([[ 0.77454144, -0.42678392, 0.18757135, 0.2861511 ],
[-0.12093848, -0.63353795, -0.55993074, -0.26746225],
[ 0.50490373, 0.38413602, -0.83991975, 0.181912 ],
[-0.08994305, -0.03767806, 0.82064635, -0.33605212]],
dtype=float32)>
<tf.Variable 'dense_12/kernel:0' shape=(4, 1) dtype=float32, numpy=
array([[1.],
[1.],
[1.],
[1.]], dtype=float32)>
<jupyter_text>### Custom Initialization
Sometimes, the initialization methods we need
are not provided by the deep learning framework.
In the example below, we define an initializer
for any weight parameter $w$ using the following strange distribution:
$$
\begin{aligned}
w \sim \begin{cases}
U(5, 10) & \text{ with probability } \frac{1}{4} \\
0 & \text{ with probability } \frac{1}{2} \\
U(-10, -5) & \text{ with probability } \frac{1}{4}
\end{cases}
\end{aligned}
$$
Here we define a subclass of `Initializer` and implement the `__call__`
function that returns a desired tensor given the shape and data type.
<jupyter_code>class MyInit(tf.keras.initializers.Initializer):
def __call__(self, shape, dtype=None):
data=tf.random.uniform(shape, -10, 10, dtype=dtype)
factor=(tf.abs(data) >= 5)
factor=tf.cast(factor, tf.float32)
return data * factor
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
4,
activation=tf.nn.relu,
kernel_initializer=MyInit()),
tf.keras.layers.Dense(1),
])
net(X)
print(net.layers[1].weights[0])<jupyter_output><tf.Variable 'dense_13/kernel:0' shape=(4, 4) dtype=float32, numpy=
array([[ 5.889845 , 8.331539 , 0. , -0. ],
[-0. , -0. , -0. , -9.657394 ],
[-0. , -0. , -0. , -0. ],
[-0. , -5.579841 , -5.4117727, -7.462876 ]], dtype=float32)>
<jupyter_text>Note that we always have the option
of setting parameters directly.
<jupyter_code>net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)
net.layers[1].weights[0][0, 0].assign(42)
net.layers[1].weights[0]<jupyter_output><empty_output><jupyter_text>## Tied Parameters
Often, we want to share parameters across multiple layers.
Let us see how to do this elegantly.
In the following we allocate a dense layer
and then use its parameters specifically
to set those of another layer.
<jupyter_code># tf.keras behaves a bit differently. It removes the duplicate layer
# automatically
shared = tf.keras.layers.Dense(4, activation=tf.nn.relu)
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
shared,
shared,
tf.keras.layers.Dense(1),
])
net(X)
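# Because the duplicate layer is removed, the shared kernel and bias are stored only
# once; together with the final Dense layer this gives 4 weight tensors in total
# (an added sanity check, not part of the original text).
print(len(net.weights))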
# Check that the duplicate layer was removed, leaving only three layers
print(len(net.layers) == 3)<jupyter_output>True
|
permissive
|
/chapter_deep-learning-computation/parameters.ipynb
|
KiLJ4EdeN/d2l-tensorflow
| 14 |
<jupyter_start><jupyter_text># Clustering Crypto<jupyter_code># Initial imports
import requests
import pandas as pd
import matplotlib.pyplot as plt
import hvplot.pandas
import plotly.express as px
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
<jupyter_output><empty_output><jupyter_text>### Fetching Cryptocurrency Data<jupyter_code>from pathlib import Path
file_path = Path("Resources/crypto_data.csv")
df = pd.read_csv(file_path)
df.head()
for col in df.columns:
print(col)
df = df.rename(columns={"Unnamed: 0": " "})
df = df.set_index(" ")
df.head()<jupyter_output><empty_output><jupyter_text>### Data Preprocessing<jupyter_code># Keep only necessary columns:
# 'CoinName','Algorithm','IsTrading','ProofType','TotalCoinsMined','TotalCoinSupply'
# 'TotalCoinSupply' is titled 'MaxSupply'
df = df[['CoinName','Algorithm','IsTrading','ProofType','TotalCoinsMined','TotalCoinSupply']]
df.head()
# Keep only cryptocurrencies that are trading
df = df[df['IsTrading'] == True]
df.head()
# Keep only cryptocurrencies with a working algorithm
df = df.loc[df['Algorithm'] != "N/A"]
df.head()
# Remove the "IsTrading" column
df.drop('IsTrading', axis=1, inplace=True)
df.head()
# Remove rows with at least 1 null value
df = df.dropna()
df.head()
# Remove rows with cryptocurrencies having no coins mined
df = df.loc[df['TotalCoinsMined'] > 0]
df.head()
# Drop rows where there are 'N/A' text values
df = df[~(df == 'N/A').any(axis=1)]
df.head()
# Store the 'CoinName'column in its own DataFrame prior to dropping it from crypto_df
coin_names = pd.DataFrame(df['CoinName'])
df = df.drop(columns = ['CoinName'])
df.head()
coin_names.head()
# Create dummy variables for text features
X = pd.get_dummies(df, columns=['Algorithm', 'ProofType'])
X.head()
# Standardize data
crypto_scaled = StandardScaler().fit_transform(X)
print(crypto_scaled[0:1])<jupyter_output>[[-0.11710817 -0.1528703 -0.0433963 -0.0433963 -0.0433963 -0.06142951
-0.07530656 -0.0433963 -0.06142951 -0.06142951 -0.0433963 -0.0433963
-0.19245009 -0.06142951 -0.09740465 -0.0433963 -0.11547005 -0.07530656
-0.0433963 -0.0433963 -0.15191091 -0.0433963 -0.13118084 -0.0433963
-0.0433963 -0.08703883 -0.0433963 -0.0433963 -0.0433963 -0.0433963
-0.06142951 -0.0433963 -0.08703883 -0.08703883 -0.08703883 -0.0433963
-0.13118084 -0.13840913 -0.13840913 -0.0433963 -0.06142951 -0.0433963
-0.07530656 -0.18168574 -0.0433963 -0.0433963 -0.0433963 -0.07530656
-0.15826614 -0.31491833 -0.0433963 -0.08703883 -0.07530656 -0.06142951
1.38675049 -0.0433963 -0.0433963 -0.06142951 -0.0433963 -0.0433963
-0.0433963 -0.0433963 -0.0433963 -0.0433963 -0.0433963 -0.0433963
-0.39879994 -0.0433963 -0.18168574 -0.0433963 -0.08703883 -0.08703883
-0.10680283 -0.0433963 -0.13118084 -0.0433963 -0.0433963 -0.0433963
-0.0433963 -0.07530656 -0.43911856 -0.04339[...]<jupyter_text>### Reducing Dimensions Using PCA<jupyter_code># Use PCA to reduce dimensions to 3 principal components
pca = PCA(n_components=3)
crypto_pca = pca.fit_transform(crypto_scaled)
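# Variance retained by the three principal components (an added check)
print("Explained variance ratio:", pca.explained_variance_ratio_,
      "| total:", pca.explained_variance_ratio_.sum())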
# Create a DataFrame with the principal components data
crypto_pca_df = pd.DataFrame(
data=crypto_pca, columns=["PC 1", "PC 2", "PC 3"]
).set_index(df.index)
crypto_pca_df.head()<jupyter_output><empty_output><jupyter_text>### Clustering Crytocurrencies Using K-Means
#### Find the Best Value for `k` Using the Elbow Curve<jupyter_code>inertia = []
k = list(range(1, 11))
# Calculate the inertia for the range of k values
for i in k:
km = KMeans(n_clusters=i, random_state=0)
km.fit(crypto_pca_df)
inertia.append(km.inertia_)
# Create the Elbow Curve using hvPlot
elbow_data = {"k": k, "inertia": inertia}
df_elbow = pd.DataFrame(elbow_data)
df_elbow.hvplot.line(x="k", y="inertia", xticks=k, title="Elbow Curve")<jupyter_output><empty_output><jupyter_text>#### Running K-Means with `k=`<jupyter_code># Initialize the K-Means model
model = KMeans(n_clusters=3, random_state=0)
# Fit the model
model.fit(crypto_pca_df)
# Predict clusters
predictions = model.predict(crypto_pca_df)
# Create a new DataFrame including predicted clusters and cryptocurrencies features
crypto_pca_df["Class"] = model.labels_
clustered_df = pd.concat([coin_names, df, crypto_pca_df], axis=1)
clustered_df.head(10)<jupyter_output><empty_output><jupyter_text>### Visualizing Results
#### 3D-Scatter with Clusters<jupyter_code># Create a 3D-Scatter with the PCA data and the clusters
fig = px.scatter_3d(
    clustered_df,
    x="PC 1",
    y="PC 2",
    z="PC 3",
    hover_name="CoinName",
hover_data=['Algorithm'],
symbol="Class",
width=800,
)
fig.update_layout(legend=dict(x=0, y=1))
fig.show()<jupyter_output><empty_output><jupyter_text>#### Table of Tradable Cryptocurrencies<jupyter_code># Table with tradable cryptos
columns = ['CoinName', 'Algorithm', 'ProofType', 'TotalCoinSupply', 'TotalCoinsMined', 'Class']
table = clustered_df.hvplot.table(columns)
table
# Print the total number of tradable cryptocurrencies
print(f"The total number of tradable cryptocurrencies is {clustered_df['CoinName'].count()}")<jupyter_output>The total number of tradable cryptocurrencies is 532
<jupyter_text>#### Scatter Plot with Tradable Cryptocurrencies<jupyter_code>clustered_df.dtypes
clustered_df["TotalCoinSupply"] = clustered_df["TotalCoinSupply"].astype('float')
# Scale data to create the scatter plot
scatter_data = clustered_df.drop(columns=['CoinName', 'Algorithm', 'ProofType'])
data_scaler = StandardScaler()
data = pd.DataFrame(data_scaler.fit_transform(scatter_data),
columns=["TotalCoinsMined", "TotalCoinSupply", "PC 1", "PC 2", "PC 3", "Class"]
).set_index(clustered_df.index)
data.head()
# Plot the scatter with x="TotalCoinsMined" and y="TotalCoinSupply"
data.hvplot(
kind="scatter",
x= "TotalCoinsMined",
y= "TotalCoinSupply",
hover_cols=["CoinName"],
color="Class")<jupyter_output><empty_output>
|
no_license
|
/ClusteringCrypto/crypto_clustering-use_csv.ipynb
|
WillyFeid/13_AWS-Lex
| 9 |
<jupyter_start><jupyter_text>## 05 Scraping data with Requests and Beautiful Soup
To now, we've covered means of grabbing data that are formatted to grab. The term 'web scraping' refers to the messier means of pulling material from web sites that were really meant for people, not for computers. Web sites, of course, can include a variety of objects: text, images, video, flash, etc., and your success at scraping what you want will vary. In other words, scraping involves a bit of [MacGyvering](https://en.wiktionary.org/wiki/MacGyver).
Useful packages for scraping are `requests` and `bs4`/`BeautifulSoup`, which code is included to install these below.
We'll run through a few quick examples, but for more on this topic, I recommend:
* http://www.pythonforbeginners.com/python-on-the-web/beautifulsoup-4-python/
* http://web.stanford.edu/~zlotnick/TextAsData/Web_Scraping_with_Beautiful_Soup.html
* http://stanford.edu/~mgorkove/cgi-bin/rpython_tutorials/webscraping_with_lxml.php<jupyter_code># Import the requests and beautiful soup
import requests
try:
from bs4 import BeautifulSoup
except:
!pip install beautifulsoup4
from bs4 import BeautifulSoup
# Import re, a package for using regular expressions
import re<jupyter_output><empty_output><jupyter_text>The `requests` package works a lot like the urllib package in that it sends a request to a server and stores the server's response in a variable, here named `response`. <jupyter_code># Send a request to a web page
response = requests.get('https://xkcd.com/869')
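# (Added check) confirm the request succeeded before parsing; 200 indicates success
print(response.status_code)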
# The response object simply has the contents of the web page at the address provided
print(response.text)<jupyter_output><empty_output><jupyter_text>`BeautifulSoup` is designed to intelligently read raw HTML code, i.e., what is stored in the `response` variable generated above. The command below reads in the raw HTML and parses it into logical components that we can command.
The `lxml` in the command specifies a particular parser for deconstructing the HTML... <jupyter_code># BeautifulSoup
soup = BeautifulSoup(response.text, 'lxml')
type(soup)<jupyter_output><empty_output><jupyter_text>Here we search the text of the web page's body for any instances of `https://....png`, that is any link to a PNG image embedded in the page. This is done using `re` and implementing regular expressions (see https://developers.google.com/edu/python/regular-expressions for more info on this useful module...)
The `match` object returned by `search()` holds information about the nature of the match, including the original input string, the regular expression used, and the location within the original string where the pattern occurs. The `group` property of the match is the full string that was matched.<jupyter_code># Search the page for embedded links to PNG files
match = re.search('https://.*\.png', soup.body.text)
#What was found in the search
print(match.group())
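# The match object also records where the pattern occurs in the searched text
# (an added illustration of the properties described above)
print(match.span())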
# And here is some Jupyter code to display the resulting picture
from IPython.display import Image
Image(url=match.group())<jupyter_output><empty_output>
|
no_license
|
/05a-Scraping-Data-With-Requests-and-BeautifulSoup.ipynb
|
johnpfay/GettingData
| 4 |
<jupyter_start><jupyter_text># Generative Adversarial Networks (GANs)
So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem, our labels were in vocabulary space and we’d learned a recurrence to capture multi-word labels). In this notebook, we will expand our repertoire, and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images.
### What is a GAN?
In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called Generative Adversarial Networks (GANs for short). In a GAN, we build two different neural networks. Our first network is a traditional classification network, called the **discriminator**. We will train the discriminator to take images, and classify them as being real (belonging to the training set) or fake (not present in the training set). Our other network, called the **generator**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real.
We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game:
$$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$
where $x \sim p_\text{data}$ are samples from the input data, $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$.
To optimize this minimax game, we will alternate between taking gradient *descent* steps on the objective for $G$, and gradient *ascent* steps on the objective for $D$:
1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__.
2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__.
While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to alleviate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661).
In this assignment, we will alternate the following updates:
1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data:
$$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$
2. Update the discriminator ($D$), to maximize the probability of the discriminator making the correct choice on real and generated data:
$$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$
### What else is there?
Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of 17 hacks that are useful for getting models working). Improving the stability and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that changes the objective function to Wasserstein distance and yields much more stable results across model architectures: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028).
GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is Variational Autoencoders (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variational autoencoders combine neural networks with variational inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs.
Example pictures of what you should expect (yours might look slightly different):

## Setup<jupyter_code>from __future__ import print_function, division
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# A bunch of utility functions
def show_images(images):
images = np.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(img.reshape([sqrtimg,sqrtimg]))
return
def preprocess_img(x):
return 2 * x - 1.0
def deprocess_img(x):
return (x + 1.0) / 2.0
def rel_error(x,y):
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def count_params():
"""Count the number of parameters in the current TensorFlow graph """
param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])
return param_count
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
return session
answers = np.load('gan-checks-tf.npz')
<jupyter_output><empty_output><jupyter_text>## Dataset
GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which is 60,000 training and 10,000 test images. Each picture contains a centered image of white digit on black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy.
To simplify our code here, we will use the TensorFlow MNIST wrapper, which downloads and loads the MNIST dataset. See the [documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/datasets/mnist.py) for more information about the interface. The default parameters will take 5,000 of the training examples and place them into a validation dataset. The data will be saved into a folder called `MNIST_data`.
**Heads-up**: The TensorFlow MNIST wrapper returns images as vectors. That is, they're size (batch, 784). If you want to treat them as images, we have to resize them to (batch,28,28) or (batch,28,28,1). They are also type np.float32 and bounded [0,1]. <jupyter_code>from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./cs231n/datasets/MNIST_data', one_hot=False)
# show a batch
show_images(mnist.train.next_batch(16)[0])<jupyter_output>WARNING:tensorflow:From <ipython-input-2-d681c923e097>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From /home/wangx/miniconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From /home/wangx/miniconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting ./cs231n/datas[...]<jupyter_text>## LeakyReLU
In the cell below, you should implement a LeakyReLU. See the [class notes](http://cs231n.github.io/neural-networks-1/) (where alpha is a small number) or equation (3) in [this paper](http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). LeakyReLUs keep ReLU units from dying and are often used in GAN methods (as are maxout units, however those increase model size and therefore are not used in this notebook).
HINT: You should be able to use `tf.maximum`<jupyter_code>def leaky_relu(x, alpha=0.01):
"""Compute the leaky ReLU activation function.
Inputs:
- x: TensorFlow Tensor with arbitrary shape
- alpha: leak parameter for leaky ReLU
Returns:
TensorFlow Tensor with the same shape as x
"""
# TODO: implement leaky ReLU
return tf.maximum(x,alpha*x)
<jupyter_output><empty_output><jupyter_text>Test your leaky ReLU implementation. You should get errors < 1e-10<jupyter_code>def test_leaky_relu(x, y_true):
tf.reset_default_graph()
with get_session() as sess:
y_tf = leaky_relu(tf.constant(x))
y = sess.run(y_tf)
print('Maximum error: %g'%rel_error(y_true, y))
test_leaky_relu(answers['lrelu_x'], answers['lrelu_y'])<jupyter_output>Maximum error: 0
<jupyter_text>## Random Noise
Generate a TensorFlow `Tensor` containing uniform noise from -1 to 1 with shape `[batch_size, dim]`.<jupyter_code>def sample_noise(batch_size, dim):
"""Generate random uniform noise from -1 to 1.
Inputs:
- batch_size: integer giving the batch size of noise to generate
- dim: integer giving the dimension of the the noise to generate
Returns:
TensorFlow Tensor containing uniform noise in [-1, 1] with shape [batch_size, dim]
"""
# TODO: sample and return noise
return tf.random_uniform(shape=[batch_size,dim],minval=-1,maxval=1)
<jupyter_output><empty_output><jupyter_text>Make sure noise is the correct shape and type:<jupyter_code>def test_sample_noise():
batch_size = 3
dim = 4
tf.reset_default_graph()
with get_session() as sess:
z = sample_noise(batch_size, dim)
# Check z has the correct shape
assert z.get_shape().as_list() == [batch_size, dim]
# Make sure z is a Tensor and not a numpy array
assert isinstance(z, tf.Tensor)
# Check that we get different noise for different evaluations
z1 = sess.run(z)
z2 = sess.run(z)
assert not np.array_equal(z1, z2)
# Check that we get the correct range
assert np.all(z1 >= -1.0) and np.all(z1 <= 1.0)
print("All tests passed!")
test_sample_noise()<jupyter_output>All tests passed!
<jupyter_text>## Discriminator
Our first step is to build a discriminator. You should use the layers in `tf.layers` to build the model.
All fully connected layers should include bias terms.
Architecture:
* Fully connected layer from size 784 to 256
* LeakyReLU with alpha 0.01
* Fully connected layer from 256 to 256
* LeakyReLU with alpha 0.01
* Fully connected layer from 256 to 1
The output of the discriminator should have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image.<jupyter_code>def discriminator(x):
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
with tf.variable_scope("discriminator"):
# TODO: implement architecture
input_layer = tf.reshape(x,[-1,784])
fc1 = tf.layers.dense(inputs=input_layer, units=256,activation=None,use_bias=True)
lrelu1 = leaky_relu(fc1, 0.01)
fc2 = tf.layers.dense(inputs=lrelu1, units=256,activation=None,use_bias=True)
lrelu2 = leaky_relu(fc2, 0.01)
logits = tf.layers.dense(inputs=lrelu2, units=1,activation=None,use_bias=True)
return logits<jupyter_output><empty_output><jupyter_text>Test to make sure the number of parameters in the discriminator is correct:<jupyter_code>def test_discriminator(true_count=267009):
tf.reset_default_graph()
with get_session() as sess:
y = discriminator(tf.ones((2, 784)))
cur_count = count_params()
if cur_count != true_count:
            print('Incorrect number of parameters in discriminator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in discriminator.')
test_discriminator()<jupyter_output>Correct number of parameters in discriminator.
<jupyter_text>## Generator
Now to build a generator. You should use the layers in `tf.layers` to construct the model. All fully connected layers should include bias terms.
Architecture:
* Fully connected layer from tf.shape(z)[1] (the number of noise dimensions) to 1024
* ReLU
* Fully connected layer from 1024 to 1024
* ReLU
* Fully connected layer from 1024 to 784
* TanH (To restrict the output to be [-1,1])<jupyter_code>def generator(z):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
with tf.variable_scope("generator"):
# TODO: implement architecture
fc1 = tf.layers.dense(inputs=z,units=1024,activation=tf.nn.relu,use_bias=True)
fc2 = tf.layers.dense(inputs=fc1,units=1024,activation=tf.nn.relu,use_bias=True)
img = tf.layers.dense(inputs=fc2,units=784,activation=tf.nn.tanh,use_bias=True)
return img<jupyter_output><empty_output><jupyter_text>Test to make sure the number of parameters in the generator is correct:<jupyter_code>def test_generator(true_count=1858320):
tf.reset_default_graph()
with get_session() as sess:
y = generator(tf.ones((1, 4)))
cur_count = count_params()
if cur_count != true_count:
            print('Incorrect number of parameters in generator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in generator.')
test_generator()<jupyter_output>Correct number of parameters in generator.
<jupyter_text># GAN Loss
Compute the generator and discriminator loss. The generator loss is:
$$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$
and the discriminator loss is:
$$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$
Note that these are negated from the equations presented earlier as we will be *minimizing* these losses.
**HINTS**: Use [tf.ones_like](https://www.tensorflow.org/api_docs/python/tf/ones_like) and [tf.zeros_like](https://www.tensorflow.org/api_docs/python/tf/zeros_like) to generate labels for your discriminator. Use [sigmoid_cross_entropy loss](https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits) to help compute your loss function. Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing.<jupyter_code>def gan_loss(logits_real, logits_fake):
"""Compute the GAN loss.
Inputs:
- logits_real: Tensor, shape [batch_size, 1], output of discriminator
Log probability that the image is real for each real image
- logits_fake: Tensor, shape[batch_size, 1], output of discriminator
Log probability that the image is real for each fake image
    Returns:
- D_loss: discriminator loss scalar
- G_loss: generator loss scalar
"""
# TODO: compute D_loss and G_loss
labels_ones = tf.ones_like(logits_fake)
labels_zeros = tf.zeros_like(logits_fake)
# sigmoid_cross_entropy_with_logits = labels * -log(sigmoid(logits)) + (1 - labels) * -log(1 - sigmoid(logits))
# use labels_ones and labels_zeros
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_ones,logits=logits_fake))
D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_ones,logits=logits_real))\
+ tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_zeros,logits=logits_fake))
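    # Why this matches the equations above: with labels of ones,
    # sigmoid_cross_entropy_with_logits(logits) = -log(sigmoid(logits)) = -log(D(.)),
    # and with labels of zeros it equals -log(1 - sigmoid(logits)) = -log(1 - D(.));
    # tf.reduce_mean over the minibatch approximates each expectation.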
return D_loss, G_loss<jupyter_output><empty_output><jupyter_text>Test your GAN loss. Make sure both the generator and discriminator loss are correct. You should see errors less than 1e-5.<jupyter_code>def test_gan_loss(logits_real, logits_fake, d_loss_true, g_loss_true):
tf.reset_default_graph()
with get_session() as sess:
d_loss, g_loss = sess.run(gan_loss(tf.constant(logits_real), tf.constant(logits_fake)))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_gan_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_true'], answers['g_loss_true'])<jupyter_output>Maximum error in d_loss: 0
Maximum error in g_loss: 0
<jupyter_text># Optimizing our loss
Make an `AdamOptimizer` with a 1e-3 learning rate and beta1=0.5 to minimize G_loss and D_loss separately. The trick of decreasing beta1 was shown to be effective in helping GANs converge in the [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) paper. In fact, with our current hyperparameters, if you set beta1 to the TensorFlow default of 0.9, there's a good chance your discriminator loss will go to zero and the generator will fail to learn entirely. This is a common failure mode in GANs: if D(x) learns too fast (e.g. its loss goes near zero), G(z) is never able to learn. Often D(x) is trained with SGD with momentum or RMSProp instead of Adam, but here we'll use Adam for both D(x) and G(z). <jupyter_code># TODO: create an AdamOptimizer for D_solver and G_solver
def get_solvers(learning_rate=1e-3, beta1=0.5):
"""Create solvers for GAN training.
Inputs:
- learning_rate: learning rate to use for both solvers
- beta1: beta1 parameter for both solvers (first moment decay)
Returns:
- D_solver: instance of tf.train.AdamOptimizer with correct learning_rate and beta1
- G_solver: instance of tf.train.AdamOptimizer with correct learning_rate and beta1
"""
D_solver = tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1)
G_solver = tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=beta1)
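    # Usage (as done further below): each solver minimizes its own loss over its own variables,
    # e.g. D_solver.minimize(D_loss, var_list=D_vars) and G_solver.minimize(G_loss, var_list=G_vars).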
return D_solver, G_solver<jupyter_output><empty_output><jupyter_text>## Putting it all together
Now for just a bit of Lego construction... Read this section over carefully to understand how we'll compose the generator and discriminator.<jupyter_code>tf.reset_default_graph()
# number of images for each batch
batch_size = 128
# our noise dimension
noise_dim = 96
# placeholder for images from the training dataset
x = tf.placeholder(tf.float32, [None, 784])
# random noise fed into our generator
z = sample_noise(batch_size, noise_dim)
# generated images
G_sample = generator(z)
with tf.variable_scope("") as scope:
#scale images to be -1 to 1
logits_real = discriminator(preprocess_img(x))
# Re-use discriminator weights on new inputs
scope.reuse_variables()
logits_fake = discriminator(G_sample)
# Get the list of variables for the discriminator and generator
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generator')
# get our solver
D_solver, G_solver = get_solvers()
# get our loss
D_loss, G_loss = gan_loss(logits_real, logits_fake)
# setup training steps
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
D_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'discriminator')
G_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'generator')<jupyter_output><empty_output><jupyter_text># Training a GAN!
Well that wasn't so hard, was it? In the iterations in the low 100s you should see black backgrounds, fuzzy shapes as you approach iteration 1000, and decent shapes, about half of which will be sharp and clearly recognizable as we pass 3000. In our case, we'll simply train D(x) and G(z) with one batch each every iteration. However, papers often experiment with different schedules of training D(x) and G(z), sometimes doing one for more steps than the other, or even training each one until the loss gets "good enough" and then switching to training the other. <jupyter_code># a giant helper function
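# Illustrative alternative schedule (not used in this notebook; `k` and
# `next_real_batch` are hypothetical names): give D several updates per G update.
#   for it in range(max_iter):
#       for _ in range(k):
#           sess.run(D_train_step, feed_dict={x: next_real_batch()})
#       sess.run(G_train_step)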
def run_a_gan(sess, G_train_step, G_loss, D_train_step, D_loss, G_extra_step, D_extra_step,\
show_every=250, print_every=50, batch_size=128, num_epoch=10):
"""Train a GAN for a certain number of epochs.
Inputs:
- sess: A tf.Session that we want to use to run our data
- G_train_step: A training step for the Generator
- G_loss: Generator loss
    - D_train_step: A training step for the Discriminator
- D_loss: Discriminator loss
- G_extra_step: A collection of tf.GraphKeys.UPDATE_OPS for generator
- D_extra_step: A collection of tf.GraphKeys.UPDATE_OPS for discriminator
Returns:
Nothing
"""
# compute the number of iterations we need
max_iter = int(mnist.train.num_examples*num_epoch/batch_size)
for it in range(max_iter):
# every show often, show a sample result
if it % show_every == 0:
samples = sess.run(G_sample)
fig = show_images(samples[:16])
plt.show()
print()
# run a batch of data through the network
        minibatch, minibatch_y = mnist.train.next_batch(batch_size)
_, D_loss_curr = sess.run([D_train_step, D_loss], feed_dict={x: minibatch})
_, G_loss_curr = sess.run([G_train_step, G_loss], feed_dict={x: minibatch})
# print loss every so often.
# We want to make sure D_loss doesn't go to 0
if it % print_every == 0:
print('Iter: {}, D: {:.4}, G:{:.4}'.format(it,D_loss_curr,G_loss_curr))
print('Final images')
samples = sess.run(G_sample)
fig = show_images(samples[:16])
plt.show()<jupyter_output><empty_output><jupyter_text>#### Train your GAN! This should take about 10 minutes on a CPU, or less than a minute on GPU.<jupyter_code>with get_session() as sess:
sess.run(tf.global_variables_initializer())
run_a_gan(sess,G_train_step,G_loss,D_train_step,D_loss,G_extra_step,D_extra_step)<jupyter_output><empty_output><jupyter_text># Least Squares GAN
We'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss:
$$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$
and the discriminator loss:
$$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$
**HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`score_real` and `score_fake`).<jupyter_code>def lsgan_loss(score_real, score_fake):
"""Compute the Least Squares GAN loss.
Inputs:
- score_real: Tensor, shape [batch_size, 1], output of discriminator
score for each real image
- score_fake: Tensor, shape[batch_size, 1], output of discriminator
score for each fake image
Returns:
- D_loss: discriminator loss scalar
- G_loss: generator loss scalar
"""
# TODO: compute D_loss and G_loss
G_loss = 0.5 * tf.reduce_mean((score_fake-1)**2)
D_loss = 0.5 * tf.reduce_mean((score_real-1)**2)\
+ 0.5 * tf.reduce_mean(score_fake**2)
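    # Illustrative check: if score_real = score_fake = 0 everywhere, then
    # D_loss = 0.5*(0-1)^2 + 0.5*0^2 = 0.5 and G_loss = 0.5*(0-1)^2 = 0.5.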
return D_loss, G_loss<jupyter_output><empty_output><jupyter_text>Test your LSGAN loss. You should see errors less than 1e-7.<jupyter_code>def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true):
with get_session() as sess:
d_loss, g_loss = sess.run(
lsgan_loss(tf.constant(score_real), tf.constant(score_fake)))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_lsgan_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_lsgan_true'], answers['g_loss_lsgan_true'])<jupyter_output>Maximum error in d_loss: 0
Maximum error in g_loss: 0
<jupyter_text>Create new training steps so we instead minimize the LSGAN loss:<jupyter_code>D_loss, G_loss = lsgan_loss(logits_real, logits_fake)
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
with get_session() as sess:
sess.run(tf.global_variables_initializer())
run_a_gan(sess, G_train_step, G_loss, D_train_step, D_loss, G_extra_step, D_extra_step)<jupyter_output><empty_output><jupyter_text># INLINE QUESTION 1:
Describe how the visual quality of the samples changes over the course of training. Do you notice anything about the distribution of the samples? How do the results change across different training runs?** (Write Your Answer In This Cell) **# Deep Convolutional GANs
In the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks as our discriminators and generators.
#### Discriminator
We will use a discriminator inspired by the TensorFlow MNIST classification [tutorial](https://www.tensorflow.org/get_started/mnist/pros), which is able to get above 99% accuracy on the MNIST dataset fairly quickly. *Be sure to check the dimensions of x and reshape when needed*, fully connected blocks expect [N,D] Tensors while conv2d blocks expect [N,H,W,C] Tensors.
Architecture:
* 32 Filters, 5x5, Stride 1, Leaky ReLU(alpha=0.01)
* Max Pool 2x2, Stride 2
* 64 Filters, 5x5, Stride 1, Leaky ReLU(alpha=0.01)
* Max Pool 2x2, Stride 2
* Flatten
* Fully Connected size 4 x 4 x 64, Leaky ReLU(alpha=0.01)
* Fully Connected size 1<jupyter_code>import tensorflow as tf
import tensorflow.contrib.slim as slim
def flatten_fully_connected(inputs,
num_outputs,
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=slim.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
if inputs.shape.ndims > 2:
inputs = slim.flatten(inputs)
return slim.fully_connected(inputs,
num_outputs,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope)
def leak_relu(x, leak, scope=None):
with tf.name_scope(scope, 'leak_relu', [x, leak]):
if leak < 1:
y = tf.maximum(x, leak * x)
else:
y = tf.minimum(x, leak * x)
return y
from functools import partial
conv = partial(slim.conv2d, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.02))
dconv = partial(slim.conv2d_transpose, activation_fn=None, weights_initializer=tf.random_normal_initializer(stddev=0.02))
fc = partial(flatten_fully_connected, activation_fn=None, weights_initializer=tf.random_normal_initializer(stddev=0.02))
relu = tf.nn.relu
lrelu = partial(leak_relu, leak=0.01)
batch_norm = partial(slim.batch_norm, decay=0.9, scale=True, epsilon=1e-5, updates_collections=None)
ln = slim.layer_norm
def discriminator(x):
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
with tf.variable_scope("discriminator"):
# TODO: implement architecture
out = tf.reshape(x, [-1, 28, 28, 1])
out = lrelu(conv(out, 32, 5, 1, padding='valid'))
out = slim.max_pool2d(out, 2, 2)
out = lrelu(conv(out, 64, 5, 1, padding='valid'))
out = slim.max_pool2d(out, 2, 2)
out = tf.reshape(out, [-1, 1024])
out = lrelu(fc(out, 4*4*64))
out = fc(out, 1)
logits = out
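        # Shape bookkeeping: 28x28x1 -> 5x5 conv ('valid') -> 24x24x32 -> 2x2 pool -> 12x12x32
        # -> 5x5 conv ('valid') -> 8x8x64 -> 2x2 pool -> 4x4x64, which flattens to
        # 4*4*64 = 1024, matching the reshape above.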
return logits
test_discriminator(1102721)<jupyter_output>Correct number of parameters in discriminator.
<jupyter_text>#### Generator
For the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. See the documentation for [tf.nn.conv2d_transpose](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_transpose). We are always "training" in GAN mode.
Architecture:
* Fully connected of size 1024, ReLU
* BatchNorm
* Fully connected of size 7 x 7 x 128, ReLU
* BatchNorm
* Resize into Image Tensor
* 64 conv2d^T (transpose) filters of 4x4, stride 2, ReLU
* BatchNorm
* 1 conv2d^T (transpose) filter of 4x4, stride 2, TanH<jupyter_code>def generator(z):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
with tf.variable_scope("generator"):
# TODO: implement architecture
out = batch_norm(relu(fc(z, 1024)))
out = batch_norm(relu(fc(out, 7*7*128)))
out = tf.reshape(out, [-1, 7, 7, 128])
out = batch_norm(relu(dconv(out, 64, 4, 2)))
out = tf.nn.tanh(dconv(out, 1, 4, 2))
img = out
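        # Shape bookkeeping: z -> fc 1024 -> fc 7*7*128 -> reshape to 7x7x128 ->
        # transpose conv, stride 2 ('SAME' padding by default in slim) -> 14x14x64 ->
        # transpose conv, stride 2 -> 28x28x1, i.e. a 784-pixel image in [-1, 1].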
return img
test_generator(6595521)<jupyter_output>Correct number of parameters in generator.
<jupyter_text>We have to recreate our network since we've changed our functions.<jupyter_code>tf.reset_default_graph()
batch_size = 128
# our noise dimension
noise_dim = 96
# placeholders for images from the training dataset
x = tf.placeholder(tf.float32, [None, 784])
z = sample_noise(batch_size, noise_dim)
# generated images
G_sample = generator(z)
with tf.variable_scope("") as scope:
#scale images to be -1 to 1
logits_real = discriminator(preprocess_img(x))
# Re-use discriminator weights on new inputs
scope.reuse_variables()
logits_fake = discriminator(G_sample)
# Get the list of variables for the discriminator and generator
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'discriminator')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'generator')
D_solver,G_solver = get_solvers()
D_loss, G_loss = gan_loss(logits_real, logits_fake)
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
D_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS,'discriminator')
G_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS,'generator')<jupyter_output><empty_output><jupyter_text>### Train and evaluate a DCGAN
This is the one part of A3 that significantly benefits from using a GPU. It takes about 3 minutes on a GPU for the requested five epochs, or about 50 minutes on a dual-core laptop CPU (feel free to use 3 epochs if you run it on CPU).<jupyter_code># a giant helper function
def run_a_gan(sess, G_train_step, G_loss, D_train_step, D_loss, G_extra_step, D_extra_step,\
show_every=250, print_every=50, batch_size=128, num_epoch=10):
"""Train a GAN for a certain number of epochs.
Inputs:
- sess: A tf.Session that we want to use to run our data
- G_train_step: A training step for the Generator
- G_loss: Generator loss
    - D_train_step: A training step for the Discriminator
- D_loss: Discriminator loss
- G_extra_step: A collection of tf.GraphKeys.UPDATE_OPS for generator
- D_extra_step: A collection of tf.GraphKeys.UPDATE_OPS for discriminator
Returns:
Nothing
"""
# compute the number of iterations we need
max_iter = int(mnist.train.num_examples*num_epoch/batch_size)
for it in range(max_iter):
# every show often, show a sample result
if it % show_every == 0:
samples = sess.run(G_sample)
fig = show_images(samples[:16])
plt.show()
print()
# run a batch of data through the network
        minibatch, minibatch_y = mnist.train.next_batch(batch_size)
if it % 3 == 0:
_, D_loss_curr = sess.run([D_train_step, D_loss], feed_dict={x: minibatch})
_, G_loss_curr = sess.run([G_train_step, G_loss], feed_dict={x: minibatch})
# print loss every so often.
# We want to make sure D_loss doesn't go to 0
if it % print_every == 0:
print('Iter: {}, D: {:.4}, G:{:.4}'.format(it,D_loss_curr,G_loss_curr))
print('Final images')
samples = sess.run(G_sample)
fig = show_images(samples[:16])
plt.show()
with get_session() as sess:
sess.run(tf.global_variables_initializer())
run_a_gan(sess,G_train_step,G_loss,D_train_step,D_loss,G_extra_step,D_extra_step,num_epoch=10)<jupyter_output><empty_output><jupyter_text># INLINE QUESTION 2:
What differences do you see between the DCGAN results and the original GAN results?** (Write Your Answer In This Cell) **------
# Extra Credit
** Be sure you don't destroy your results above, but feel free to copy+paste code to get results below **
* For a small amount of extra credit, you can implement additional new GAN loss functions below, provided they converge. See AFI, BiGAN, Softmax GAN, Conditional GAN, InfoGAN, etc. They should converge to get credit.
* Likewise for an improved architecture or using a convolutional GAN (or even implement a VAE)
* For a bigger chunk of extra credit, load the CIFAR10 data (see last assignment) and train a compelling generative model on CIFAR-10
* Demonstrate the value of GANs in building semi-supervised models. In a semi-supervised example, only some fraction of the input data has labels; we can supervise this in MNIST by only training on a few dozen or hundred labeled examples. This was first described in [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498).
* Something new/cool.
#### Describe what you did here# WGAN-GP (Small Extra Credit)
Please only attempt after you have completed everything above.
We'll now look at [Improved Wasserstein GAN](https://arxiv.org/abs/1704.00028) as a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement Algorithm 1 in the paper.
You'll also need to use a discriminator and corresponding generator without max-pooling. So we cannot use the one we currently have from DCGAN. Pair the DCGAN Generator (from InfoGAN) with the discriminator from [InfoGAN](https://arxiv.org/pdf/1606.03657.pdf) Appendix C.1 MNIST (We don't use Q, simply implement the network up to D). You're also welcome to define a new generator and discriminator in this notebook, in case you want to use the fully-connected pair of D(x) and G(z) you used at the top of this notebook.
Architecture:
* 64 Filters of 4x4, stride 2, LeakyReLU
* 128 Filters of 4x4, stride 2, LeakyReLU
* BatchNorm
* Flatten
* Fully connected 1024, LeakyReLU
* Fully connected size 1
<jupyter_code>def discriminator(x):
with tf.variable_scope('discriminator'):
# TODO: implement architecture
out = tf.reshape(x, [-1, 28, 28, 1])
out = lrelu(conv(out, 64, 4, 2, padding='valid'))
out = lrelu(conv(out, 128, 4, 2, padding='valid'))
out = batch_norm(out)
out = tf.reshape(out, [-1, 5*5*128])
out = lrelu(fc(out, 4*4*64))
out = fc(out, 1)
logits = out
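        # Shape bookkeeping: 28x28x1 -> 4x4 conv, stride 2 ('valid') -> 13x13x64 ->
        # 4x4 conv, stride 2 ('valid') -> 5x5x128, which flattens to 5*5*128 = 3200,
        # matching the reshape above.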
return logits
test_discriminator(3411649)
tf.reset_default_graph()
batch_size = 128
# our noise dimension
noise_dim = 96
# placeholders for images from the training dataset
x = tf.placeholder(tf.float32, [None, 784])
z = sample_noise(batch_size, noise_dim)
# generated images
G_sample = generator(z)
with tf.variable_scope("") as scope:
#scale images to be -1 to 1
logits_real = discriminator(preprocess_img(x))
# Re-use discriminator weights on new inputs
scope.reuse_variables()
logits_fake = discriminator(G_sample)
# Get the list of variables for the discriminator and generator
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'discriminator')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'generator')
D_solver, G_solver = get_solvers(learning_rate=1e-4)
def wgangp_loss(logits_real, logits_fake, batch_size, x, G_sample):
"""Compute the WGAN-GP loss.
Inputs:
- logits_real: Tensor, shape [batch_size, 1], output of discriminator
Log probability that the image is real for each real image
- logits_fake: Tensor, shape[batch_size, 1], output of discriminator
Log probability that the image is real for each fake image
- batch_size: The number of examples in this batch
- x: the input (real) images for this batch
- G_sample: the generated (fake) images for this batch
Returns:
- D_loss: discriminator loss scalar
- G_loss: generator loss scalar
"""
# TODO: compute D_loss and G_loss
D_loss = tf.reduce_mean(logits_fake) - tf.reduce_mean(logits_real)
G_loss = -tf.reduce_mean(logits_fake)
# lambda from the paper
lam = 10
# random sample of batch_size (tf.random_uniform)
eps = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)
G_sample = tf.reshape(G_sample, [batch_size, -1])
x_hat = eps*x + (1-eps)*G_sample
# Gradients of Gradients is kind of tricky!
with tf.variable_scope('',reuse=True) as scope:
grad_D_x_hat = tf.gradients(discriminator(x_hat), [x_hat])[0]
grad_norm = tf.sqrt(tf.reduce_sum(tf.square(grad_D_x_hat), reduction_indices=[1]))
grad_pen = tf.reduce_mean((grad_norm-1.)**2)
D_loss += lam*grad_pen
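    # The penalty term drives ||grad_{x_hat} D(x_hat)||_2 toward 1 on random
    # interpolates x_hat between real and generated samples, as in Algorithm 1
    # of the WGAN-GP paper.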
return D_loss, G_loss
D_loss, G_loss = wgangp_loss(logits_real, logits_fake, 128, x, G_sample)
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
D_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS,'discriminator')
G_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS,'generator')
with get_session() as sess:
sess.run(tf.global_variables_initializer())
run_a_gan(sess,G_train_step,G_loss,D_train_step,D_loss,G_extra_step,D_extra_step,batch_size=128,num_epoch=10)<jupyter_output><empty_output>
<jupyter_start><jupyter_text># Data input, cleaning and pre-processing## Loading expression data<jupyter_code>library(WGCNA)
options(stringsAsFactors = FALSE)
femData = read.csv("LiverFemale3600.csv")
maleData = read.csv("LiverMale3600.csv")
dim(femData)
names(femData)
nSets = 2
setLabels = c("Female liver", "Male liver")
shortLabels = c("Female", "Male")
multiExpr = vector(mode = "list", length = nSets)
multiExpr[[1]] = list(data = as.data.frame(t(femData[-c(1:8)]))) # name is "data"
names(multiExpr[[1]]$data) = femData$substanceBXH # feature name
rownames(multiExpr[[1]]$data) = names(femData)[-c(1:8)] # name of samples
multiExpr[[2]] = list(data = as.data.frame(t(maleData[-c(1:8)])))
names(multiExpr[[2]]$data) = maleData$substanceBXH;
rownames(multiExpr[[2]]$data) = names(maleData)[-c(1:8)];
exprSize = checkSets(multiExpr)
exprSize<jupyter_output><empty_output><jupyter_text>## Rudimentary data cleaning and outlier removal<jupyter_code># Check that all genes and samples have sufficiently low numbers of missing values.
gsg = goodSamplesGenesMS(multiExpr, verbose = 3)
gsg$allOK
sampleTrees = list()
for (set in 1:nSets){
sampleTrees[[set]] = hclust(dist(multiExpr[[set]]$data), method = "average")
}
pdf(file = "Plots_SampleClustering.pdf", width = 12, height = 12)
par(mfrow=c(2,1))
par(mar = c(0, 4, 2, 0))
for (set in 1:nSets){
plot(sampleTrees[[set]],
main = paste("Sample clustering on all genes in", setLabels[set]),
xlab="", sub="", cex = 0.7)
}
dev.off() #device.off
# There is an outlier
# Choose the "base" cut height for the female data set
baseHeight = 16
cutHeights = c(16, 16*exprSize$nSamples[2]/exprSize$nSamples[1])
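# The second cut height is scaled by the ratio of sample sizes so that both
# dendrograms are cut at comparable relative heights.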
# Re-plot the dendrograms including the cut lines
pdf(file = "Plots_SampleClustering.pdf", width = 12, height = 12)
par(mfrow=c(2,1))
par(mar = c(0, 4, 2, 0))
for (set in 1:nSets){
plot(sampleTrees[[set]],
main = paste("Sample clustering on all genes in", setLabels[set]),
xlab="", sub="", cex = 0.7)
abline(h=cutHeights[set], col = "red")
}
dev.off()
# outlier removal
for (set in 1:nSets){
# Find clusters cut by the line
labels = cutreeStatic(sampleTrees[[set]], cutHeight = cutHeights[set])
labels
# Keep the largest one (labeled by the number 1)
keep = (labels==1)
multiExpr[[set]]$data = multiExpr[[set]]$data[keep, ]
}
collectGarbage();
# Check the size of the leftover data
exprSize = checkSets(multiExpr)
exprSize
cutreeStatic(sampleTrees[[1]], cutHeight = cutHeights[1])==1<jupyter_output><empty_output><jupyter_text>## Loading clinical trait data<jupyter_code>traitData = read.csv("ClinicalTraits.csv")
names(traitData)
dim(traitData)
# remove columns that hold information we do not need.
allTraits = traitData[, -c(31, 16)]
allTraits = allTraits[, c(2, 11:36) ]
dim(allTraits)
names(allTraits)
# Form a multi-set structure that will hold the clinical traits.
Traits = vector(mode="list", length = nSets)
for (set in 1:nSets){
setSamples = rownames(multiExpr[[set]]$data)
    traitRows = match(setSamples, allTraits$Mice) # find a correspondence between rows
Traits[[set]] = list(data = allTraits[traitRows, -1])
rownames(Traits[[set]]$data) = allTraits[traitRows, 1]
}
collectGarbage()
# Define data set dimensions
nGenes = exprSize$nGenes
nSamples = exprSize$nSamples
nSamples
save(multiExpr, Traits, nGenes, nSamples, setLabels,
shortLabels, exprSize,file = "Consensus-dataInput.RData")<jupyter_output><empty_output>
<jupyter_start><jupyter_text>Only process the cells that have an assigned borough and ignore the cells where the borough is "Not assigned". Dropping rows where Borough is "Not assigned".<jupyter_code># Get names of indexes for which column Borough has value "Not assigned"
indexNames = df[ df['Borough'] =='Not assigned'].index
# Delete these row indexes from dataFrame
df.drop(indexNames , inplace=True)
df.head(10)<jupyter_output><empty_output><jupyter_text>If a cell has a borough but a Not assigned neighborhood, then the neighborhood will be the same as the borough<jupyter_code>df.loc[df['Neighbourhood'] =='Not assigned' , 'Neighbourhood'] = df['Borough']
df.head(10)
result = df.groupby(['Postcode','Borough'], sort=False).agg( ', '.join)
df_new=result.reset_index()
df_new.head(15)
df_new.shape
<jupyter_output><empty_output><jupyter_text>Question 2
Use the Geocoder package or the csv file to create a dataframe with longitude and latitude values. I will be using a csv file that has the geographical coordinates of each postal code: http://cocl.us/Geospatial_data<jupyter_code>!wget -q -O 'Toronto_long_lat_data.csv' http://cocl.us/Geospatial_data
df_lon_lat = pd.read_csv('Toronto_long_lat_data.csv')
df_lon_lat.head()
df_lon_lat.columns=['Postcode','Latitude','Longitude']
df_lon_lat.head()
Toronto_df = pd.merge(df_new,
df_lon_lat[['Postcode','Latitude', 'Longitude']],
on='Postcode')
Toronto_df<jupyter_output><empty_output><jupyter_text>Question 3
Explore and cluster the neighborhoods in Toronto
Use the geopy library to get the latitude and longitude values of Toronto.<jupyter_code>from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
# import k-means from clustering stage
from sklearn.cluster import KMeans
#!conda install -c conda-forge folium=0.5.0 --yes # uncomment this line if you haven't completed the Foursquare API lab
import folium # map rendering library
print('Libraries imported.')
address = 'Toronto, ON'
geolocator = Nominatim(user_agent="Toronto")
location = geolocator.geocode(address)
latitude_toronto = location.latitude
longitude_toronto = location.longitude
print('The geograpical coordinate of Toronto are {}, {}.'.format(latitude_toronto, longitude_toronto))
map_toronto = folium.Map(location=[latitude_toronto, longitude_toronto], zoom_start=10)
# add markers to map
for lat, lng, borough, Neighbourhood in zip(Toronto_df['Latitude'], Toronto_df['Longitude'], Toronto_df['Borough'], Toronto_df['Neighbourhood']):
label = '{}, {}'.format(Neighbourhood, borough)
label = folium.Popup(label, parse_html=True)
folium.CircleMarker(
[lat, lng],
radius=5,
popup=label,
color='blue',
fill=True,
fill_color='#3186cc',
fill_opacity=0.7,
parse_html=False).add_to(map_toronto)
map_toronto<jupyter_output><empty_output><jupyter_text>Define Foursquare Credentials and Version¶<jupyter_code># defining radius and limit of venues to get
radius=500
LIMIT=100
CLIENT_ID = '5VE0TJS5ZJC2YCCBP5R5UE5F0ZVFBHKHM2PK1DPDNSGKAQK3'
CLIENT_SECRET = '2R0VDFNSWDVUWEBXPR0YMJSYJYEF1WLJKWPGZ5TIZF41QRTU'
VERSION = '20180604'
def getNearbyVenues(names, latitudes, longitudes, radius=500):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
# create the API request URL
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
LIMIT)
# make the GET request
results = requests.get(url).json()["response"]['groups'][0]['items']
# return only relevant information for each nearby venue
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighbourhood',
'Neighbourhood Latitude',
'Neighbourhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Category']
return(nearby_venues)
toronto_venues = getNearbyVenues(names=Toronto_df['Neighbourhood'],
latitudes=Toronto_df['Latitude'],
longitudes=Toronto_df['Longitude']
)<jupyter_output>Parkwoods
Victoria Village
Regent Park
Lawrence Heights, Lawrence Manor
Queen's Park
Islington Avenue
Rouge, Malvern
Don Mills North
Woodbine Gardens, Parkview Hill
Ryerson, Garden District
Glencairn
Cloverdale, Islington, Martin Grove, Princess Gardens, West Deane Park
Highland Creek , Rouge Hill, Port Union
Flemingdon Park, Don Mills South
Woodbine Heights
St. James Town
Humewood-Cedarvale
Bloordale Gardens, Eringate, Markland Wood, Old Burnhamthorpe
Guildwood, Morningside, West Hill
The Beaches
Berczy Park
Caledonia-Fairbanks
Woburn
Leaside
Central Bay Street
Christie
Cedarbrae
Hillcrest Village
Bathurst Manor, Downsview North, Wilson Heights
Thorncliffe Park
Adelaide, King, Richmond
Dovercourt Village, Dufferin
Scarborough Village
Fairview, Henry Farm, Oriole
Northwood Park, York University
East Toronto
Harbourfront East, Toronto Islands, Union Station
Little Portugal, Trinity–Bellwoods
East Birchmount Park, Ionview, Kennedy Park
Bayview Village
CFB Toronto, Downsview East
The Da[...]<jupyter_text>Let's check the size of the resulting dataframe¶<jupyter_code>toronto_venues.head(10)
toronto_venues.shape<jupyter_output><empty_output><jupyter_text>Let's check how many venues were returned for each neighborhood<jupyter_code>
toronto_venues.groupby('Neighbourhood').count()<jupyter_output><empty_output><jupyter_text>Analysing Each Neighborhood<jupyter_code># one hot encoding
toronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix="", prefix_sep="")
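# Illustrative: get_dummies expands the single 'Venue Category' column into one
# 0/1 indicator column per category, so each venue row becomes a one-hot vector.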
# add neighborhood column back to dataframe
toronto_onehot['Neighbourhood'] = toronto_venues['Neighbourhood']
# move neighborhood column to the first column
fixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1])
toronto_onehot = toronto_onehot[fixed_columns]
toronto_onehot.head()
toronto_onehot.shape<jupyter_output><empty_output><jupyter_text>Next, let's group rows by neighborhood and by taking the mean of the frequency of occurrence of each category<jupyter_code>
toronto_grouped = toronto_onehot.groupby('Neighbourhood').mean().reset_index()
toronto_grouped<jupyter_output><empty_output><jupyter_text>Let's print each neighborhood along with the top 5 most common venues<jupyter_code>
num_top_venues = 5
for hood in toronto_grouped['Neighbourhood']:
print("----"+hood+"----")
temp = toronto_grouped[toronto_grouped['Neighbourhood'] == hood].T.reset_index()
temp.columns = ['venue','freq']
temp = temp.iloc[1:]
temp['freq'] = temp['freq'].astype(float)
temp = temp.round({'freq': 2})
print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues))
print('\n')<jupyter_output>----Adelaide, King, Richmond----
venue freq
0 Coffee Shop 0.07
1 Restaurant 0.05
2 Café 0.04
3 Thai Restaurant 0.04
4 Bar 0.03
----Agincourt----
venue freq
0 Lounge 0.2
1 Breakfast Spot 0.2
2 Latin American Restaurant 0.2
3 Skating Rink 0.2
4 Clothing Store 0.2
----Agincourt North, L'Amoreaux East, Milliken, Ontario, Steeles East----
venue freq
0 Gym 0.33
1 Park 0.33
2 Playground 0.33
3 Accessories Store 0.00
4 Men's Store 0.00
----Albion Gardens, Beaumond Heights, Humbergate, Mount Olive-Silverstone-Jamestown, Mount Olive-Silverstone-Jamestown, Silverstone, South Steeles, Thistletown----
venue freq
0 Grocery Store 0.22
1 Pizza Place 0.11
2 Fried Chicken Joint 0.11
3 Sandwich Place 0.11
4 Fast Food Restaurant 0.11
-[...]<jupyter_text>Let's put that into a pandas dataframe
First, let's write a function to sort the venues in descending order.<jupyter_code>
def return_most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
import numpy as np
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['Neighbourhood']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
neighbourhoods_venues_sorted = pd.DataFrame(columns=columns)
neighbourhoods_venues_sorted['Neighbourhood'] = toronto_grouped['Neighbourhood']
for ind in np.arange(toronto_grouped.shape[0]):
neighbourhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)
neighbourhoods_venues_sorted.head()<jupyter_output><empty_output><jupyter_text>Cluster Neighborhoods
Run k-means to cluster the neighborhood into 5 clusters.<jupyter_code># set number of clusters
kclusters = 5
toronto_grouped_clustering = toronto_grouped.drop('Neighbourhood', 1)
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering)
# check cluster labels generated for each row in the dataframe
kmeans.labels_
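# kmeans.labels_ holds one cluster id (0-4) per neighbourhood row of toronto_grouped;
# these ids are inserted into neighbourhoods_venues_sorted and merged into toronto_merged below.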
# to change use .astype()<jupyter_output><empty_output><jupyter_text>Let's create a new dataframe that includes the cluster as well as the top 10 venues for each neighborhood.<jupyter_code># add clustering labels
neighbourhoods_venues_sorted.insert(0, 'Cluster_Labels', kmeans.labels_)
toronto_merged = Toronto_df
# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood
toronto_merged = toronto_merged.join(neighbourhoods_venues_sorted.set_index('Neighbourhood'), on='Neighbourhood')
toronto_merged.head() # check the last columns!
toronto_merged=toronto_merged.dropna()
toronto_merged['Cluster_Labels'] = toronto_merged.Cluster_Labels.astype(int)
map_clusters = folium.Map(location=[latitude_toronto, longitude_toronto], zoom_start=11)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'], toronto_merged['Neighbourhood'], toronto_merged['Cluster_Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
map_clusters<jupyter_output><empty_output><jupyter_text>Examine Clusters - Cluster 1<jupyter_code>toronto_merged.loc[toronto_merged['Cluster_Labels'] == 0, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]
# Cluster 2
toronto_merged.loc[toronto_merged['Cluster_Labels'] == 1, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output><jupyter_text>Cluster 3<jupyter_code>
toronto_merged.loc[toronto_merged['Cluster_Labels'] == 2, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]
toronto_merged.loc[toronto_merged['Cluster_Labels'] == 3, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]
toronto_merged.loc[toronto_merged['Cluster_Labels'] == 4, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output>
<jupyter_start><jupyter_text># **Problem Statement**
**DESCRIPTION**
**Problem Statement:**
Facial recognition is a biometric alternative that measures unique characteristics of a human
face. Applications available today include flight check in, tagging friends and family members in
photos, and “tailored” advertising. You are a computer vision engineer who needs to develop a
face recognition programme with deep convolutional neural networks.
Objective: Use a deep convolutional neural network to perform facial recognition using Keras.
**Dataset Details:**
ORL face database composed of 400 images of size 112 x 92. There are 40 people, 10 images
per person. The images were taken at different times, lighting and facial expressions. The faces
are in an upright position in frontal view, with a slight left-right rotation.
Link to the Dataset: https://www.dropbox.com/s/i7uzp5yxk7wruva/ORL_faces.npz?dl=0
**Prerequisites:**
Keras
Scikit Learn
Steps to be followed:
1. Input the required libraries
2. Load the dataset after loading the dataset, you have to normalize every image.
3. Split the dataset
4. Transform the images to equal sizes to feed in CNN
5. Build a CNN model that has 3 main layers:
i. Convolutional Layer
ii. Pooling Layer
iii. Fully Connected Layer
6. Train the model
7. Plot the result
8. Iterate the model until the accuracy is above 90%## **Task 1** - **Import Libraries**<jupyter_code>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import seaborn as sn
import cv2
from glob import glob
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import seaborn as sns
from sklearn import metrics
%matplotlib inline<jupyter_output>/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
<jupyter_text>## **Task 2(a)** - **Load File**
Load file and view the list of files in the dataset<jupyter_code># Mount drive to colab
from google.colab import drive
drive.mount('/content/drive')
# Load the NPZ file to data
data = np.load("drive/My Drive/PG AI ML/ORL_faces.npz",allow_pickle=False)
# View files/folders present in data
lst = data.files
for item in lst:
print(item)
lst = data.files
for item in lst:
print(item)
print(data[item])
print("\n")<jupyter_output>testY
[ 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2
3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5
6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8
9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11
12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14
15 15 15 15 15 15 15 15 16 16 16 16 16 16 16 16 17 17 17 17 17 17 17 17
18 18 18 18 18 18 18 18 19 19 19 19 19 19 19 19]
testX
[[ 41. 47. 47. ... 35. 37. 38.]
[ 44. 43. 32. ... 43. 43. 37.]
[ 42. 41. 44. ... 42. 43. 41.]
...
[101. 100. 103. ... 31. 40. 42.]
[105. 108. 106. ... 44. 40. 47.]
[113. 114. 111. ... 62. 81. 89.]]
trainX
[[ 48. 49. 45. ... 47. 46. 46.]
[ 60. 60. 62. ... 32. 34. 34.]
[ 39. 44. 53. ... 29. 26. 29.]
...
[114. 117. 114. ... 98. 96. 98.]
[105. 105. 107. ... 54. 47. 41.]
[116. 114. 117. ... 95. 100. 101.]]
trainY
[ 0 0 0 0 0 0 0 0[...]<jupyter_text>**Observation:**
We can see that there are **4 arrays** present in the given data set: **trainX, trainY, testX and testY**
**Train data** set contains **12 images** of each person (20 persons total)
**Test data** set contains **8 images** of each person (20 persons total)## **Task 3** - **Separate the Train and Test Data**
**Analysis:**
**Train X** contains the **images** and **Train Y** contains the **Label** for those images
**Test X** contains **images** to test and **Test Y** contains the **Label** for those images.
<jupyter_code>x_train,x_test,y_train,y_test=data['trainX'],data['testX'], data['trainY'],data['testY']
x_train.shape, x_test.shape, y_train.shape, y_test.shape<jupyter_output><empty_output><jupyter_text>## **Reshape the Train & Test Data to the shape required to display an image**<jupyter_code>x_train_img=np.reshape(x_train,(x_train.shape[0],112,-1))
print ("Reshaped Image of X_Train_img is: ", x_train_img.shape, "\n")
#plt.imshow(x_train[30])
x_test_img=np.reshape(x_test,(x_test.shape[0],112,92))
print ("Reshaped Image of X_Test_img is: ", x_test_img.shape, "\n")<jupyter_output>Reshaped Image of X_Test_img is: (160, 112, 92)
<jupyter_text>## **View Unique Faces that is present in Train Data set**<jupyter_code>print("unique target number:",np.unique(data['trainY']))
def show_20_distinct_people(images, unique_ids):
#Creating 2*10 subplots in 18x5 figure size
fig, axarr=plt.subplots(nrows=2, ncols=10, figsize=(18, 5))
#For easy iteration flattened 2X10 subplots matrix to 20 array
axarr=axarr.flatten()
#iterating over user ids
for unique_id in unique_ids:
image_index=unique_id*12
axarr[unique_id].imshow(images[image_index], cmap='gray')
axarr[unique_id].set_xticks([])
axarr[unique_id].set_yticks([])
axarr[unique_id].set_title("face id:{}".format(unique_id))
plt.suptitle("There are 20 distinct people in the dataset")
show_20_distinct_people(x_train_img, np.unique(data['trainY']))<jupyter_output><empty_output><jupyter_text>### **View Unique Faces that is present in Test Data set**<jupyter_code>print("unique target number:",np.unique(data['testY']))
def show_16_distinct_people(images, unique_ids):
#Creating 2*10 subplots in 18x5 figure size
fig, axarr=plt.subplots(nrows=2, ncols=10, figsize=(18, 5))
#For easy iteration flattened 2X10 subplots matrix to 20 array
axarr=axarr.flatten()
#iterating over user ids
for unique_id in unique_ids:
image_index=unique_id*8
axarr[unique_id].imshow(images[image_index], cmap='gray')
axarr[unique_id].set_xticks([])
axarr[unique_id].set_yticks([])
axarr[unique_id].set_title("face id:{}".format(unique_id))
plt.suptitle("There are 20 distinct people in the dataset")
show_16_distinct_people(x_test_img, np.unique(data['testY']))<jupyter_output><empty_output><jupyter_text>## **View different images of single person, present in Train Data**
Images of a single person in different postures, orientations, etc.<jupyter_code>def show_12_faces_of_n_subject(images, subject_ids):
cols=12
rows=(len(subject_ids)*12)/cols
rows=int(rows)
fig, axarr=plt.subplots(nrows=rows, ncols=cols, figsize=(18,12))
#axarr=axarr.flatten()
for i, subject_id in enumerate(subject_ids):
for j in range(cols):
image_index=subject_id*12 + j
axarr[i,j].imshow(images[image_index], cmap="gray")
axarr[i,j].set_xticks([])
axarr[i,j].set_yticks([])
axarr[i,j].set_title("face id:{}".format(subject_id))
show_12_faces_of_n_subject(images=x_train_img, subject_ids=[0, 1, 2 , 3, 4, 18, 19])<jupyter_output><empty_output><jupyter_text>### **Different Images of single person, in Test Data set**<jupyter_code>def show_8_faces_of_n_subject(images, subject_ids):
cols=8
rows=(len(subject_ids)*8)/cols
rows=int(rows)
fig, axarr=plt.subplots(nrows=rows, ncols=cols, figsize=(12,10))
#axarr=axarr.flatten()
for i, subject_id in enumerate(subject_ids):
for j in range(cols):
image_index=subject_id*8 + j
axarr[i,j].imshow(images[image_index], cmap="gray")
axarr[i,j].set_xticks([])
axarr[i,j].set_yticks([])
axarr[i,j].set_title("face id:{}".format(subject_id))
show_8_faces_of_n_subject(images=x_test_img, subject_ids=[0, 1, 2 , 3, 4, 18, 19])<jupyter_output><empty_output><jupyter_text>## **No of Sample for each Image**<jupyter_code>y_frame=pd.DataFrame()
y_frame['subject ids']=y_train
y_frame.groupby(['subject ids']).size().plot.bar(figsize=(15,8),title="Number of Samples for Each Classes")<jupyter_output><empty_output><jupyter_text>## **Task 2(b)** - **Normalize the Data**<jupyter_code>print("-------------------------")
print("X train pixels:")
print("-------------------------")
print("Minimum Pixels: ", x_train.min(), "\t", "Maximum Pixels: ", x_train.max(), "\n")
x_train = np.array(x_train,dtype='float64')/255
print("Post Normalization")
print("Minimum Pixels: ", x_train.min(), "\t", "Maximum Pixels: ", x_train.max(), "\n")
print("--------------------------")
print("X test pixels:")
print("--------------------------")
print("Minimum Pixels: ", x_test.min(), "\t", "Maximum Pixels: ", x_test.max(), "\n")
x_test = np.array(x_test,dtype='float64')/255
print("Post Normalization")
print("Minimum Pixels: ", x_test.min(), "\t", "Maximum Pixels: ", x_test.max())
<jupyter_output>-------------------------
X train pixels:
-------------------------
Minimum Pixels: 0.0 Maximum Pixels: 244.0
Post Normalization
Minimum Pixels: 0.0 Maximum Pixels: 0.9568627450980393
--------------------------
X test pixels:
--------------------------
Minimum Pixels: 0.0 Maximum Pixels: 244.0
Post Normalization
Minimum Pixels: 0.0 Maximum Pixels: 0.9568627450980393
<jupyter_text>## **Task 4 - Transform the images to equal sizes**<jupyter_code>#imageGrayScale = np.array([cv2.cvtColor(i, cv2.COLOR_BGR2GRAY) for i in x_train)
x_train_final = np.array([cv2.resize(i,(112,92), interpolation=cv2.INTER_CUBIC) for i in x_train])
print("Shape of X Train: ", x_train_final.shape)
x_test_final = np.array([cv2.resize(i,(112,92), interpolation=cv2.INTER_CUBIC) for i in x_test])
print("Shape of X Test: ", x_test_final.shape)<jupyter_output>Shape of X Test: (160, 92, 112)
<jupyter_text>## **Changing classes count for Label**<jupyter_code>y_train_final = tf.keras.utils.to_categorical(y_train, num_classes=20, dtype='float32')
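# Illustrative: to_categorical maps an integer label such as 3 to a length-20
# one-hot vector [0, 0, 0, 1, 0, ..., 0], matching the 20-unit softmax output layer.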
y_test_final = tf.keras.utils.to_categorical(y_test, num_classes=20, dtype='float32')<jupyter_output><empty_output><jupyter_text>## **Task 5 - Creating CNN Model**
i. Convolutional Layer
ii. Pooling Layer
iii. Fully Connected Layer<jupyter_code>x_train_edited = np.array([np.reshape(i, (112, 92, 1)) for i in x_train])
x_test_edited = np.array([np.reshape(i, (112, 92, 1)) for i in x_test])
print(x_train_edited.shape,",",x_test_edited.shape)
# Set Parameters
batchSize = 12
epochs = 200
tf.random.set_seed(2507)
np.random.seed(2507)
# Initialising the CNN
classifier = tf.keras.models.Sequential()
# Step 1 - Convolution #No of Feature Maps, Filter, color image with channel,
classifier.add(tf.keras.layers.Conv2D(32, (3, 3), input_shape = (112, 92, 1), activation = 'relu'))
# Step 2 - Pooling
classifier.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu'))
classifier.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(tf.keras.layers.Conv2D(128, (3, 3), activation = 'relu'))
classifier.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(tf.keras.layers.Flatten())
#classifier.add(tf.keras.layers.GlobalAveragePooling2D())
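# Shape bookkeeping (Keras 'valid' padding by default): 112x92x1 -> 110x90x32 -> pool -> 55x45
# -> 53x43x64 -> pool -> 26x21 -> 24x19x128 -> pool -> 12x9, so Flatten yields
# 12*9*128 = 13824 features feeding the dense layers below.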
# Step 4 - Full connection
classifier.add(tf.keras.layers.Dense(units = 512, activation = 'relu'))
classifier.add(tf.keras.layers.Dropout(0.25))
classifier.add(tf.keras.layers.Dense(units = 256, activation = 'relu'))
classifier.add(tf.keras.layers.Dropout(0.25))
classifier.add(tf.keras.layers.Dense(units = 128, activation = 'relu'))
classifier.add(tf.keras.layers.Dropout(0.25))
classifier.add(tf.keras.layers.Dense(units = 64, activation = 'relu'))
classifier.add(tf.keras.layers.Dense(units = 20, activation = 'softmax'))
# Compiling the CNN
classifier.compile(optimizer = "Adam" , loss = 'categorical_crossentropy', metrics = ['accuracy'])
classifier.summary()
#Custom Callback
class MyThresholdCallBack(tf.keras.callbacks.Callback):
def __init__(self,cl):
super(MyThresholdCallBack, self).__init__()
self.cl = cl
def on_epoch_end(self, epoch, logs=None):
test_score = logs["val_accuracy"]
train_score = logs["accuracy"]
if test_score > train_score and test_score > self.cl:
#if test_score > self.cl:
self.model.stop_training = True<jupyter_output><empty_output><jupyter_text>## **Task 6 - Train the Model**<jupyter_code>myR2ScoreMonitor = MyThresholdCallBack(cl=0.90)
history = classifier.fit(x = x_train_edited, y = y_train_final,
validation_data = (x_test_edited, y_test_final),
steps_per_epoch= (x_train_final.shape[0])/batchSize,
epochs = epochs,
callbacks = [myR2ScoreMonitor],
validation_steps= (x_test_final.shape[0])/batchSize,
verbose=2
)<jupyter_output>Epoch 1/200
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0047s vs `on_train_batch_end` time: 0.0162s). Check your callbacks.
20/20 - 1s - loss: 3.0083 - accuracy: 0.0292 - val_loss: 2.9955 - val_accuracy: 0.0500
Epoch 2/200
20/20 - 0s - loss: 2.9986 - accuracy: 0.0500 - val_loss: 2.9955 - val_accuracy: 0.0500
Epoch 3/200
20/20 - 0s - loss: 2.9963 - accuracy: 0.0583 - val_loss: 2.9938 - val_accuracy: 0.0500
Epoch 4/200
20/20 - 0s - loss: 2.9957 - accuracy: 0.0500 - val_loss: 2.9872 - val_accuracy: 0.0500
Epoch 5/200
20/20 - 0s - loss: 2.9118 - accuracy: 0.0750 - val_loss: 2.6616 - val_accuracy: 0.1187
Epoch 6/200
20/20 - 0s - loss: 2.5980 - accuracy: 0.1625 - val_loss: 2.3485 - val_accuracy: 0.3125
Epoch 7/200
20/20 - 0s - loss: 2.0502 - accuracy: 0.3417 - val_loss: 1.6254 - val_accuracy: 0.4750
Epoch 8/200
20/20 - 0s - loss: 1.5121 - accuracy: 0.4583 - val_loss: 0.9439 - val_accuracy: 0.6625
Epoch 9/200
20/20 - 0s - loss: 1.[...]<jupyter_text>## **Task 7 - Train Results (Graph Visualization)**<jupyter_code>#Graphing our training and validation
import matplotlib.pyplot as plt
%matplotlib inline
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend()
plt.show()<jupyter_output><empty_output><jupyter_text>## **Task 8 - Model trained above 90% and generalized successfully**<jupyter_code># training accuracy after final epoch
history.history['accuracy'][-1]
# test accuracy after final epoch
history.history['val_accuracy'][-1]<jupyter_output><empty_output><jupyter_text>## **Save The Model**<jupyter_code>#classifier.save("FaceDetection.tf")
classifier.save("/content/drive/My Drive/PG AI ML/FaceRecognition.h5")
classifier.save('FaceRecognition.tf2')<jupyter_output>WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
This property should not be used in TensorFlow 2.0, as updates are applied automatically.
INFO:tensorflow:Assets written to: FaceRecognition.tf2/assets
<jupyter_text>## **Making Predictions**<jupyter_code>predictions = classifier.predict(x_test_edited)
def plot_image(i, predictions, trueLabel, img):
predictions_array, true_label, image = predictions[i], trueLabel[i], img[i]
plt.xticks([])
plt.yticks([])
#plt.imshow(x_test_edited[0].reshape(112,92))
plt.imshow(image.reshape(112,92), cmap="gray")
actual_label = np.argmax(true_label)
predicted_label = np.argmax(predictions_array)
# print("True Label: ",y_test_final[1],"\n")
# print("Prediction: ", predictions[1], "\n")
print("Actual Label is: ", actual_label)
print("Predicted Label is: ", predicted_label)
if predicted_label == actual_label:
color = "blue"
else:
color = "red"
plt.xlabel("Face ID {} predicted matches {:2.0f}% to ({})".format(predicted_label, 100*np.max(predictions_array), actual_label, color=color))
i = 0
plt.figure(figsize=(6,3))
plot_image(i, predictions, y_test_final, x_test_edited)
j = 23
plt.figure(figsize=(6,3))
plot_image(j, predictions, y_test_final, x_test_edited)
k = 38
plt.figure(figsize=(6,3))
plot_image(k, predictions, y_test_final, x_test_edited)<jupyter_output>Actual Label is: 4
Predicted Label is: 4
<jupyter_text>## **Confusion Matrix**<jupyter_code>predicted_classes = np.argmax(predictions, axis=1)
true_classes = np.array([np.argmax(i) for i in y_test_final])
class_labels = np.unique(data["trainY"])
report = metrics.classification_report(true_classes, predicted_classes)
print(report) <jupyter_output> precision recall f1-score support
0 1.00 0.88 0.93 8
1 1.00 1.00 1.00 8
2 1.00 1.00 1.00 8
3 1.00 1.00 1.00 8
4 1.00 0.75 0.86 8
5 1.00 1.00 1.00 8
6 1.00 1.00 1.00 8
7 1.00 1.00 1.00 8
8 1.00 1.00 1.00 8
9 1.00 0.75 0.86 8
10 0.89 1.00 0.94 8
11 1.00 1.00 1.00 8
12 1.00 1.00 1.00 8
13 1.00 1.00 1.00 8
14 1.00 1.00 1.00 8
15 0.86 0.75 0.80 8
16 0.73 1.00 0.84 8
17 0.73 [...]<jupyter_text>## **Prediction on other Images**<jupyter_code>from keras.models import load_model
model = load_model('drive/My Drive/PG AI ML/FaceRecognition.h5')
imageLocation = ('drive/My Drive/PG AI ML/Fruits/Training/Strawberry/113_100.jpg')
#Load the Image
imageLoad = tf.keras.preprocessing.image.load_img(imageLocation ,
target_size=(112,92,1))
#Convert Image to Array
imageArray = tf.keras.preprocessing.image.img_to_array(imageLoad)
plt.imshow(imageLoad)
#reshaping to RGB 1 channel : Method 1
convertedImage=tf.image.rgb_to_grayscale(imageArray, name=None)
convertedImage.shape
compatible_image = np.array([convertedImage])
compatible_image.shape
newPredict = model.predict(compatible_image)
newPredict
np.argmax(newPredict)<jupyter_output><empty_output>
|
no_license
|
/ORL_FaceRecognition_Confusionmatrix.ipynb
|
anusha-viraj/ORL_Face_Recognition
| 20 |
<jupyter_start><jupyter_text>### Exercise 1:<jupyter_code>def show_stars(n):
str=""
for i in range(1,n+1):
str+="*"
print(str)
show_stars(2)
show_stars(5)<jupyter_output>*
**
***
****
*****
<jupyter_text>### Exercise 2### Exercise 3### 1. Use Pandas to read and aggregate the data for all the years<jupyter_code>%config Completer.use_jedi = False
df1=pd.read_csv(r'C:\Users\Laptop EDG\Downloads\Names\Names\yob1880.txt', header=None).head(5)
df1
def process_data():
alldata=pd.DataFrame(columns=['Name','Sex','Quantity','Year'])
for year in range(1880,2019):
url='C:/Users/Laptop EDG/Downloads/Names/Names/yob'+str(year)+'.txt'
df=pd.read_csv(url, header=None)
df.columns=['Name','Sex','Quantity']
df['Year']=year
alldata=pd.concat([alldata,df],ignore_index=True)
return alldata
data=process_data()
data<jupyter_output><empty_output><jupyter_text>### 2. Plot the total number of children born by sex and year (Total births by sex and year)<jupyter_code>data.Quantity=data.Quantity.apply(lambda x:int(x))
data.dtypes
# Method 1:
data_f=data[data.Sex=='F'].groupby('Year').Quantity.sum()
data_f.tail(3)
data_m=data[data.Sex=='M'].groupby('Year').Quantity.sum()
data_m.tail(3)
plt.stackplot(data_f.index,data_f.values,data_m.values,labels=['Female','Male'])
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('Total births by sex and year')
plt.legend(loc='upper left');
# Test:
data.groupby(['Year','Sex']).Quantity.sum().plot(kind='area',stacked=False,xlabel='Year')<jupyter_output><empty_output><jupyter_text>### 3. Create a subset of the top 1000 most popular names for each year and sex<jupyter_code>data_name=data.set_index('Name')
data_name.groupby(['Year','Sex']).Quantity.nlargest(1000)<jupyter_output><empty_output><jupyter_text>### 4. Plot the number of babies born each year with the following names: Philip, Harry, Elizabeth, Marilyn<jupyter_code>data_Philip=data[data.Name=='Philip'].groupby('Year').Quantity.sum()
data_Harry=data[data.Name=='Harry'].groupby('Year').Quantity.sum()
data_Elizabeth=data[data.Name=='Elizabeth'].groupby('Year').Quantity.sum()
data_Marilyn=data[data.Name=='Marilyn'].groupby('Year').Quantity.sum()
data_combined=pd.concat([data_Philip,data_Harry,data_Elizabeth,data_Marilyn],axis=1)
data_combined
data_combined.columns=['Philip', 'Harry', 'Elizabeth', 'Marilyn']
data_combined.Marilyn=data_combined.Marilyn.replace(np.nan,0)
data_combined
data_combined.Marilyn=data_combined.Marilyn.apply(lambda x:int(x))
data_combined
data_combined.plot(kind='line')
plt.title('Number of births per name by year')
plt.ylabel('Count');
# Shorter:
df_4 = data[data.Name.isin(['Philip', 'Harry', 'Elizabeth', 'Marilyn'])]
df_4.pivot_table(values='Quantity', index = 'Year', columns='Name',aggfunc='sum').\
plot(ylabel='sum of Quantity');<jupyter_output><empty_output><jupyter_text>### 5. Plot the diversity of naming over the years by sex (shown by the year-on-year decline in the total percentage share of the top 1000 most popular names)<jupyter_code>data.groupby(['Year','Sex']).Quantity.sum()<jupyter_output><empty_output>
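The notebook stops before implementing task 5; the following is a minimal sketch of one way to measure that diversity, reusing the `data` frame and its `Name`/`Sex`/`Quantity`/`Year` columns from above (the `top1000_share` helper is my own, not part of the original):

```python
# Share of all births covered by the top 1000 names, per year and sex
def top1000_share(group):
    top = group.sort_values('Quantity', ascending=False).head(1000)
    return top.Quantity.sum() / group.Quantity.sum()

diversity = data.groupby(['Year', 'Sex']).apply(top1000_share)
diversity.unstack('Sex').plot(ylabel='share of births in top 1000 names');
```

A downward-sloping curve would indicate that the top 1000 names cover a shrinking share of births, i.e. increasing naming diversity.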
|
no_license
|
/Assignments/Session_10/quynhnt_hw10.ipynb
|
datacuriosity/MCI_Python_28S_Level_1
| 6 |
<jupyter_start><jupyter_text>---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# BINARY CLASSIFICATION USING LOGISTIC REGRESSION
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## Introduction to Logistic Regression
------------------------------------------------------------------------------------
### Overview
------------------------------------------------------------------------------------
Logistic regression is generally used where the dependent variable is binary or dichotomous. That means the dependent variable can take only two possible values such as “Yes or No”, “Default or No Default”, “Living or Dead”, or “Responder or Non Responder”. Independent factors or variables can be categorical or numerical variables.
Please note that even though logistic (logit) regression is frequently used for binary variables (2 classes), it can be used for categorical dependent variables with more than 2 classes. In this case it’s called Multinomial Logistic Regression.
Here we will focus on Logistic Regression with binary dependent variables as it is most commonly used.
### Applications of Logistic Regression-
------------------------------------------------------------------------------------
Logistic regression is used for prediction of output which is binary, as stated above. For example, if a credit card company is going to build a model to decide whether to issue a credit card to a customer or not, it will model for whether the customer is going to “Default” or “Not Default” on this credit card. This is called “Default Propensity Modeling” in banking lingo.
Similarly, an ecommerce company that is sending out costly advertisement / promotional offer mails to customers would like to know whether a particular customer is likely to respond to the offer or not. In other words, whether a customer will be a “Responder” or “Non Responder”. This is called “Propensity to Respond Modeling”.
Using insights generated from the logistic regression output, companies may optimize their business strategies to achieve their business goals such as minimize expenses or losses, maximize return on investment (ROI) in marketing campaigns etc.
### Underlying Algorithm and Assumptions
------------------------------------------------------------------------------------
The underlying Maximum Likelihood Estimation (MLE) algorithm determines the regression coefficients of the model that best predict the probability of the binary dependent variable. The algorithm stops when the convergence criterion is met or the maximum number of iterations is reached. Since the probability of any event lies between 0 and 1 (or 0% to 100%), plotting the probability of the dependent variable against the independent factors produces an ‘S’-shaped curve.
Let’s take an example: here we are predicting the probability that a given candidate gets admission to a school of his or her choice based on the score the candidate receives in the admission test. Since the dependent variable is binary/dichotomous (“Admission” or “No Admission”), we can use a logistic regression model to predict the probability of getting the “Admission”. Let’s first plot the data and analyse the shape to confirm that it follows an ‘S’ shape.
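A quick way to see where the ‘S’ shape comes from: logistic regression passes a linear combination of the predictors through the sigmoid (logistic) function, so with a single predictor $x$ (the admission test score in this example) the modelled probability is

$$P(y = 1 \mid x) = \frac{1}{1 + e^{-(\beta_0 + \beta_1 x)}}$$

which rises smoothly from 0 towards 1 as $\beta_0 + \beta_1 x$ increases, tracing out the ‘S’-shaped curve described above.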
--------------------------------------------------------------------------------------------------------------------------------------------------------------<jupyter_code>
### Import libraries
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import imblearn
from imblearn.over_sampling import SMOTE
sns.set()
%matplotlib inline
# Check for files
os.listdir('../data')
data = pd.read_csv('../data/banking.csv')<jupyter_output><empty_output><jupyter_text>The Data
The data is related to the direct marketing campaigns (phone calls) of a banking institution. The classification goal is to predict whether the client will subscribe (1/0) to a term deposit (variable y).
This dataset provides the customer information.
It includes 41188 records and 21 fields.## Data Dictionary
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
1. age
------------------------------------------------------------------------------------
2. job : type of job (categorical: “admin”, “blue-collar”, “entrepreneur”, “housemaid”, “management”, “retired”, “self-employed”, “services”, “student”, “technician”, “unemployed”, “unknown”)
------------------------------------------------------------------------------------
3. marital : marital status (categorical: “divorced”, “married”, “single”, “unknown”)
------------------------------------------------------------------------------------
4. education (categorical: “basic.4y”, “basic.6y”, “basic.9y”, “high.school”, “illiterate”, “professional.course”, “university.degree”, “unknown”)
------------------------------------------------------------------------------------
5. default: has credit in default? (categorical: “no”, “yes”, “unknown”)
------------------------------------------------------------------------------------
6. housing: has housing loan? (categorical: “no”, “yes”, “unknown”)
------------------------------------------------------------------------------------
7. loan: has personal loan? (categorical: “no”, “yes”, “unknown”)
------------------------------------------------------------------------------------
8. contact: contact communication type (categorical: “cellular”, “telephone”)
------------------------------------------------------------------------------------
9. month: last contact month of year (categorical: “jan”, “feb”, “mar”, …, “nov”, “dec”)
------------------------------------------------------------------------------------
10. day_of_week: last contact day of the week (categorical: “mon”, “tue”, “wed”, “thu”, “fri”)
------------------------------------------------------------------------------------
11. duration: last contact duration, in seconds (numeric). Important note: this attribute highly affects the output target (e.g., if duration=0 then y=’no’). The duration is not known before a call is performed, also, after the end of the call, y is obviously known. Thus, this input should only be included for benchmark purposes and should be discarded if the intention is to have a realistic predictive model
------------------------------------------------------------------------------------
12. campaign: number of contacts performed during this campaign and for this client (numeric, includes last contact)
------------------------------------------------------------------------------------
13. pdays: number of days that passed by after the client was last contacted from a previous campaign (numeric; 999 means client was not previously contacted)
------------------------------------------------------------------------------------
14. previous: number of contacts performed before this campaign and for this client (numeric)
------------------------------------------------------------------------------------
15. poutcome: outcome of the previous marketing campaign (categorical: “failure”, “nonexistent”, “success”)
------------------------------------------------------------------------------------
16. emp.var.rate: employment variation rate — (numeric)
------------------------------------------------------------------------------------
17. cons.price.idx: consumer price index — (numeric)
------------------------------------------------------------------------------------
18. cons.conf.idx: consumer confidence index — (numeric)
------------------------------------------------------------------------------------
19. euribor3m: euribor 3 month rate — (numeric)
------------------------------------------------------------------------------------
20. nr.employed: number of employees — (numeric)
------------------------------------------------------------------------------------### Predict variable (desired target):
------------------------------------------------------------------------------------
y - has the client subscribed a term deposit? (binary: '1','0')
The education column of the dataset has many categories and we need to reduce the categories for a better modelling. The education column has the following categories:Let us group "basic.4y", "basic.9y" and "basic.6y" together and call them "basic".<jupyter_code>## Check For any missing values in the data set
pd.DataFrame(data.isnull().sum())
data['education']=np.where(data['education'] =='basic.9y', 'Basic', data['education'])
data['education']=np.where(data['education'] =='basic.6y', 'Basic', data['education'])
data['education']=np.where(data['education'] =='basic.4y', 'Basic', data['education'])
data['education'].unique()
# data Exploration
data['y'].value_counts()
# Graphical Representation of Missing values
sns.heatmap(data.isnull() , yticklabels=False , cbar=False,cmap="Blues")
# Unique Values of eductation
data['education'].unique()
# Remove the na from the data set
data = data.dropna()
# dimention of data frame
print(data.shape)
# Column names
print(list(data.columns))
pd.DataFrame(data.info())
data.describe()
# Data Balance
sns.countplot(x='y',data=data, palette='hls')
plt.show()
data.head()
count_no_sub = len(data[data['y']==0])
count_sub = len(data[data['y']==1])
pct_of_no_sub = count_no_sub/(count_no_sub+count_sub)
print("percentage of no subscription is", pct_of_no_sub*100)
pct_of_sub = count_sub/(count_no_sub+count_sub)
print("percentage of subscription", pct_of_sub*100)
sns.pairplot(data,hue="y")
plt.show()<jupyter_output>C:\Users\Lenovo\anaconda3\lib\site-packages\seaborn\distributions.py:369: UserWarning: Default bandwidth for data is 0; skipping density estimation.
warnings.warn(msg, UserWarning)
C:\Users\Lenovo\anaconda3\lib\site-packages\seaborn\distributions.py:369: UserWarning: Default bandwidth for data is 0; skipping density estimation.
warnings.warn(msg, UserWarning)
C:\Users\Lenovo\anaconda3\lib\site-packages\seaborn\distributions.py:369: UserWarning: Default bandwidth for data is 0; skipping density estimation.
warnings.warn(msg, UserWarning)
C:\Users\Lenovo\anaconda3\lib\site-packages\seaborn\distributions.py:369: UserWarning: Default bandwidth for data is 0; skipping density estimation.
warnings.warn(msg, UserWarning)
C:\Users\Lenovo\anaconda3\lib\site-packages\seaborn\distributions.py:369: UserWarning: Default bandwidth for data is 0; skipping density estimation.
warnings.warn(msg, UserWarning)
C:\Users\Lenovo\anaconda3\lib\site-packages\seaborn\distributions.py:369: UserWarning[...]<jupyter_text>Our classes are imbalanced,
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
and the ratio of no-subscription to subscription instances is 89:11. Before we go ahead to balance the classes, Let's do some more exploration.<jupyter_code>data.groupby('y').mean()<jupyter_output><empty_output><jupyter_text>Observations:
The average age of customers who bought the term deposit is higher than that of the customers who didn't. The pdays (days since the customer was last contacted) is understandably lower for the customers who bought it. The lower the pdays, the better the memory of the last call and hence the better chances of a sale. Surprisingly, campaigns (number of contacts or calls made during the current campaign) are lower for customers who bought the term deposit.
We can calculate categorical means for other categorical variables such as education and marital status to get a more detailed sense of our data.<jupyter_code>data.groupby('job').mean()
data.groupby('marital').mean()
# Customer marital status distribution
sns.countplot(x="marital", data=data)
plt.show()
data.groupby('education').mean()
# Barplot for housing loan
sns.countplot(x="housing", data=data)
plt.show()
# The frequency of purchase of the deposit depends a great deal on the job title. Thus, the job title can be a good predictor of the outcome variable.
%matplotlib inline
pd.crosstab(data.job,data.y).plot(kind='bar')
plt.title('Purchase Frequency for Job Title')
plt.xlabel('Job')
plt.ylabel('Frequency of Purchase')<jupyter_output><empty_output><jupyter_text>The frequency of purchase of the deposit depends a great deal on the job title. Thus, the job title can be a good predictor of the outcome variable.<jupyter_code>table=pd.crosstab(data.marital,data.y)
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True)
plt.title('Stacked Bar Chart of Marital Status vs Purchase')
plt.xlabel('Marital Status')
plt.ylabel('Proportion of Customers')<jupyter_output><empty_output><jupyter_text>Hard to see, but the marital status does not seem a strong predictor for the outcome variable.<jupyter_code>table=pd.crosstab(data.education,data.y)
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True)
plt.title('Stacked Bar Chart of Education vs Purchase')
plt.xlabel('Education')
plt.ylabel('Proportion of Customers')<jupyter_output><empty_output><jupyter_text>Education seems a good predictor of the outcome variable.<jupyter_code># Day of week may not be a good predictor of the outcome.
pd.crosstab(data.day_of_week,data.y).plot(kind='bar')
plt.title('Purchase Frequency for Day of Week')
plt.xlabel('Day of Week')
plt.ylabel('Frequency of Purchase')
# Month might be a good predictor of the outcome variable.
pd.crosstab(data.month,data.y).plot(kind='bar')
plt.title('Purchase Frequency for Month')
plt.xlabel('Month')
##plt.ylabel('Frequency of Purchase')
## Most customers of the bank in this dataset are in the age range of 30-40.
data.age.hist()
plt.title('Histogram of Age')
plt.xlabel('Age')
plt.ylabel('Frequency')
# Poutcome seems to be a good predictor of the outcome variable
pd.crosstab(data.poutcome,data.y).plot(kind='bar')
plt.title('Purchase Frequency for Poutcome')
plt.xlabel('Poutcome')
plt.ylabel('Frequency of Purchase')<jupyter_output><empty_output><jupyter_text>Create Dummy Variables ## Data Preprocessing
#### Create dummy variables, that is variables with only two values, zero and one.
##### In logistic regression models, encoding all of the independent variables as dummy variables allows easy interpretation and calculation of the odds ratios, and increases the stability and significance of the coefficients.<jupyter_code>cat_vars=['job','marital','education','default','housing','loan','contact','month','day_of_week','poutcome']
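# With dummy-coded predictors, exp(coefficient) for a category can be read as an odds ratio
# relative to that variable's baseline category, which is what makes the coefficients easy to interpret.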
for var in cat_vars:
cat_list='var'+'_'+var
cat_list = pd.get_dummies(data[var], prefix=var)
data1=data.join(cat_list)
data=data1
cat_vars=['job','marital','education','default','housing','loan','contact','month','day_of_week','poutcome']
data_vars=data.columns.values.tolist()
to_keep=[i for i in data_vars if i not in cat_vars]
data_final=data[to_keep]
data_final.columns.values
# Barplot for personal loan
sns.countplot(x="loan", data=data)
plt.show()
# Barplot for previous marketing campaign outcome
sns.countplot(x="poutcome", data=data)
plt.show()
# Over-sampling using SMOTE
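# SMOTE balances the training classes by generating synthetic minority-class samples,
# interpolating between each minority observation and its nearest minority-class neighbours.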
X = data_final.loc[:, data_final.columns != 'y']
y = data_final.loc[:, data_final.columns == 'y']
# from imblearn.over_sampling import SMOTE
os = SMOTE(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
columns = X_train.columns
os_data_X , os_data_y = os.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X,columns=columns )
os_data_y= pd.DataFrame(data=os_data_y,columns=['y'])
# we can Check the numbers of our data
print("length of oversampled data is ",len(os_data_X))
print("Number of no subscription in oversampled data",len(os_data_y[os_data_y['y']==0]))
print("Number of subscription",len(os_data_y[os_data_y['y']==1]))
print("Proportion of no subscription data in oversampled data is ",len(os_data_y[os_data_y['y']==0])/len(os_data_X))
print("Proportion of subscription data in oversampled data is ",len(os_data_y[os_data_y['y']==1])/len(os_data_X))
os_data_X.shape,os_data_y.shape
## Recursive Feature Elimination
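# RFE repeatedly fits the estimator and removes the weakest feature each round
# until only the requested number of features remains.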
data_final_vars=data_final.columns.values.tolist()
y=['y']
X=[i for i in data_final_vars if i not in y]
from sklearn import datasets
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(solver='liblinear')
rfe = RFE(logreg, 20)
rfe = rfe.fit(os_data_X, os_data_y.values.ravel())
print(rfe.support_)
print(rfe.ranking_)<jupyter_output>C:\Users\Lenovo\anaconda3\lib\site-packages\sklearn\utils\validation.py:68: FutureWarning: Pass n_features_to_select=20 as keyword args. From version 0.25 passing these as positional arguments will result in an error
warnings.warn("Pass {} as keyword args. From version 0.25 "
<jupyter_text>The Recursive Feature Elimination (RFE) has helped us select the following features: "previous", "euribor3m", "job_blue-collar", "job_retired", "job_services", "job_student", "default_no", "month_aug", "month_dec", "month_jul", "month_nov", "month_oct", "month_sep", "day_of_week_fri", "day_of_week_wed", "poutcome_failure", "poutcome_nonexistent", "poutcome_success".<jupyter_code>
cols=['euribor3m', 'job_blue-collar', 'job_housemaid', 'marital_unknown', 'education_illiterate', 'default_no', 'default_unknown',
'contact_cellular', 'contact_telephone', 'month_apr', 'month_aug', 'month_dec', 'month_jul', 'month_jun', 'month_mar',
'month_may', 'month_nov', 'month_oct', "poutcome_failure", "poutcome_success"]
X=os_data_X[cols]
y=os_data_y['y']
X_test = X_test[cols]
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X,y)
X.shape
y_pred = logreg.predict(X_test)
X_test.shape
# Confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(y_test, y_pred)
print(confusion_matrix)
# Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))<jupyter_output> precision recall f1-score support
0 0.93 0.92 0.93 10981
1 0.42 0.48 0.45 1376
accuracy 0.87 12357
macro avg 0.68 0.70 0.69 12357
weighted avg 0.88 0.87 0.87 12357
<jupyter_text>Interpretation:
For the subscription class (1), precision of 0.42 means that 42% of the customers the model flagged as likely subscribers actually subscribed, and recall of 0.48 means it identified 48% of the customers who really did subscribe; overall accuracy on the imbalanced test set is 0.87.<jupyter_code>## ROC CURVE
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()<jupyter_output><empty_output>
|
no_license
|
/notebooks/LOGISTIC_REGRESSION_DEMO_CODE.ipynb
|
BayanSchool/DataScienceSchoolBeginner
| 10 |
<jupyter_start><jupyter_text># Lesson 2
In the screencast for this lesson I go through a few scenarios for time series. This notebook contains the code for that with a few little extras! :)# Setup<jupyter_code>!pip install -U tf-nightly-2.0-preview
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)<jupyter_output><empty_output><jupyter_text># Trend and Seasonality<jupyter_code>def trend(time, slope=0):
return slope * time<jupyter_output><empty_output><jupyter_text>Let's create a time series that just trends upward:<jupyter_code>time = np.arange(4 * 365 + 1)
baseline = 10
series = trend(time, 0.1)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()<jupyter_output><empty_output><jupyter_text>Now let's generate a time series with a seasonal pattern:<jupyter_code>def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
baseline = 10
amplitude = 40
series = seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()<jupyter_output><empty_output><jupyter_text>Now let's create a time series with both trend and seasonality:<jupyter_code>slope = 0.05
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()<jupyter_output><empty_output><jupyter_text># NoiseIn practice few real-life time series have such a smooth signal. They usually have some noise, and the signal-to-noise ratio can sometimes be very low. Let's generate some white noise:<jupyter_code>def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(time, noise)
plt.show()<jupyter_output><empty_output><jupyter_text>Now let's add this white noise to the time series:<jupyter_code>series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()<jupyter_output><empty_output><jupyter_text>All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000.<jupyter_code>split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
def autocorrelation(time, amplitude, seed=None):
rnd = np.random.RandomState(seed)
φ1 = 0.5
φ2 = -0.1
ar = rnd.randn(len(time) + 50)
ar[:50] = 100
for step in range(50, len(time) + 50):
ar[step] += φ1 * ar[step - 50]
ar[step] += φ2 * ar[step - 33]
return ar[50:] * amplitude
def autocorrelation(time, amplitude, seed=None):
rnd = np.random.RandomState(seed)
φ = 0.8
ar = rnd.randn(len(time) + 1)
for step in range(1, len(time) + 1):
ar[step] += φ * ar[step - 1]
return ar[1:] * amplitude
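# This version simulates an AR(1)-style process: each value adds φ times the previous value
# to fresh Gaussian noise, which is what creates the autocorrelation visible in the plots below.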
series = autocorrelation(time, 10, seed=42)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
series2 = autocorrelation(time, 5, seed=42) + seasonality(time, period=50, amplitude=2) + trend(time, -1) + 550
series[200:] = series2[200:]
#series += noise(time, 30)
plot_series(time[:300], series[:300])
plt.show()
def impulses(time, num_impulses, amplitude=1, seed=None):
rnd = np.random.RandomState(seed)
impulse_indices = rnd.randint(len(time), size=10)
series = np.zeros(len(time))
for index in impulse_indices:
series[index] += rnd.rand() * amplitude
return series
series = impulses(time, 10, seed=42)
plot_series(time, series)
plt.show()
def autocorrelation(source, φs):
ar = source.copy()
max_lag = len(φs)
for step, value in enumerate(source):
for lag, φ in φs.items():
if step - lag > 0:
ar[step] += φ * ar[step - lag]
return ar
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.99})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.70, 50: 0.2})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
series_diff1 = series[1:] - series[:-1]
plot_series(time[1:], series_diff1)
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(series, order=(5, 1, 0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
df = pd.read_csv("sunspots.csv", parse_dates=["Date"], index_col="Date")
series = df["Monthly Mean Total Sunspot Number"].asfreq("1M")
series.head()
series.plot(figsize=(12, 5))
series["1995-01-01":].plot()
series.diff(1).plot()
plt.axis([0, 100, -50, 50])
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
autocorrelation_plot(series.diff(1)[1:])
autocorrelation_plot(series.diff(1)[1:].diff(11 * 12)[11*12+1:])
plt.axis([0, 500, -0.1, 0.1])
autocorrelation_plot(series.diff(1)[1:])
plt.axis([0, 50, -0.1, 0.1])
116.7 - 104.3
[series.autocorr(lag) for lag in range(1, 50)]
from pandas.plotting import autocorrelation_plot
series_diff = series
for lag in range(50):
series_diff = series_diff[1:] - series_diff[:-1]
autocorrelation_plot(series_diff)
import pandas as pd
series_diff1 = pd.Series(series[1:] - series[:-1])
autocorrs = [series_diff1.autocorr(lag) for lag in range(1, 60)]
plt.plot(autocorrs)
plt.show()<jupyter_output><empty_output>
|
non_permissive
|
/Sequences-Time-Series-and-Prediction/week1/lecture-notebooks/S+P_Week_1_Lesson_2.ipynb
|
AnuragAnalog/TensorFlow-in-Practice
| 8 |
<jupyter_start><jupyter_text># PCA step by step
[based on this article](http://sebastianraschka.com/Articles/2014_pca_step_by_step.html)
reducing dimensions of dataset<jupyter_code>import numpy as np
np.random.seed(2137) # random seed for consistency
# A reader pointed out that Python 2.7 would raise a
# "ValueError: object of too small depth for desired array".
# This can be avoided by choosing a smaller random seed, e.g. 1
# or by completely omitting this line, since I just used the random seed for
# consistency.
mu_vec1 = np.array([0,0,0])
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20).T
assert class1_sample.shape == (3,20), "The matrix has not the dimensions 3x20"
mu_vec2 = np.array([1,1,1])
cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20).T
assert class2_sample.shape == (3,20), "The matrix has not the dimensions 3x20"<jupyter_output><empty_output><jupyter_text>40 3-dimensional samples were randomly drawn from multivariate Gaussian distribution<jupyter_code>%pylab inline
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
plt.rcParams['legend.fontsize'] = 10
ax.plot(class1_sample[0,:], class1_sample[1,:], class1_sample[2,:], 'o', markersize=8, color='blue', alpha=0.5, label='class1')
ax.plot(class2_sample[0,:], class2_sample[1,:], class2_sample[2,:], '^', markersize=8, alpha=0.5, color='red', label='class2')
plt.title('Samples for class 1 and class 2')
ax.legend(loc='upper right')
plt.show()
<jupyter_output>Populating the interactive namespace from numpy and matplotlib
<jupyter_text># 1. Taking the whole dataset ignoring the class labels
Merging two datasets into one<jupyter_code>all_samples = np.concatenate((class1_sample, class2_sample), axis=1)
assert all_samples.shape == (3,40), "The matrix has not the dimensions 3x40"<jupyter_output><empty_output><jupyter_text># 2. Computing the d-dimensional mean vector
`np.mean()` simply calculates the arithmetic mean.
The arithmetic mean is the sum of the elements along the axis divided by the number of elements.<jupyter_code>mean_x = np.mean(all_samples[0,:])
mean_y = np.mean(all_samples[1,:])
mean_z = np.mean(all_samples[2,:])
mean_vector = np.array([[mean_x],[mean_y],[mean_z]])
print('Mean Vector:\n', mean_vector)<jupyter_output>Mean Vector:
[[ 0.53519524]
[ 0.58395008]
[ 0.59253692]]
<jupyter_text># 3. Computing the Scatter Matrix
The scatter matrix is computed by the following equation (`eq1`):
$$S = \sum\limits_{k=1}^n (\pmb x_k - \pmb m)\;(\pmb x_k - \pmb m)^T$$
where $\pmb m$ is the mean vector
$$\pmb m = \frac{1}{n} \sum\limits_{k=1}^n \; \pmb x_k$$
`().T` - returns the transpose of the matrix
`.dot()` - matrix multiplication
`.reshape()` - changes e.g.
[ 0.0462964 -0.92260395 0.46958469]
to
[[ 0.0462964 ]
[-0.92260395]
[ 0.46958469]]
Then `mean_vector` is subtracted from each of these reshaped column vectors<jupyter_code>scatter_matrix = np.zeros((3,3)) # returns array filled with zeros
for i in range(all_samples.shape[1]): # run 40 times
eq1 = (all_samples[:,i].reshape(3,1) - mean_vector).dot((all_samples[:,i].reshape(3,1) - mean_vector).T)
# print(i)
# print(all_samples[:,i])
# print(all_samples[:,i].reshape(3,1))
# print(all_samples[:,i].reshape(3,1) - mean_vector)
# print(eq1)
scatter_matrix += eq1
print('Scatter Matrix:\n', scatter_matrix)<jupyter_output>Scatter Matrix:
[[ 70.18137407 17.59173308 9.88586418]
[ 17.59173308 48.58839528 10.1332849 ]
[ 9.88586418 10.1332849 56.65396398]]
<jupyter_text># Computing the Covariance Matrix (alternatively to the scatter matrix)<jupyter_code>cov_mat = np.cov([all_samples[0,:],all_samples[1,:],all_samples[2,:]])
print('Covariance Matrix:\n', cov_mat)<jupyter_output>Covariance Matrix:
[[ 1.79952241 0.45107008 0.2534837 ]
[ 0.45107008 1.24585629 0.25982782]
[ 0.2534837 0.25982782 1.45266574]]
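A quick consistency check: with $n = 40$ samples the covariance matrix is simply the scatter matrix scaled by $\frac{1}{n-1}$,

$$\Sigma = \frac{1}{n-1} S = \frac{1}{39} S$$

e.g. $70.18 / 39 \approx 1.80$, matching the first diagonal entry above. This is why, in the next step, both matrices give identical eigenvectors while the eigenvalues differ only by the constant factor 39.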
<jupyter_text># 4. Computing eigenvectors and corresponding eigenvalues<jupyter_code># eigenvectors and eigenvalues for the from the scatter matrix
eig_val_sc, eig_vec_sc = np.linalg.eig(scatter_matrix)
# eigenvectors and eigenvalues for the from the covariance matrix
eig_val_cov, eig_vec_cov = np.linalg.eig(cov_mat)
for i in range(len(eig_val_sc)):
eigvec_sc = eig_vec_sc[:,i].reshape(1,3).T
eigvec_cov = eig_vec_cov[:,i].reshape(1,3).T
assert eigvec_sc.all() == eigvec_cov.all(), 'Eigenvectors are not identical'
print('Eigenvector {}: \n{}'.format(i+1, eigvec_sc))
print('Eigenvalue {} from scatter matrix: {}'.format(i+1, eig_val_sc[i]))
print('Eigenvalue {} from covariance matrix: {}'.format(i+1, eig_val_cov[i]))
print('Scaling factor: ', eig_val_sc[i]/eig_val_cov[i])
print(40 * '-')<jupyter_output>Eigenvector 1:
[[-0.77408173]
[-0.47378443]
[-0.41991164]]
Eigenvalue 1 from scatter matrix: 86.31129705178947
Eigenvalue 1 from covariance matrix: 2.2131101808151152
Scaling factor: 39.0
----------------------------------------
Eigenvector 2:
[[-0.49416083]
[ 0.03757666]
[ 0.86855804]]
Eigenvalue 2 from scatter matrix: 51.467860526292775
Eigenvalue 2 from covariance matrix: 1.3196887314434045
Scaling factor: 39.0
----------------------------------------
Eigenvector 3:
[[ 0.3957304 ]
[-0.8798388 ]
[ 0.26321311]]
Eigenvalue 3 from scatter matrix: 37.64457575841267
Eigenvalue 3 from covariance matrix: 0.9652455322669924
Scaling factor: 39.0
----------------------------------------
<jupyter_text>Checking the eigenvector-eigenvalue calculation<jupyter_code>for i in range(len(eig_val_sc)):
eigv = eig_vec_sc[:,i].reshape(1,3).T
np.testing.assert_array_almost_equal(scatter_matrix.dot(eigv), eig_val_sc[i] * eigv,
decimal=6, err_msg='', verbose=True)<jupyter_output><empty_output><jupyter_text>Visualizing the eigenvectors<jupyter_code>%pylab inline
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
ax.plot(all_samples[0,:], all_samples[1,:], all_samples[2,:], 'o', markersize=8, color='green', alpha=0.2)
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
for v in eig_vec_sc.T:
a = Arrow3D([mean_x, v[0]], [mean_y, v[1]], [mean_z, v[2]], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
ax.add_artist(a)
ax.set_xlabel('x_values')
ax.set_ylabel('y_values')
ax.set_zlabel('z_values')
plt.title('Eigenvectors')
plt.show()<jupyter_output>Populating the interactive namespace from numpy and matplotlib
<jupyter_text># 5.1. Sorting the eigenvectors by decreasing eigenvaluesIn order to decide which eigenvector(s) we want to drop for our lower-dimensional subspace, we have to take a look at the corresponding eigenvalues of the eigenvectors. Roughly speaking, the eigenvectors with the lowest eigenvalues bear the least information about the distribution of the data, and those are the ones we want to drop.<jupyter_code># Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
for i in eig_pairs:
print(i[0])<jupyter_output>86.3112970518
51.4678605263
37.6445757584
<jupyter_text># 5.2. Choosing k eigenvectors with the largest eigenvalues<jupyter_code>matrix_w = np.hstack((eig_pairs[0][1].reshape(3,1), eig_pairs[1][1].reshape(3,1)))
print('Matrix W:\n', matrix_w)<jupyter_output>Matrix W:
[[-0.77408173 -0.49416083]
[-0.47378443 0.03757666]
[-0.41991164 0.86855804]]
<jupyter_text># 6. Transforming the samples onto the new subspace<jupyter_code>transformed = matrix_w.T.dot(all_samples)
assert transformed.shape == (2,40), "The matrix is not 2x40 dimensional."
plt.plot(transformed[0,0:20], transformed[1,0:20], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(transformed[0,20:40], transformed[1,20:40], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.legend()
plt.title('Transformed samples with class labels')
plt.show()<jupyter_output><empty_output><jupyter_text># Using the PCA() class from the matplotlib.mlab library
Attrs:
a : a centered unit sigma version of input a
numrows, numcols: the dimensions of a
mu : a numdims array of means of a
sigma : a numdims array of standard deviation of a
fracs : the proportion of variance of each of the principal components
Wt : the weight vector for projecting a numdims point or array into PCA space
Y : a projected into PCA space<jupyter_code>from matplotlib.mlab import PCA as mlabPCA
mlab_pca = mlabPCA(all_samples.T)
print('PC axes in terms of the measurement axes scaled by the standard deviations:\n', mlab_pca.Wt)
plt.plot(mlab_pca.Y[0:20,0],mlab_pca.Y[0:20,1], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(mlab_pca.Y[20:40,0], mlab_pca.Y[20:40,1], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend()
plt.title('Transformed samples with class labels from matplotlib.mlab.PCA()')
plt.show()<jupyter_output>PC axes in terms of the measurement axes scaled by the standard deviations:
[[ 0.60410351 0.62814178 0.49040479]
[-0.4399716 -0.25020203 0.86245228]
[ 0.66444258 -0.73677463 0.12521662]]
<jupyter_text># Using the PCA() class from the sklearn.decomposition library to confirm our results<jupyter_code>from sklearn.decomposition import PCA as sklearnPCA
sklearn_pca = sklearnPCA(n_components=2)
sklearn_transf = sklearn_pca.fit_transform(all_samples.T)
plt.plot(sklearn_transf[0:20,0],sklearn_transf[0:20,1], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(sklearn_transf[20:40,0], sklearn_transf[20:40,1], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend()
plt.title('Transformed samples with class labels from matplotlib.mlab.PCA()')
plt.show()<jupyter_output><empty_output><jupyter_text>The plot above seems to be the exact mirror image of the plot from our step by step approach. This is because the signs of the eigenvectors can be either positive or negative; since the eigenvectors are scaled to unit length 1, we can simply multiply the transformed data by ×(−1) to revert the mirror image.# Step by step vs sklearn PCA() comparison<jupyter_code>sklearn_transf = sklearn_transf
# sklearn.decomposition.PCA
plt.plot(sklearn_transf[0:20,0]*(-1),sklearn_transf[0:20,1], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(sklearn_transf[20:40,0]*(-1), sklearn_transf[20:40,1], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend()
plt.title('Transformed samples via sklearn.decomposition.PCA')
plt.show()
# step by step PCA
plt.plot(transformed[0,0:20], transformed[1,0:20], 'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(transformed[0,20:40], transformed[1,20:40], '^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.legend()
plt.title('Transformed samples step by step approach')
plt.show()<jupyter_output><empty_output>
|
no_license
|
/PCA/PCA step by step.ipynb
|
piotrek-k/PythonMLExperiments
| 15 |
<jupyter_start><jupyter_text># Data Cleaning## Detecting and handling Null values<jupyter_code>sns.heatmap(data.isnull(), cmap='viridis')
data.dropna(inplace=True, axis=0)
sns.heatmap(data.isnull(), cmap='viridis')
sns.heatmap(datatest.isnull())
datatest.dropna(inplace=True, axis=0)
sns.heatmap(datatest.isnull())<jupyter_output><empty_output><jupyter_text>## Handling categorical values<jupyter_code>data['Gender_Male'] = pd.get_dummies(data.Gender, prefix='Gender', drop_first=True)
datatest['Gender_Male'] = pd.get_dummies(datatest.Gender, prefix='Gender', drop_first=True)
data.head()<jupyter_output><empty_output><jupyter_text># Data Visualisation<jupyter_code>plt.figure(figsize=(11,7))
sns.countplot(data.Dataset, hue=data.Gender_Male)
plt.figure(figsize=(11,7))
sns.heatmap(finaldata.corr(), annot=True)
finaldata = data.drop(columns=['Gender'])
finaltestdata = datatest.drop(columns=['Gender'])
X.head()
Xreal.head()<jupyter_output><empty_output><jupyter_text># Train Test Split<jupyter_code>X = finaldata.drop(columns=['Direct_Bilirubin', 'Alkaline_Phosphotase', 'Dataset'])
# X = finaldata.drop(columns=['Age', 'Total_Bilirubin','Direct_Bilirubin', 'Alkaline_Phosphotase','Alamine_Aminotransferase', 'Aspartate_Aminotransferase', 'Dataset'])
y = finaldata['Dataset']
Xreal = finaltestdata.drop(columns=['Direct_Bilirubin', 'Alkaline_Phosphotase'])
Xreal.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)<jupyter_output><empty_output><jupyter_text>## Logistic Regression<jupyter_code>from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
pred = logmodel.predict(X_test)
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# z = pred == y_test
# z.value_counts()/len(z)
len(X_test)
len(Xreal)
print(classification_report(y_test, pred))
print(accuracy_score(y_test, pred))<jupyter_output> precision recall f1-score support
1 0.79 0.83 0.81 106
2 0.33 0.27 0.30 33
avg / total 0.68 0.70 0.69 139
0.697841726618705
<jupyter_text>## Decision Trees and Random Forests<jupyter_code>from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
dt_pred = dtree.predict(X_test)
print(confusion_matrix(y_test, dt_pred))
print(classification_report(y_test, dt_pred))
print(accuracy_score(y_test, dt_pred))
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=1000, criterion='entropy')
rfc.fit(X_train,y_train)
predictions = rfc.predict(X_test)
print(confusion_matrix(y_test,predictions))<jupyter_output>[[90 16]
[23 10]]
<jupyter_text>## Using K-Nearest-Neighbours<jupyter_code>from sklearn.neighbors import KNeighborsClassifier
error_score = []
for k in range(1,40):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
pred_k = knn.predict(X_test)
error_score.append(np.mean(pred_k != y_test))
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_score,color='blue', linestyle='dashed', marker='o',
markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
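# Choose k where the error rate in the plot above is lowest and stable; k=8 is used below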
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(X_train, y_train)
pred_k = knn.predict(X_test)
print(confusion_matrix(y_test, pred_k))
print(classification_report(y_test, pred_k))
print(accuracy_score(y_test, pred_k))<jupyter_output>[[92 14]
[24 9]]
precision recall f1-score support
1 0.79 0.87 0.83 106
2 0.39 0.27 0.32 33
avg / total 0.70 0.73 0.71 139
0.7266187050359713
<jupyter_text>## Support vector machine<jupyter_code>from sklearn.svm import SVC
spp = SVC()
spp.fit(X_train, y_train)
svm_pred = spp.predict(X_test)
print(confusion_matrix(y_test, svm_pred))
print(classification_report(y_test, svm_pred))
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1,1, 10, 100, 1000], 'gamma': [1,0.1,0.01,0.001,0.0001]}
grid = GridSearchCV(SVC(),param_grid,refit=True,verbose=3)
grid.fit(X_train, y_train)
grid.best_params_
grid_pred = grid.predict(Xreal)
print(confusion_matrix(y_test, svm_pred))
print(accuracy_score(y_test, svm_pred))<jupyter_output>0.7913669064748201
<jupyter_text># Hence we can achieve maximum accuracy by training SVM Model### Exporting Our Prediction CSV<jupyter_code>Predicted = pd.read_csv('test.csv')
new_series = pd.Series(grid_pred)
Answer = pd.concat([Predicted, new_series], axis=1)
Answer.rename(columns={0: 'Prediction'}, inplace=True)
export_csv = Answer.to_csv (r'C:\Users\hp\Desktop\Machine Learning Assignment\Assignment 2\Assignment 2\problem_1\AnswerProblem1.csv', index = None, header=True)<jupyter_output><empty_output>
|
no_license
|
/problem_1/.ipynb_checkpoints/Problemdata 1-checkpoint.ipynb
|
swatisalwan/MachineLearningAssignment
| 9 |
<jupyter_start><jupyter_text>Make a two-player Rock-Paper-Scissors game. (Hint: Ask for player plays (using input), compare them, print out a message of congratulations to the winner, and ask if the players want to start a new game.)
Remember the rules:
- Rock beats scissors
- Scissors beats paper
- Paper beats rock<jupyter_code>print(" START GAME ")
play = True
while(play==True):
player_1 = input("Enter rock, paper, or scissors ? ")
player_2 = input("Enter rock, paper, or scissors ? ")
if player_1 == "rock":
if player_2 == "rock":
print("It's a tie")
elif player_2 == "scissors":
print("rock beats scissors, Congratulations Player 1 wins!")
elif player_1 == "paper":
print("rock beats paper, Congratulations Player 2 wins!")
elif player_1 == "paper":
if player_2 == "paper":
print("It's a tie")
elif player_1 == "scissors":
print(" scissors beat paper, Congratulations Player 2 wins!")
elif player_1 == "rock":
print("rock beats paper, Congratulations Player 1 wins!")
elif player_1 == "scissors":
if player_2 == "scissors":
print(" It's a tie")
elif player_2 == "paper":
print("scissors beats paper, Congratulations Player 1 wins!")
elif player_2 == "rock":
print(" rock beats scissors, Congratulations Player 2 wins!")
play_again = input("Would you like to start a new game? Yes or No? ")
if play_again == "Yes":
play = True
else:
play = False <jupyter_output> START GAME
Enter rock, paper, or scissors ? roc
Enter rock, paper, or scissors ? paper
scissors beats paper, Congratulations Player 1 wins!
Enter rock, paper, or scissors ? paper
Enter rock, paper, or scissors ? rock
rock beats scissors, Congratulations Player 2 wins!
Enter rock, paper, or scissors ? rock
Enter rock, paper, or scissors ? rock
It's a tie
Enter rock, paper, or scissors ? scissors
Enter rock, paper, or scissors ? rock
scissors beat paper, Congratulations Player 2 wins!
|
no_license
|
/Python_Project.ipynb
|
LatoyaInnocent/Python
| 1 |
<jupyter_start><jupyter_text>### Technical Analysis
Technical analysis is the practice of using historical price movements and statistics, often visualised in charts, to predict future price movement and trends.
In this notebook I will code three of the most popular indicators: Bollinger Bands, the Relative Strength Index (RSI), and the Moving Average Convergence-Divergence (MACD) indicator. I then provide a brief introduction to the Matplotlib finance package.<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import datetime as dt
from matplotlib import style
import pandas_datareader.data as web<jupyter_output><empty_output><jupyter_text>First read in the stock's historic price data. For the purposes of this notebook I shall limit the data to the period from the start of 2020 to today.<jupyter_code>start_date = dt.datetime(2020, 1, 1)
end_date = dt.datetime.now()
stock = "GOOG"
df = web.DataReader(stock, 'yahoo', start_date, end_date)
df.tail()<jupyter_output><empty_output><jupyter_text>Display the historic adjusted close price to visualise the trend.<jupyter_code>plt.figure(figsize= (10,4))
plt.grid()
plt.title('Adjusted Close of {:s}' .format(stock))
plt.plot(df['Adj Close'])
plt.show()
<jupyter_output><empty_output><jupyter_text>### Bollinger Bands
Calculate the Moving Average, and the Upper and Lower Bollinger bands and plot them against the closing price.
I will limit the plot to the last few months of the data so that it is clearer to see the interaction between the series.<jupyter_code>df_bb = df['2020-7' :'2020-11'].copy()
df_bb['MA20'] = df_bb['Adj Close'].rolling(window=20).mean()
df_bb['20_day_stddev'] = df_bb['Adj Close'].rolling(window=20).std()
df_bb['BB Upper'] = df_bb['MA20'] + (2 * df_bb['20_day_stddev'])
df_bb['BB Lower'] = df_bb['MA20'] - (2 * df_bb['20_day_stddev'])
plt.figure(figsize= (12,7))
plt.title('Adjusted Close of {:s}' .format(stock))
plt.grid()
plt.plot(df_bb['Adj Close'], label = 'Close')
plt.plot(df_bb['MA20'], label = '20 Day Moving Average')
plt.plot(df_bb['BB Upper'], label = 'Upper BB')
plt.plot(df_bb['BB Lower'], label = 'Lower BB')
plt.axis('tight')
plt.legend()
plt.show()
<jupyter_output><empty_output><jupyter_text>At first glance, the price of the stock appears to remain within the upper and lower bands, however if you sold when the stock first breached the upper band, at around mid-August you would have missed an advance of the stock from about 1560 to above 1700, similarly, if you sold at the start of November you would have missed an advance of about 100. Bollinger Bands, like all technical analysis tools, should not be used alone, but should be used alongside other tools to guide your decision making.### Relative Strength Index
Developed by J. Welles Wilder, the Relative Strength Index (RSI) is a momentum indicator that measures the magnitude and direction of price changes to determine if a stock is overbought or oversold. The RSI oscillates between 0 and 100; the stock is considered overbought if the index is above 70, and oversold if the index is below 30.<jupyter_code># calculate the initial RSI values. Note: the time series should have a length of window + 1,
# as the difference is taken, the first value will be a NAN, so for a window of 14 then 15 price values will be required
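# For reference, the standard Wilder formulation computed further below:
#   RS  = average gain / average loss over the look-back window
#   RSI = 100 - 100 / (1 + RS)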
def initial_rsi_value(series):
diff = series.pct_change()[1:]
window_size = len(diff)
avg_gain = diff[diff > 0].sum() / window_size
avg_loss = diff[diff < 0].sum() / window_size
# The formula uses a positive value for the average loss.
return avg_gain,avg_loss * -1<jupyter_output><empty_output><jupyter_text>Calculate the RSI. Details of the calculation method can be found on the Relative Strength Index (RSI) page of Investopedia.<jupyter_code>window_size = 14
# initial_offset accounts for the fact that the first value of the diff is NaN and is discarded
initial_offset = window_size + 1
df_rsi = df['2020-7' :'2020-11'].copy()
# The average gain or loss used in the calculation is the average percentage gain or loss over the look-back window.
adj_close_diff = df_rsi['Adj Close'].pct_change()
# one RSI value for every row from index `window_size` onwards
len_rsi = len(df_rsi) - window_size
avg_gains = np.zeros(len_rsi)
avg_losses = np.zeros(len_rsi)
# the average gain and loss for the first 14 days are calculated differently from the subsequent smoothed values
first_avg_gain, first_avg_loss = initial_rsi_value(df_rsi['Adj Close'].iloc[:initial_offset])
avg_gains[0] = first_avg_gain
avg_losses[0] = first_avg_loss
for i in range(1,len_rsi):
    # positional indexing: the diff at row (window_size + i) feeds the i-th smoothed value
    current_diff = adj_close_diff.iloc[window_size + i]
    if current_diff > 0:
        current_gain = current_diff
        current_loss = 0
    else:
        current_gain = 0
        current_loss = current_diff * -1
    # Average Gain = [(previous Average Gain) x 13 + current Gain] / 14
    avg_gains[i] = ((avg_gains[i - 1] * (window_size - 1)) + current_gain) / window_size
    avg_losses[i] = ((avg_losses[i - 1] * (window_size - 1)) + current_loss) / window_size
# keep only the rows that have an RSI value and add the RSI column to df_rsi
df_rsi = df_rsi.iloc[window_size:]
df_rsi = df_rsi.drop(['High','Open','Low','Close','Volume'], axis = 1)
df_rsi['RSI'] = 100 - (100 / (1 + (avg_gains / avg_losses)))<jupyter_output><empty_output><jupyter_text>Plot the Relative Strength Index and the adjusted closing price. We can see that for this particular period and stock the RSI was a good indicator of price movement. From Aug 23 to Sep 02 the RSI was above 70, indicating that the stock was overbought and hinting at the decline in price that commenced on Sep 02. Around Nov 3rd the RSI again hit the 70 line, indicating a possible overbought scenario; this was reflected in the price as the recent climb came to an end.<jupyter_code>plt.figure(figsize= (8,5))
plt.title('Relative Strength Index: {}' .format(stock))
plt.plot(df_rsi['RSI'], label = '{} day RSI' .format(window_size))
plt.legend()
plt.grid()
plt.hlines(y=70,xmin=df_rsi.index[0],xmax=df_rsi.index[-1],colors='green',linestyles='dashed')
plt.hlines(y=30,xmin=df_rsi.index[0],xmax=df_rsi.index[-1],colors='orange',linestyles='dashed')
plt.ylim(0, 100)  # the ymin/ymax keyword arguments were removed in Matplotlib 3.0
plt.yticks(np.arange(0,110,10))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=10))
plt.gcf().autofmt_xdate()
plt.show()
plt.figure(figsize= (8,3))
plt.grid()
plt.plot(df_rsi['Adj Close'], color='#07afd9' ,label='Adjusted Close')
plt.legend()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=10))
plt.gcf().autofmt_xdate()
plt.show()<jupyter_output><empty_output><jupyter_text>### MACD
The MACD, an acronym for Moving Average Convergence Divergence, is a popular technical analysis indicator. Created by Gerald Appel in 1979, it owes its popularity to its ability to act as both a trend indicator and a momentum indicator.
The MACD is composed of three moving averages, usually set to 12, 26, and 9 days. The MACD line is the 12-day Exponential Moving Average (EMA) minus the 26-day EMA, and a 9-day EMA of the MACD line acts as a signal line. Because the faster moving average reacts to price movements more quickly than the slower one, a crossover of the two lines can indicate a shift in trend: the MACD crossing above the signal line suggests an upward trend, and the MACD crossing below the signal line suggests a downward trend. The momentum of the trend can be visualised by the convergence or divergence of the MACD and signal lines, which is often plotted as a histogram.
One drawback of the MACD is that it is built from moving averages and is therefore a lagging indicator. This means it may not be a suitable tool for analysing volatile or non-trending stocks, as the indicated trend may be over by the time it is signalled. The charts below suggest the MACD may be useful in analysing GOOG stock: the price declines commencing at the end of Feb and the start of Sep are both indicated by the MACD crossing below the signal line, and the subsequent recoveries are indicated by the MACD crossing above the signal line. <jupyter_code>ema12 = df['Adj Close'].ewm(span=12, adjust=False).mean()
ema26 = df['Adj Close'].ewm(span=26, adjust=False).mean()
macd = ema12-ema26
signal = macd.ewm(span=9, adjust=False).mean()
hist = macd - signal
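# crossover rule from the text above: hist > 0 (MACD above the signal line) suggests upward
# momentum, hist < 0 suggests downward momentum; hist is what the shaded areas show below.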
plt.figure(figsize= (8,6))
plt.title('MACD(12,26,9) {}' .format(stock))
plt.plot( macd, label='MACD', color = 'blue')
plt.plot( signal, label='Signal Line', color='yellow')
plt.legend()
plt.grid()
plt.hlines(y=0,xmin=df.index[0],xmax=df.index[-1],colors='#3a3b39',lw=.8)
plt.fill_between(hist.index,0,hist,where=hist >= 0,color='#1aeda3', interpolate=True,alpha=.5)
plt.fill_between(hist.index,0,hist,where=hist < 0,color='#f78a72', interpolate=True,alpha=.5)
plt.show()
plt.figure(figsize= (8,3))
plt.grid()
plt.plot(df['Adj Close'], color='#07afd9' ,label='Adjusted Close')
plt.legend()
plt.show()
<jupyter_output><empty_output><jupyter_text>### Matplotlib finance package
While you can code technical analysis indicators by hand, there are many purpose-built technical analysis packages in Python. Below I plot a candlestick chart and a Renko chart using the Matplotlib finance package (mplfinance). <jupyter_code>import mplfinance as mpf
# plot candlestick chart
df_ohlc = df['2020-10' :'2020-11']
mpf.plot(df_ohlc, type='candle', style='yahoo',
title='Candlestick chart: {}' .format(stock),
ylabel='Price ($)',
         ylabel_lower='Volume',
volume=True,
mav=(3,6,9))
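# mav=(3,6,9) overlays 3-, 6- and 9-period moving averages, and volume=True adds a volume panel;
# a savefig='filename.png' argument could be passed to write the chart to a file instead of showing it.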
# plot renko chart
mpf.plot(df, type='renko', style='charles',
title='Renko chart: {}' .format(stock),
ylabel='Price ($)',
         ylabel_lower='Volume',
volume=True,
mav=(10))
<jupyter_output><empty_output>
| no_license | /Technical Analysis.ipynb | Adrian-OReilly/NoteBooks | 9 |
<jupyter_start><jupyter_text>Type Percent<jupyter_code>#% of Total Fares by City Type
# NOTE: pyber_df (the merged ride/city data), pyber_dataframe, urban, suburban, rural and city_df
# are assumed to be defined in earlier cells of this notebook, which are not shown in this excerpt.
import matplotlib.pyplot as plt
city_by_type = pyber_df.groupby('type')
# groupby sorts the 'type' index alphabetically (Rural, Suburban, Urban), so reindex the totals
# to match the hard-coded label order used below.
total_fare = city_by_type['fare'].sum().reindex(['Urban','Suburban','Rural'])
labels = ['Urban','Suburban','Rural']
colors = ['lightcoral',"lightskyblue","gold"]
explode = [0, 0, .3]
plt.pie(total_fare, labels=labels, colors=colors, explode = explode,
    autopct="%1.1f%%", shadow=True, startangle=120)
plt.title('% of Total Fares by City Type')
plt.axis('equal')
plt.show()
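# For context, a minimal sketch (file names are assumptions, not from this excerpt) of how the
# merged DataFrame used above is typically built in the earlier cells:
# city_df = pd.read_csv('city_data.csv')
# ride_df = pd.read_csv('ride_data.csv')
# pyber_df = pd.merge(ride_df, city_df, how='left', on='city')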
#% of Total Rides by City Type
percentage_urban = (urban['Total Number of Rides'].sum()/pyber_dataframe['Total Number of Rides'].sum())*100
percentage_suburban = (suburban['Total Number of Rides'].sum()/pyber_dataframe['Total Number of Rides'].sum())*100
percentage_rural = (rural['Total Number of Rides'].sum()/pyber_dataframe['Total Number of Rides'].sum())*100
labels = ['Urban','Suburban','Rural']
colors = ['lightcoral',"lightskyblue","gold"]
explode = [0, 0, .3]
total_rides_percentage = [percentage_urban,percentage_suburban,percentage_rural]
plt.pie(total_rides_percentage, labels=labels, colors=colors, explode = explode,
autopct="%1.1f%%", shadow=True, startangle=120)
plt.title('% of Total Rides by City Type')
plt.axis('equal')
plt.show()
#% of Total Drivers by City Type
percentage_urban = (urban['Total Number of Drivers'].sum()/city_df['driver_count'].sum())*100
percentage_suburban = (suburban['Total Number of Drivers'].sum()/city_df['driver_count'].sum())*100
percentage_rural = (rural['Total Number of Drivers'].sum()/city_df['driver_count'].sum())*100
labels = ['Urban','Suburban','Rural']
colors = ['lightcoral',"lightskyblue","gold"]
total_rides_percentage = [percentage_urban,percentage_suburban,percentage_rural]
plt.pie(total_rides_percentage, labels=labels, colors=colors,explode = explode,
autopct="%1.1f%%", shadow=True, startangle=150)
plt.title('% of Total Drivers by City Type')
plt.show()<jupyter_output><empty_output>
| no_license | /Homework/05-Matplotlib Homework/script/Pyber.ipynb | rubendario117/RubenColmenares | 1 |
<jupyter_start><jupyter_text><jupyter_code>from google.colab import drive
drive.mount('/content/drive')
import cv2 as cv
from google.colab.patches import cv2_imshow
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
img = cv.imread('/content/drive/MyDrive/PVCK/female.tiff')
img_rgb = cv.cvtColor(img,cv.COLOR_BGR2RGB) # convert channels BGR -> RGB
plt.imshow(img_rgb)<jupyter_output><empty_output><jupyter_text>**-- PRACTICUM ASSIGNMENT --**<jupyter_code># No.1
print(' Gamma Correction on an image')
print('------------------------------')
try:
  gamma = int(input('Enter the gamma value: '))
except ValueError:
  print('Error, not a number')
original = cv.cvtColor(img_rgb,cv.COLOR_BGR2RGB)
gamma_image = np.zeros(original.shape, original.dtype)
for y in range(original.shape[0]):
for x in range(original.shape[1]):
for c in range(original.shape[2]):
gamma_image[y,x,c] = np.clip(255 * pow(original[y,x,c] / 255, (1/gamma)), 0, 255)
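# NOTE (optional alternative, not in the original): the triple loop above can be replaced by a
# 256-entry lookup table, which is much faster for 8-bit images, e.g.:
# lut = np.clip(255 * (np.arange(256) / 255) ** (1 / gamma), 0, 255).astype(np.uint8)
# gamma_image = cv.LUT(original, lut)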
final_frame = cv.hconcat((original, gamma_image))
cv2_imshow(final_frame)
# No.2
original = cv.cvtColor(img_rgb,cv.COLOR_BGR2RGB)
gamma_image = np.zeros(original.shape, original.dtype)
print(' Image Depth Simulation ')
print('------------------------')
try:
  bd = float(input('Enter the bit depth value: '))
except ValueError:
print('Error, not a number')
bit = 2**bd
level = 255/(bit-1)
for y in range(original.shape[0]):
for x in range(original.shape[1]):
for c in range(original.shape[2]):
gamma_image[y,x,c] = np.clip(round(original[y,x,c] / level) * level, 0, 255);
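# NOTE (optional alternative, not in the original): the same quantisation can be done without
# the per-pixel loops using vectorised NumPy, e.g.:
# gamma_image = np.clip(np.round(original / level) * level, 0, 255).astype(original.dtype)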
final_frame = cv.hconcat((original, gamma_image))
cv2_imshow(final_frame)
# No.3 (1. 5 IMAGES)
import glob
from math import log10, sqrt
original = cv.imread('/content/drive/MyDrive/PVCK/galaxy.jpg')
cv_img = []
for img in glob.glob('/content/drive/MyDrive/PVCK/Noise/*.jpg'):
n = cv.imread(img)
cv_img.append(n)
dst = cv_img[0]
noise_number = 4 # number of noisy images (array index) in the noises folder
for i in range(noise_number):
if i == 0:
pass
else:
alpha = 1.0 / (i + 1)
beta = 1.0 - alpha
dst = cv.addWeighted(cv_img[i], alpha, dst, beta, 0.0)
def PSNR(original, compressed):
    # cast to float before subtracting: uint8 arithmetic wraps around and would corrupt the MSE
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
    if(mse == 0): # an MSE of zero means no noise is present in the signal,
                  # so PSNR is undefined (infinite); return a large value instead.
        return 100
    max_pixel = 255.0
    psnr = 20 * log10(max_pixel / sqrt(mse))
    return psnr
compressed = dst
psnr = PSNR(original, compressed)
print('The PSNR value is', psnr, '(dB)')
galaxy_frame = cv.hconcat((original, dst))
cv2_imshow(galaxy_frame)
# No.3 (2. 30 IMAGES)
dst = cv_img[0]
noise_number = 29 # number of noisy images (array index) in the noises folder
for i in range(noise_number):
if i == 0:
pass
else:
alpha = 1.0 / (i + 1)
beta = 1.0 - alpha
dst = cv.addWeighted(cv_img[i], alpha, dst, beta, 0.0)
compressed = dst
psnr = PSNR(original, compressed)
print('The PSNR value is', psnr, '(dB)')
galaxy_frame = cv.hconcat((original, dst))
cv2_imshow(galaxy_frame)
# No.3 (3. 60 IMAGES)
dst = cv_img[0]
noise_number = 59 # number of noisy images (array index) in the noises folder
for i in range(noise_number):
if i == 0:
pass
else:
alpha = 1.0 / (i + 1)
beta = 1.0 - alpha
dst = cv.addWeighted(cv_img[i], alpha, dst, beta, 0.0)
compressed = dst
psnr = PSNR(original, compressed)
print('The PSNR value is', psnr, '(dB)')
galaxy_frame = cv.hconcat((original, dst))
cv2_imshow(galaxy_frame)
# No.3 (4. 80 IMAGES)
dst = cv_img[0]
noise_number = 79 # number of noisy images (array index) in the noises folder
for i in range(noise_number):
if i == 0:
pass
else:
alpha = 1.0 / (i + 1)
beta = 1.0 - alpha
dst = cv.addWeighted(cv_img[i], alpha, dst, beta, 0.0)
compressed = dst
psnr = PSNR(original, compressed)
print('The PSNR value is', psnr, '(dB)')
galaxy_frame = cv.hconcat((original, dst))
cv2_imshow(galaxy_frame)
# No.3 (5. 100 IMAGES)
dst = cv_img[0]
noise_number = 99 # number of noisy images (array index) in the noises folder
for i in range(noise_number):
if i == 0:
pass
else:
alpha = 1.0 / (i + 1)
beta = 1.0 - alpha
dst = cv.addWeighted(cv_img[i], alpha, dst, beta, 0.0)
compressed = dst
psnr = PSNR(original, compressed)
print('The PSNR value is', psnr, '(dB)')
galaxy_frame = cv.hconcat((original, dst))
cv2_imshow(galaxy_frame)
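# NOTE (reasoning, assuming the noise in each image is independent and zero-mean): averaging n
# noisy images reduces the noise variance by roughly a factor of n, so the MSE drops by ~n and
# the PSNR should rise by roughly 10*log10(n) dB as more images are blended, which is the trend
# the five experiments above are meant to show.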
# No.4
img_t = cv.imread('/content/drive/MyDrive/PVCK/teeth.jpg')
mask = np.zeros(img_t.shape, dtype=np.uint8)
mask = cv.rectangle(mask,pt1=(315,595),pt2=(770,1025),color=(255,255,255),thickness=-1)
result = img_t & mask
mask1 = cv.rectangle(mask,pt1=(405,5),pt2=(850,455),color=(255,255,255),thickness=-1)
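# cv.rectangle draws on the array in place, so mask1 refers to the same array as mask and now
# contains both rectangles; the final result therefore keeps both masked regions of the image.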
result = img_t & mask1
teeth_frame = cv.hconcat((img_t, result))
cv2_imshow(teeth_frame)
# EXAMPLE USING AND
img_rose = cv.imread('/content/drive/MyDrive/PVCK/rose_pink.png')
mask = np.zeros(img_rose.shape, dtype=np.uint8)
mask = cv.circle(mask, (260, 300), 225, (255,255,255), -1)
masked = img_rose & mask
rose_frame = cv.hconcat((img_rose, mask, masked))
cv2_imshow(rose_frame)
# No.5 (1. OR)
mask = np.zeros(img_rose.shape, dtype=np.uint8)
mask = cv.circle(mask, (260, 300), 225, (255,255,255), -1)
masked = img_rose | mask
rose_frame = cv.hconcat((img_rose, mask, masked))
cv2_imshow(rose_frame)
# No.5 (2. NOT)
mask = np.zeros(img_rose.shape, dtype=np.uint8)
mask = cv.circle(mask, (260, 300), 225, (255,255,255), -1)
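# note: ~img_rose & ~mask is, by De Morgan's law, the same as ~(img_rose | mask),
# i.e. it inverts the image and blacks out the circular region.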
masked = ~img_rose & ~mask
rose_frame = cv.hconcat((img_rose, mask, masked))
cv2_imshow(rose_frame)
# No.5 (3. NAND or NOT AND)
mask = np.zeros(img_rose.shape, dtype=np.uint8)
mask = cv.circle(mask, (260, 300), 225, (255,255,255), -1)
masked = ~img_rose & mask
rose_frame = cv.hconcat((img_rose, mask, masked))
cv2_imshow(rose_frame)
# No.5 (4. XOR or Exclusive OR)
mask = np.zeros(img_rose.shape, dtype=np.uint8)
mask = cv.circle(mask, (260, 300), 225, (255,255,255), -1)
masked = img_rose ^ mask
rose_frame = cv.hconcat((img_rose, mask, masked))
cv2_imshow(rose_frame)<jupyter_output><empty_output>
| no_license | /Day2_Modul2.ipynb | fildzahfsaa/PVCK_Genap_2021 | 2 |