| column | dtype | values |
|---|---|---|
| path | string | length 8–204 |
| content_id | string | length 40 |
| detected_licenses | list | |
| license_type | string | 2 classes |
| repo_name | string | length 8–100 |
| repo_url | string | length 27–119 |
| star_events_count | int64 | 0–6.26k |
| fork_events_count | int64 | 0–3.52k |
| gha_license_id | string | 10 classes |
| gha_event_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_language | string | 12 classes |
| language | string | 1 class |
| is_generated | bool | 1 class |
| is_vendor | bool | 1 class |
| conversion_extension | string | 6 classes |
| size | int64 | 172–10.2M |
| script | string | length 367–7.46M |
| script_size | int64 | 367–7.46M |

Each record below lists these metadata fields on one line, followed by the `script` contents and the closing `script_size` cell.
/Exercise05/Exercise05.ipynb | 343ae8d3672da79ab064302e9552faf515c191f8 | ["MIT"] | permissive | Develop-Packt/Generative-Adversarial-Networks-GANs | https://github.com/Develop-Packt/Generative-Adversarial-Networks-GANs | 0 | 1 | MIT | 2022-12-26T21:35:49 | 2020-03-30T09:46:33 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 100,455 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Exercise 05 : Building DCGAN for MNIST
# Import the required library functions
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import pyplot
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Reshape, Dense, Dropout, Flatten,Activation
from tensorflow.keras.layers import LeakyReLU,BatchNormalization
from tensorflow.keras.layers import Conv2D, UpSampling2D,Conv2DTranspose
from tensorflow.keras.datasets import mnist
from tensorflow.keras.optimizers import Adam
# Function to generate real data samples
def realData(batch):
# Get the MNIST data
(X_train, _), (_, _) = mnist.load_data()
# Reshaping the input data to include channel
X = X_train[:,:,:,np.newaxis]
# normalising the data
X = (X.astype('float32') - 127.5)/127.5
# Generating a batch of data
imageBatch = X[np.random.randint(0, X.shape[0], size=batch)]
return imageBatch
# Generating a batch of images
mnistData = realData(25)
# +
# Plotting the images
for j in range(5*5):
pyplot.subplot(5,5,j+1)
# turn off axis
pyplot.axis('off')
pyplot.imshow(mnistData[j,:,:,0],cmap='gray_r')
# -
# Function to generate inputs for generator function
def fakeInputs(batch,infeats):
# Generate random noise data with shape (batch,input features)
x_fake = np.random.uniform(-1,1,size=[batch,infeats])
return x_fake
# Function for the generator model
def genModel(infeats):
# Defining the Generator model
Genmodel = Sequential()
Genmodel.add(Dense(512,input_dim=infeats))
Genmodel.add(Activation('relu'))
Genmodel.add(BatchNormalization())
# second layer of FC => ReLU => BN layers
Genmodel.add(Dense(7*7*64))
Genmodel.add(Activation('relu'))
Genmodel.add(BatchNormalization())
# Upsample to 14 x 14
Genmodel.add(Reshape((7,7,64)))
Genmodel.add(Conv2DTranspose(32,kernel_size=(5,5),strides=(2,2),padding='same'))
Genmodel.add(Activation('relu'))
Genmodel.add(BatchNormalization(axis = -1))
# Upsample to 28 x 28
Genmodel.add(Conv2DTranspose(1,kernel_size=(5,5),strides=(2,2),padding='same'))
Genmodel.add(Activation('tanh'))
return Genmodel
# Function to create fake samples using the generator model
def fakedataGenerator(Genmodel,batch,infeats):
# first generate the inputs to the model
genInputs = fakeInputs(batch,infeats)
# use these inputs inside the generator model to generate fake distribution
X_fake = Genmodel.predict(genInputs)
return X_fake
# Define the arguments like batch size and input feature
batch = 128
infeats = 100
Genmodel = genModel(infeats)
Genmodel.summary()
# Generating a fake sample and printing the shape
fake = fakedataGenerator(Genmodel,batch,infeats)
fake.shape
# +
# Plotting the fake sample
plt.imshow(fake[1, :, :, 0], cmap='gray_r')
plt.xlabel('Fake Sample Image')
# -
# Discriminator model as a function
def discModel():
Discmodel = Sequential()
Discmodel.add(Conv2D(32,kernel_size=(5,5),strides=(2,2),padding='same',input_shape=(28,28,1)))
Discmodel.add(LeakyReLU(0.2))
# second layer of convolutions
Discmodel.add(Conv2D(64, kernel_size=(5,5), strides=(2, 2), padding='same'))
Discmodel.add(LeakyReLU(0.2))
# Fully connected layers
Discmodel.add(Flatten())
Discmodel.add(Dense(512))
Discmodel.add(LeakyReLU(0.2))
Discmodel.add(Dense(1, activation='sigmoid'))
Discmodel.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5),metrics=['accuracy'])
return Discmodel
# Print the summary of the discriminator model
Discmodel = discModel()
Discmodel.summary()
# Next we develop the GAN model
# Define the combined generator and discriminator model, for updating the generator
def ganModel(Genmodel,Discmodel):
# First define that discriminator model cannot be trained
Discmodel.trainable = False
Ganmodel = Sequential()
# First adding the generator model
Ganmodel.add(Genmodel)
# Next adding the discriminator model without training the parameters
Ganmodel.add(Discmodel)
# Compile the model for loss to optimise the Generator model
Ganmodel.compile(loss='binary_crossentropy',optimizer = Adam(lr=0.0002, beta_1=0.5))
return Ganmodel
# Initialise the gan model
gan_model = ganModel(Genmodel,Discmodel)
# Print summary of the GAN model
gan_model.summary()
# Defining the number of epochs
nEpochs = 5000
# +
# Train the GAN network
for i in range(nEpochs):
# Generate samples equal to the batch size from the real distribution
x_real = realData(batch)
# Generate fake samples using the fake data generator function
x_fake = fakedataGenerator(Genmodel,batch,infeats)
# Concatenating the real and fake data
X = np.concatenate([x_real,x_fake])
# Creating the dependent variable and initializing them as '0'
Y = np.zeros(batch * 2)
# making the first set equivalent to batch with labels 1
Y[:batch] = 1
# train the discriminator on the real samples
discLoss = Discmodel.train_on_batch(X, Y)
# Generate new fake inputs for training the GAN network
x_gan = fakeInputs(batch*2,infeats)
# Create labels of the fake examples as 1 to fool the discriminator
y_gan = np.ones((batch*2, 1))
# Update the generator model through the discriminator model
gan_model.train_on_batch(x_gan, y_gan)
# Print the discriminator's mean probability on real and fake data every 50 iterations
if i % 50 == 0:
# Evaluate the discriminator on a small held-out batch of real and fake samples
x_real_test = realData(10)
x_fake_test = fakedataGenerator(Genmodel,10,infeats)
# Concatenating the real and fake data
X_test = np.concatenate([x_real_test,x_fake_test])
# Labels for the 20 test samples: the first 10 (real) are 1, the rest (fake) are 0
Y_test = np.zeros(20)
Y_test[:10] = 1
# Predicting probabilities (predict_proba is not available on tf.keras models, so use predict)
preds = Discmodel.predict(X_test)
print('Discriminator probability:{D}'.format(D=np.mean(preds)))
# Generate fake samples using the fake data generator function
x_fake = fakedataGenerator(Genmodel,batch,infeats)
# Saving the plots
for j in range(5*5):
pyplot.subplot(5,5,j+1)
# turn off axis
pyplot.axis('off')
pyplot.imshow(x_fake[j,:,:,0],cmap='gray_r')
filename = 'handwritten/GAN_MNIST_Training_Plot%03d.png' % (i)
pyplot.savefig(filename)
pyplot.close()
# -
# Let us now generate some fake data using the model we trained and visualize the images
# Images predicted after training
x_fake = fakedataGenerator(Genmodel,25,infeats)
# Visualizing the plots
for j in range(5*5):
pyplot.subplot(5,5,j+1)
# turn off axis
pyplot.axis('off')
pyplot.imshow(x_fake[j,:,:,0],cmap='gray_r')
| 7,311 |
/EDA/EDA.ipynb | 38fa0fa391094bfa1463e0e333eb2d1d45628865 | ["MIT"] | permissive | jo-member/p4-ocr-hansarang-1 | https://github.com/jo-member/p4-ocr-hansarang-1 | 1 | 0 | null | 2021-06-20T11:15:50 | 2021-06-18T14:24:15 | null | Jupyter Notebook | false | false | .py | 846,581 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# For this project, I worked with data from the United States National Park Service about species at risk of extinction.
#
# I analyzed the conservation status of the species in this dataset and investigated whether there are patterns or common traits among the types of species that are currently endangered. Throughout the project I analyzed, cleaned, and visualized the data, and formulated questions that I then tried to answer as conclusively as possible.
#
#
#
# ## Project scoping
#
# It is always important to define the scope of a project before starting. In this case it consisted of four stages: goals, available actions and data, the analysis itself, and an evaluation of the results of each step in order to draw conclusions and communicate findings effectively.
#
#
# ### 1. Project goals
#
# The goals for this project were the following:
#
# - Analyze the species and their conservation status in the National Park Service dataset.
# - Investigate whether there are patterns or common traits among the types of species that are endangered.
# - Understand the characteristics of the species and their conservation status, and how those species relate to the national parks.
#
# ### 2. Actions and data
#
# The actions taken to work toward the project goals were the analysis of two datasets provided by CodeCademy.com, called Observations.csv and Species_info.csv, plus a third dataset, Joined_table.csv, which consists of the previous two joined into a single table in order to extract even more relevant information.
#
# ### 3. Analysis
#
# Initially, the analysis was driven by the following questions:
#
# - How many species are in each conservation status, and how are they distributed?
# - Which category of species is most at risk, and which categories are less so?
# - Which national park recorded the largest number of endangered species?
# - Are certain types of species more likely to be endangered?
# - Are the differences between species and their conservation status statistically significant?
# - Which species is most prevalent in the observations, and how is it distributed across the different parks?
#
#
# ### 4. Evaluation
#
# Finally, it is vital to revisit the goals and check whether the outcome of the analysis answers the questions posed at the beginning (in the goals section). This section reflects what was learned along the way and whether any question could not be answered. It can also cover limitations, and whether some part of the analysis could have been done with a different methodology.
# # Importing the modules
#
# The first thing to do was to import the Python modules used in this project.
import pandas as pd
pd.set_option('display.max_colwidth', None)
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
# # Loading the datasets
#
# To begin, I imported the data we will be working with.
#
# In this case there are three datasets:
#
# ### Observations.csv:
# This file contains information about sightings of species in four national parks over the previous 7 days. The columns of the table are the following:
#
# -**scientific_name**: The name of each species.
#
# -**park_name**: The national park where those particular sightings were recorded.
#
# -**observations**: The number of sightings during the 7-day period.
#
# On the same line where we read the .csv file with the pandas library, I also dropped duplicate rows.
observations = pd.read_csv('observations.csv').drop_duplicates().reset_index()
observations.head()
#We can also check the length of the dataset and whether there are any null values
observations.info()
# ### Species_info.csv:
# This file contains information about the species that were observed in the national parks. The columns are the following:
#
# -**category**: The category each species is classified under.
#
# -**scientific_name**: The scientific name of each species.
#
# -**common_names**: The name each species is most commonly known by.
#
# -**conservation_status**: The level of risk (or lack of it) for each species.
#
# In this case I dropped duplicate rows based on the scientific name column, to be sure the same species does not appear twice in the dataset.
species_info = pd.read_csv("species_info.csv").drop_duplicates(subset = ['scientific_name']).reset_index()
species_info.head(5)
# +
#Printing this table's info gives us a first observation: only 178 rows correspond to species with a conservation status (at-risk species).
print(species_info.info())
# +
#Using a SQL statement (run in DB Browser for SQLite), I performed a cross join of both datasets,
#producing another .csv file, called joined_table
joined_table = pd.read_csv("joinedtable.csv").drop_duplicates().reset_index()
joined_table.head()
# -
# ## Analyzing the datasets
#
# The first step was to explore the species table to find out how many categories it contains. We can also find out how many conservation statuses there are, and how many of the 5541 species fall under each one.
print("Número de categorías: " + str(species_info.category.nunique()))
different_categories = species_info.category.unique()
print("Las diferentes categorías en la base de datos de especies son: {}".format(different_categories))
# +
#I quickly found that vascular plants are by far the most common category in this dataset.
species_info.groupby('category').size().sort_values(ascending=False)
# -
# After a first look at the categories, I wanted to see how many of these species are at risk. Since the National Park Service would be far more interested in these species than in those that are not at risk, I left the empty values (NaN) unfilled for the moment, so that the charts could better highlight what really matters.
types_of_conservation = species_info.conservation_status.unique()
print("Los diferentes tipos de estado de conservación son: "+ str(types_of_conservation))
conservation_status_count = species_info.groupby("conservation_status").index.count()
conservation_status_sorted = conservation_status_count.sort_values(ascending = False)
print(conservation_status_sorted)
# With this information I produced the first visualization. The pie chart gives a good sense of how Species of Concern far outnumber every other at-risk category.
# +
species_of_concern = 0
endangered = 0
threatened = 0
in_recovery = 0
not_endangered = 0
for item in species_info.conservation_status:
if item == "Species of Concern":
species_of_concern +=1
elif item == "Endangered":
endangered += 1
elif item == "Threatened":
threatened +=1
elif item == "In Recovery":
in_recovery += 1
else:
not_endangered +=1
list_to_bar = [species_of_concern, endangered, threatened,in_recovery]
labels_to_bar = "Especies en Preocupación", "Especies en Peligro", "Especies Amenazadas", "Especies en Recuperación"
def make_autopct(values):
def my_autopct(pct):
total = sum(values)
val = int(round(pct*total/100.0))
return '{p:.2f}% ({v:d})'.format(p=pct,v=val)
return my_autopct
plt.figure(figsize = (7,7))
plt.pie(list_to_bar, labels = labels_to_bar, autopct = make_autopct(list_to_bar), pctdistance =0.7, startangle = 5, textprops={'fontsize': 13})
plt.axis("equal")
plt.title("Estado de conservación: Especies en Peligro de Extinción", fontsize = (18))
plt.show()
# -
# A second chart was a useful addition to this analysis. After showing the distribution of conservation statuses among at-risk species, I made a bar chart to visualize how many species in each category are endangered.
# +
most_endangered = species_info.groupby("category").conservation_status.count()
most_endangered_sorted = most_endangered.sort_values(ascending = False)
print(most_endangered_sorted)
labels = ['Aves', 'Plantas Vasculares', 'Mamíferos', 'Peces', 'Anfibios', 'Reptiles', 'Plantas No Vasculares']
plt.figure(figsize = (9,6))
ax = plt.subplot(1,1,1)
plt.bar(range(len(most_endangered_sorted)), most_endangered_sorted, color = ['red', 'red', 'red', 'green', 'green', 'green', 'green'])
plt.xlabel("Categoría", size = 14)
plt.ylabel("Cantidad de especies en peligro", size = 14)
plt.title("Especies en peligro por categoría", size = 16)
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels, rotation = 20, size = 12)
plt.legend(["Rojo: Más amenazadas"])
plt.show()
# -
# With those two charts done, I made one more bar chart combining all of this information into a single visualization. Here we can draw even more conclusions to share.
# +
conservationcategories = species_info.groupby(["conservation_status", "category"]).scientific_name.count().unstack()
conservationcategories
# -
ax = conservationcategories.plot.bar(figsize=(10,7), stacked=True)
plt.xlabel("Estado de Conservación", size = 13)
plt.ylabel("Número de Especies", size = 13)
plt.title("Número de Especies en peligro por categoría y estado", size = 16)
plt.legend(['Anfibios', 'Aves', 'Peces', 'Mamíferos', 'Plantas No Vasculares', 'Reptiles', 'Plantas Vasculares'])
ax.set_xticklabels(['En peligro', 'En Recuperación', 'En preocupación', 'Amenazadas'])
plt.show()
# From these results I asked myself another question about the at-risk species: how many species in each category were on their way to extinction if no action was taken? A percentage was the most efficient way to get the answer.
# +
species_info.fillna('Not in danger', inplace = True)
species_info['is_in_danger'] = species_info.conservation_status != 'Not in danger'
danger_count = species_info.groupby(['category', 'is_in_danger']).scientific_name.nunique().reset_index()
danger_pivoted = danger_count.pivot(columns='is_in_danger',
index='category',
values='scientific_name').reset_index()
danger_pivoted.columns = ['Categoría', 'No_en_peligro', 'En_peligro']
danger_pivoted['Porcentaje'] = round((danger_pivoted.En_peligro/(danger_pivoted.En_peligro + danger_pivoted.No_en_peligro) * 100),2)
print(danger_pivoted)
# -
# This calculation showed me that the number of species in each category had nothing to do with the percentage of species in that category that are at risk.
# At this point it was a good idea to run some hypothesis tests to see whether the different categories had statistically significant differences in their conservation-status percentages. I used the **chi-square test**. I was looking for a 95% confidence level, so I needed a p-value of 0.05 or less to conclude that the differences were not simply due to chance.
amphibianvsmammal = [[72,7], [146,30]]
_,pval,_,_ = stats.chi2_contingency(amphibianvsmammal)
print (pval)
if pval <= 0.05:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes NO es simplemente por azar")
else:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes ES simplemente por azar")
reptilesvsmammal = [[73,5], [146,30]]
_,pval,_,_ = stats.chi2_contingency(reptilesvsmammal)
print (pval)
if pval <= 0.05:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes NO es simplemente por azar")
else:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes ES simplemente por azar")
vascularvsnonvascular = [[4216,46], [328,5]]
_,pval,_,_ = stats.chi2_contingency(vascularvsnonvascular)
print (pval)
if pval <= 0.05:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes NO es simplemente por azar")
else:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes ES simplemente por azar")
birdvsfish = [[413,75], [115,10]]
_,pval,_,_ = stats.chi2_contingency(birdvsfish)
print (pval)
if pval <= 0.05:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes NO es simplemente por azar")
else:
print("Puedo estar razonablemente seguro de que la diferencia entre ambos porcentajes ES simplemente por azar")
# After all this work with the species dataset, I combined both tables to visualize how many sightings of at-risk species were made in each national park over the previous 7 days.
observations_7 = joined_table.groupby(['park_name','conservation_status']).observations.sum().unstack()
ax = observations_7.plot.bar(figsize = (12,8),stacked = True)
plt.xlabel("Parque Nacional", size = 14)
plt.ylabel("Observaciones en los últimos 7 días", size = 14)
plt.title("Observaciones de especies en peligro en los últimos 7 días", size = 16)
plt.legend(['En Peligro', 'En Recuperación', 'En Preocupación', 'Amenazadas'],)
plt.show()
# The next thing I did with the observations dataset was to build a table to quickly find which species were the most and least observed, and where they were seen. With a simple statement the following table can be sorted by observations in a specific national park, which makes it very convenient to work with dynamically.
observed = observations.groupby('scientific_name').observations.sum().sort_values(ascending = False).reset_index()
observed_by_park = observations.groupby(['scientific_name', 'park_name']).observations.sum().sort_values(ascending = False).reset_index()
observed_by_park_pivot = observed_by_park.pivot(columns = 'park_name',
index = 'scientific_name',
values = 'observations')
observed_by_park_pivot['total_observaciones'] = observed_by_park_pivot.sum(axis=1)
most_observed_total = observed_by_park_pivot.sort_values(by = 'total_observaciones', ascending = False)
most_observed_total.head()
# I spent more time querying that table, looking for the least observed species, and also for the most observed species per national park.
least_observed_total = observed_by_park_pivot.sort_values(by = 'total_observaciones', ascending = True)
least_observed_total.head()
most_observed_bryce = observed_by_park_pivot.sort_values(by = 'Bryce National Park', ascending = False)
most_observed_bryce.head()
most_observed_gsmountains = observed_by_park_pivot.sort_values(by = 'Great Smoky Mountains National Park', ascending = False)
most_observed_gsmountains.head()
most_observed_yellowstone = observed_by_park_pivot.sort_values(by = 'Yellowstone National Park', ascending = False)
most_observed_yellowstone.head()
most_observed_yosemite = observed_by_park_pivot.sort_values(by = 'Yosemite National Park', ascending = False)
most_observed_yosemite.head()
least_observed_bryce = observed_by_park_pivot.sort_values(by = 'Bryce National Park', ascending = True)
least_observed_bryce.head()
least_observed_gsmountains = observed_by_park_pivot.sort_values(by = 'Great Smoky Mountains National Park', ascending = True)
least_observed_gsmountains.head()
# This information (especially about the least-seen species) was really important, but it needed some context. What was the situation of these species? I defined a function that takes a scientific name and returns its information.
# +
def search_info(species_name):
return species_info[species_info.scientific_name == species_name]
search_info('Corydalis aurea')
# -
search_info('Collomia tinctoria')
search_info('Canis rufus')
search_info('Grus americana')
# ## Conclusion
#
# All of this analysis led me to the following findings:
#
# - Of the 5541 species in this dataset, 178 were at risk (only about 3%).
# - Most of the at-risk species were under the Species of Concern status, and only three of the 178 are currently In Recovery.
# - Birds, vascular plants, and mammals were the three categories most at risk in this dataset. Even though a huge number of vascular plants are recorded here, there are more bird species that need attention.
# - The three species In Recovery are birds. Even so, that is still a small percentage compared with the number of birds that are endangered.
# - Mammals and birds have more than 15% of their species at risk. This number seems alarming and deserves attention.
# - The chi-square tests made me reasonably confident that mammals are more prone to being endangered than reptiles, and the same holds for birds compared with fish.
# - Yellowstone was the national park with the largest number of species sightings over the previous seven days.
# - Holcus lanatus was the most observed species over the previous seven days, owing to its large number of sightings in Yellowstone and Yosemite.
# - Grus americana, an endangered bird, was the least seen species across the parks. The second least seen species, Canis rufus, is in the same situation.
# - The two species least observed in a single national park are not at risk. A possible explanation is that the two largest parks host a much larger number of individuals of those species.
# ## Questions for future work
#
# These topics could be analyzed with more contextual information in future research:
#
# - Is the number of at-risk species growing, or has it declined over recent days/months?
# - How large are the four national parks? Is Yellowstone much bigger than the rest, and is that why it has the most sightings?
# - The species Corydalis aurea was seen only nine times in seven days in Bryce National Park. A follow-up study could be carried out there to see whether it is on its way to local extinction in that park. It does, however, have plenty of sightings in the other national parks in the dataset.
| 19,177 |
/_TUTORIAL/pyHowFun-master/LESSON4 爬蟲實戰 - 高鐵時刻.ipynb | ec1a1bd10875dfcf7398ad1d2f6e8fe3bbabb7b9 | ["MIT"] | permissive | AaronCHH/L_pylab | https://github.com/AaronCHH/L_pylab | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,732 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Average wage in Russia
# +
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from itertools import product
from datetime import *
from dateutil.relativedelta import *
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# -
# For this assignment we need data on average monthly wage levels in Russia:
#Reading data
wage = pd.read_csv('WAG_C_M.csv', sep=';', index_col='month', parse_dates=True, dayfirst=True)
wage.info()
wage.head()
_ = plt.figure(figsize=(15,7))
_ = wage.WAG_C_M.plot()
_ = plt.title('Average nominal wage')
# Check stationarity and run an STL decomposition of the series:
_ = sm.tsa.seasonal_decompose(wage.WAG_C_M).plot()
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M)[1])
# ### Variance stabilization
# Apply a Box-Cox transformation to stabilize the variance:
# +
wage['WAG_C_M_box'], lmbda = stats.boxcox(wage.WAG_C_M)
_ = plt.figure(figsize=(15,7))
_ = wage.WAG_C_M_box.plot()
_ = plt.title(u'Transformed average nominal wage')
print('Optimal parameter of the Box-Cox power transformation: %f' % lmbda)
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M_box)[1])
# -
# ### Stationarity
# The Dickey-Fuller test rejects the non-stationarity hypothesis, but a trend is clearly visible in the data. Let's try seasonal differencing, run an STL decomposition on the differenced series, and check stationarity again:
wage['WAG_C_M_box_diff'] = wage.WAG_C_M_box - wage.WAG_C_M_box.shift(12)
_ = sm.tsa.seasonal_decompose(wage.WAG_C_M_box_diff.dropna()).plot()
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M_box_diff.dropna())[1])
# The Dickey-Fuller test rejects the non-stationarity hypothesis, BUT the trend has not been removed completely. Let's add ordinary (first-order) differencing as well:
wage['WAG_C_M_box_diff2'] = wage.WAG_C_M_box_diff - wage.WAG_C_M_box_diff.shift(1)
_ = sm.tsa.seasonal_decompose(wage.WAG_C_M_box_diff2.dropna()).plot()
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M_box_diff2.dropna())[1])
# The non-stationarity hypothesis is now rejected at an even higher significance level, and the series looks better visually — the trend is gone.
# ### Model selection
# Let's look at the ACF and PACF of the resulting series:
# +
plt.figure(figsize=(15,10))
ax = plt.subplot(211)
sm.graphics.tsa.plot_acf(wage.WAG_C_M_box_diff2.dropna()[12:].squeeze(), lags=50, ax=ax);
ax = plt.subplot(212)
sm.graphics.tsa.plot_pacf(wage.WAG_C_M_box_diff2.dropna()[12:].squeeze(), lags=50, ax=ax);
# -
# Initial guesses: Q=0, q=1, P=1, p=1.
ps = range(0, 2)
d=1
qs = range(0, 2)
Ps = range(0, 2)
D=1
Qs = range(0, 1)
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
parameters_list
len(parameters_list)
# +
# %%time
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
#try/except is needed because the model fails to fit on some parameter combinations
try:
model=sm.tsa.statespace.SARIMAX(wage.WAG_C_M_box, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12)).fit(disp=-1)
#report the parameters the model fails to fit on and move to the next combination
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
#keep the best model, its AIC, and its parameters
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
warnings.filterwarnings('default')
# -
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
# The best model:
print(best_model.summary())
# Its residuals:
# +
_ = plt.figure(figsize=(15,12))
_ = plt.subplot(211)
_ = best_model.resid[13:].plot()
_ = plt.ylabel(u'Residuals')
_ = ax = plt.subplot(212)
_ = sm.graphics.tsa.plot_acf(best_model.resid.values.squeeze(), lags=50, ax=ax)
print("Критерий Стьюдента: p=%f" % stats.ttest_1samp(best_model.resid[13:], 0)[1])
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(best_model.resid[13:])[1])
# -
# The residuals are unbiased (confirmed by the Student t-test), stationary (confirmed by the Dickey-Fuller test and visually), and not autocorrelated (confirmed by the Ljung-Box test and the correlogram). Let's see how well the model describes the data:
def invboxcox(y,lmbda):
if lmbda == 0:
return(np.exp(y))
else:
return(np.exp(np.log(lmbda*y+1)/lmbda))
# +
wage['model'] = invboxcox(best_model.fittedvalues, lmbda)
_ = plt.figure(figsize=(15,7))
_ = wage.WAG_C_M.plot()
_ = wage.model[13:].plot(color='r')
_ = plt.title('Average nominal wage')
# -
# ### Forecast
# +
wage2 = wage[['WAG_C_M']]
date_list = [datetime.strptime("2017-07-01", "%Y-%m-%d") + relativedelta(months=x) for x in range(0,36)]
future = pd.DataFrame(index=date_list, columns=wage2.columns)
wage2 = pd.concat([wage2, future])
wage2['forecast'] = invboxcox(best_model.predict(start=294, end=329), lmbda)
_ = plt.figure(figsize=(15,7))
_ = wage2.WAG_C_M.plot()
_ = wage2.forecast.plot(color='r')
_ = plt.title('Average nominal wage')
bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable)
xx = res_block(xx,128,kernel_size,3,'same',random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable)
xx = res_block(xx,128,kernel_size,1,'same',random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable)
xx = res_block(xx,256,kernel_size,2,'same',random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable)
xx = res_block(xx,256,kernel_size,1,'same',random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable)
xx = res_block(xx,256,kernel_size,2,'same',random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable,cat=False)
xx = res_block(xx,256,kernel_size,2,'same',random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable,cat=False)
xx = Conv1D(256, kernel_size=kernel_size,
kernel_initializer=initializers.he_normal(seed=random_seed),
padding=padding,
strides=2,
use_bias=bias,
kernel_constraint=max_norm(maxnorm),
trainable=trainable,
kernel_regularizer=l2(l2_reg))(xx)
merged = Flatten()(xx)
dann_in = GradientReversal(hp_lambda=hp_lambda,name='grl')(merged)
dsc = Dense(50,
activation=activation_function,
kernel_initializer=initializers.he_normal(seed=random_seed),
use_bias=bias,
kernel_constraint=max_norm(maxnorm),
kernel_regularizer=l2(l2_reg_dense),
name = 'domain_dense')(dann_in)
dsc = Dense(num_class_domain, activation='softmax', name = "domain")(dsc)
merged = Dense(num_dense,
activation=activation_function,
kernel_initializer=initializers.he_normal(seed=random_seed),
use_bias=bias,
kernel_constraint=max_norm(maxnorm),
kernel_regularizer=l2(l2_reg_dense),
name = 'class_dense')(merged)
merged = Dense(num_class, activation='softmax', name="class")(merged)
model = Model(inputs=input, outputs=[merged,dsc])
if load_path:
model.load_weights(filepath=load_path, by_name=False)
#if load_path: # If path for loading model was specified
#model.load_weights(filepath='../../models_dbt_dann/fold_a_gt 2019-09-09 16:53:52.063276/weights.0041-0.6907.hdf5', by_name=True)
# models/fold_a_gt 2019-09-04 17:36:52.860817/weights.0200-0.7135.hdf5
#if optim=='Adam':
# opt = Adam(lr=lr, decay=lr_decay)
#else:
opt = SGD(lr=lr,decay=lr_decay)
model.compile(optimizer=opt, loss={'class':'categorical_crossentropy','domain':'categorical_crossentropy'}, metrics=['accuracy'])
return model
def res_block(input_tensor,num_filt,kernel_size,stride,padding,random_seed,bias,maxnorm,l2_reg,
eps,bn_momentum,activation_function,dropout_rate,subsam,trainable,cat=True):
t = Conv1D(num_filt, kernel_size=kernel_size,
kernel_initializer=initializers.he_normal(seed=random_seed),
padding=padding,
strides=stride,
use_bias=bias,
kernel_constraint=max_norm(maxnorm),
trainable=trainable,
kernel_regularizer=l2(l2_reg))(input_tensor)
t = BatchNormalization(epsilon=eps, momentum=bn_momentum, axis=-1)(t)
t = Activation(activation_function)(t)
t = Dropout(rate=dropout_rate, seed=random_seed)(t)
print(t.shape)
t = Conv1D(num_filt, kernel_size=kernel_size,
kernel_initializer=initializers.he_normal(seed=random_seed),
padding=padding,
strides=1,
use_bias=bias,
kernel_constraint=max_norm(maxnorm),
trainable=trainable,
kernel_regularizer=l2(l2_reg))(t)
t = BatchNormalization(epsilon=eps, momentum=bn_momentum, axis=-1)(t)
t = Activation(activation_function)(t)
t = Dropout(rate=dropout_rate, seed=random_seed)(t)
p = MaxPooling1D(pool_size=stride)(input_tensor)
if(stride>1):
if(cat):
p = Lambda(zeropad, output_shape=zeropad_output_shape)(p)
print(t.shape)
print(p.shape)
t = Add()([t,p])
return t
52/3
model = heartnet(loadpath,activation_function, bn_momentum, bias, dropout_rate, dropout_rate_dense,
eps, kernel_size, l2_reg, l2_reg_dense, lr, lr_decay, maxnorm,
padding, random_seed, subsam, num_filt, num_dense, FIR_train, trainable, tipe,
num_class=num_class,num_class_domain=9,hp_lambda=hp_lambda)
plot_model(model, to_file='model_test.png', show_shapes=True)
model.summary()
# +
coeff_path = '../data/filterbankcoeff60.mat'
coeff = tables.open_file(coeff_path)
b1 = coeff.root.b1[:]
b1 = np.hstack(b1)
b1 = np.reshape(b1, [b1.shape[0], 1, 1])
b2 = coeff.root.b2[:]
b2 = np.hstack(b2)
b2 = np.reshape(b2, [b2.shape[0], 1, 1])
b3 = coeff.root.b3[:]
b3 = np.hstack(b3)
b3 = np.reshape(b3, [b3.shape[0], 1, 1])
b4 = coeff.root.b4[:]
b4 = np.hstack(b4)
b4 = np.reshape(b4, [b4.shape[0], 1, 1])
# -
3,760,013
# +
from keras import backend as K
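# Orientation note (comment added for readability): the helpers below assemble a 1-D ResNet —
# BN+ReLU units, convolution helpers, residual blocks with max-pooled shortcuts and optional
# zero-padded channel doubling, plus output-head, compile, and build_network entry points.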
def _bn_relu(layer, dropout=0, **params):
from keras.layers import BatchNormalization
from keras.layers import Activation
layer = BatchNormalization()(layer)
layer = Activation(params["conv_activation"])(layer)
if dropout > 0:
from keras.layers import Dropout
layer = Dropout(params["conv_dropout"])(layer)
return layer
def add_conv_weight(
layer,
filter_length,
num_filters,
subsample_length=1,
**params):
from keras.layers import Conv1D
layer = Conv1D(
filters=num_filters,
kernel_size=filter_length,
strides=subsample_length,
padding='same',
kernel_initializer=params["conv_init"])(layer)
return layer
def add_conv_layers(layer, **params):
for subsample_length in params["conv_subsample_lengths"]:
layer = add_conv_weight(
layer,
params["conv_filter_length"],
params["conv_num_filters_start"],
subsample_length=subsample_length,
**params)
layer = _bn_relu(layer, **params)
return layer
def resnet_block(
layer,
num_filters,
subsample_length,
block_index,
**params):
from keras.layers import Add
from keras.layers import MaxPooling1D
from keras.layers.core import Lambda
def zeropad(x):
y = K.zeros_like(x)
return K.concatenate([x, y], axis=2)
def zeropad_output_shape(input_shape):
shape = list(input_shape)
assert len(shape) == 3
shape[2] *= 2
return tuple(shape)
shortcut = MaxPooling1D(pool_size=subsample_length)(layer)
zero_pad = (block_index % params["conv_increase_channels_at"]) == 0 \
and block_index > 0
if zero_pad is True:
shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(shortcut)
for i in range(params["conv_num_skip"]):
if not (block_index == 0 and i == 0):
layer = _bn_relu(
layer,
dropout=params["conv_dropout"] if i > 0 else 0,
**params)
layer = add_conv_weight(
layer,
params["conv_filter_length"],
num_filters,
subsample_length if i == 0 else 1,
**params)
layer = Add()([shortcut, layer])
return layer
def get_num_filters_at_index(index, num_start_filters, **params):
return 2**int(index / params["conv_increase_channels_at"]) \
* num_start_filters
def add_resnet_layers(layer, **params):
layer = add_conv_weight(
layer,
params["conv_filter_length"],
params["conv_num_filters_start"],
subsample_length=1,
**params)
layer = _bn_relu(layer, **params)
for index, subsample_length in enumerate(params["conv_subsample_lengths"]):
num_filters = get_num_filters_at_index(
index, params["conv_num_filters_start"], **params)
layer = resnet_block(
layer,
num_filters,
subsample_length,
index,
**params)
layer = _bn_relu(layer, **params)
return layer
def add_output_layer(layer, **params):
from keras.layers.core import Dense, Activation
from keras.layers.wrappers import TimeDistributed
print(layer)
layer = TimeDistributed(Dense(params["num_categories"]))(layer)
return Activation('softmax')(layer)
def add_output_Flatten(layer,**params):
from keras.layers import Flatten
from keras.layers.core import Dense, Activation
layer = Flatten()(layer)
layer = Dense(params["num_categories"])(layer)
return layer
def add_compile(model, **params):
from keras.optimizers import Adam
optimizer = Adam(
lr=params["learning_rate"],
clipnorm=params.get("clipnorm", 1))
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
def build_network(**params):
from keras.models import Model
from keras.layers import Input
print("Hellow motherfucker")
inputs = Input(shape=params['input_shape'],
dtype='float32',
name='inputs')
if params.get('is_regular_conv', False):
layer = add_conv_layers(inputs, **params)
else:
layer = add_resnet_layers(inputs, **params)
#output = add_output_layer(layer, **params)
output = add_output_Flatten(layer,**params)
model = Model(inputs=[inputs], outputs=[output])
if params.get("compile", True):
add_compile(model, **params)
return model
# -
params = {
"input_shape":(256,1),
"num_categories":2,
"conv_subsample_lengths": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
#"conv_subsample_lengths": [1, 2, 1, 2, 1, 2, 1, 2],
"conv_filter_length": 16,
"conv_num_filters_start": 32,
"conv_init": "he_normal",
"conv_activation": "relu",
"conv_dropout": 0.2,
"conv_num_skip": 2,
"conv_increase_channels_at": 4,
"learning_rate": 0.001,
"batch_size": 128,
"train": "examples/irhythm/train.json",
"dev": "examples/irhythm/dev.json",
"save_dir": "saved"
}
model = build_network(**params)
plot_model(model, to_file='model_Resnet.png', show_shapes=True)
model.summary()
import numpy as np, matplotlib.pyplot as plt,math
import sys
eps = sys.float_info.epsilon
p22 = (-12,12)
p33 = (-12,-12)
p44 = (12,-12)
p11 = (12,12)
o = (0,0)
p1 = (4,5)
p2 = (12,12)
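# Descriptive note (comments added for readability): rotate() rotates `point` about `origin` by
# `angle` degrees; getXpoint() walks `dist` units from p1 toward p2 along the segment direction;
# getX2point() translates `p` by the vector from p1 to p2.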
def rotate(origin, point, angle=90):
ox, oy = origin
px, py = point
qx = ox + math.cos(math.radians(angle)) * (px - ox) - math.sin(math.radians(angle)) * (py - oy)
qy = oy + math.sin(math.radians(angle)) * (px - ox) + math.cos(math.radians(angle)) * (py - oy)
return qx, qy
def getXpoint(p1,p2,dist):
m = (p2[1]-p1[1])/(p2[0]-p1[0]+eps)
d = (math.atan(m))
if(p2[0]>p1[0]):
x = p1[0]+dist*math.cos(abs(d))
elif(p2[0]<p1[0]):
x = p1[0]-dist*math.cos(abs(d))
else: x = p1[0]
if(p2[1]>p1[1]):
y = p1[1]+dist*math.sin(abs(d))
elif(p2[1]<p1[1]):
y = p1[1]-dist*math.sin(abs(d))
else: y = p1[1]
return x,y
def getX2point(p1,p2,p):
x = p2[0] + (p[0]-p1[0])
y = p2[1] + (p[1]-p1[1])
return x,y
p2 = (20,5)
pout = rotate(p1,getXpoint(p1,p2,4))
pout2 = getX2point(p1,p2,pout)
x = [x for (x,y) in [p1,p2]]
y = [y for (x,y) in [p1,p2]]
x2 = [x for (x,y) in [pout,pout2]]
y2 = [y for (x,y) in [pout,pout2]]
D = 30
plt.figure(figsize=(10,10))
plt.plot([-D,D],[0,0])
plt.plot([0,0],[-D,D])
plt.plot(x,y,'ro')
plt.plot(x2,y2,'bo')
plt.axis([-D, D, -D, D])
plt.show()
| 18,084 |
/blog/Recommended System/8.1-其他算法 - [ 构建一个科学的排行榜系统 ].ipynb | e66055b7be3023be4362231ae12bfc7d4524972c | [] | no_license | lj72808up/DeepLearning | https://github.com/lj72808up/DeepLearning | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,977 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 8.1.1 Where leaderboard systems come from
#
# #### 1. Why recommender systems need a leaderboard
# 1. It helps with the cold-start problem: when a new user arrives, we can recommend the items that have recently been most popular.
# 2. Items on the leaderboard can act as interest discovery for existing users and broaden their interests.
# 3. If the recommender itself fails, the leaderboard can serve as a temporary fallback so the recommendation slot is never empty.
#
# #### 2. Why a naive sort is not enough
# Simply sorting all items by some attribute and serving the result as the recommendation does not work, for the following reasons:
# 1. It is easy to attack, which leads to ranking manipulation ("chart spamming").
# 2. The Matthew effect persists: popular items can sit at the top of the list forever and the list never refreshes.
# 3. The ranking cannot change over time (which is related to the Matthew effect).
#
#
# Next, we address these three drawbacks one by one.
# ### 8.1.2 Real-world leaderboard systems
#
# #### 1. Leaderboards that account for time
# 1. Vote count + time decay
# This scheme assumes every item has its own vote count; once the time factor is added, higher-scoring items sit near the top of the leaderboard
# $$\frac{P-1}{(T+2)^{G}}$$
# 1. $P$ : the item's vote count
# 2. $T$ : the time elapsed since the item appeared
# 3. $G$ : the gravity factor; the larger $G$ is, the faster an item's heat fades over time
#
# 2. Newton's law of cooling
# $$T(t)=H+Ce^{-\alpha t}$$
# 1. $H$ : the ambient "temperature"; in recommendation this can be the average vote count, average sales, average clicks, etc., and it does not affect the ordering
# 2. $C$ : the item's net vote count, i.e. its votes, sales, or clicks
# 3. $\alpha$ : the cooling coefficient, set by hand to control how fast items cool down
# 4. $t$ : how long the item has existed, usually measured in hours
# 3. Choosing the cooling coefficient $\alpha$ in Newton's law of cooling
# If we want an item's heat to stay unchanged after $A$ hours provided it gains $B$ extra votes, the derivation gives: $$\left(C+B\right)e^{-\alpha\left(t+A\right)}=Ce^{-\alpha t}\quad\Rightarrow\quad \alpha=\frac{1}{A}\ln\!\left(1+\frac{B}{C}\right)$$ (both scores are sketched in code below the figure)
# <img src='img/niudunlengque.png' width='50%' height='50%'>
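# As a hedged illustration (not from the original text; the gravity value and the example numbers below are made up), the two scores above can be computed like this:
# +
import math

def gravity_score(votes, age_hours, gravity=1.8):
    """Hacker-News-style score: (P - 1) / (T + 2)**G."""
    return (votes - 1) / (age_hours + 2) ** gravity

def newton_cooling(C, t_hours, alpha, H=0.0):
    """Newton's law of cooling: T(t) = H + C * exp(-alpha * t)."""
    return H + C * math.exp(-alpha * t_hours)

def cooling_alpha(A, B, C):
    """Cooling coefficient such that B extra votes keep the heat unchanged after A hours."""
    return math.log(1 + B / C) / A

# An item with 50 net votes, 24 hours old; 10 extra votes per 12 hours keep it "warm".
alpha = cooling_alpha(A=12, B=10, C=50)
print(gravity_score(votes=50, age_hours=24), newton_cooling(C=50, t_hours=24, alpha=alpha))
# -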
# #### 2. Leaderboards that account for downvotes
# 1. The time-aware leaderboards above only use upvotes, with no downvotes. The formula in the figure below covers the case where downvotes exist; its quantities are as follows (a hedged code sketch follows this list)
# <img src='img/fanduipiaorank.png' width='50%' height='50%'>
# 1. $Q_{viewers}$ : the number of times the item was viewed
# 2. $Q_{answers}$ : the number of answers
# 3. $Q_{score}$ : the item's score (upvotes minus downvotes)
# 4. $A_{{score}_i}$ : the score of the item's $i$-th answer
# 5. $Q_{age}$ : how long the item has existed
# 6. $Q_{updated}$ : the time since the item was last modified
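# The figure itself is not reproduced here; one widely cited formula of this family is Stack Overflow's "hot question" score, sketched below purely as an illustration (the constants are the commonly quoted ones, not taken from this notebook's figure):
# +
from math import log10

def hot_question_score(Qviews, Qanswers, Qscore, Ascores, Qage_hours, Qupdated_hours):
    """Stack-Overflow-style hotness: quality terms divided by a time-decay term."""
    quality = log10(max(Qviews, 1)) * 4 + (Qanswers * Qscore) / 5 + sum(Ascores)
    decay = ((Qage_hours + 1) - (Qage_hours - Qupdated_hours) / 2) ** 1.5
    return quality / decay

print(hot_question_score(Qviews=1200, Qanswers=3, Qscore=25, Ascores=[10, 4, 1],
                         Qage_hours=48, Qupdated_hours=12))
# -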
# #### 3. Leaderboards based on average rating quality
# 1. First, use the Wilson score interval to estimate an item's average approval rate
# <img src='img/weierxun.png' width='45%' height='45%'>
# 1. $\hat{p}$ : the item's approval (positive-rating) rate
# 2. $z_{1-\frac{\alpha}{2}}$ : the normal quantile at confidence level $\alpha$
# 2. Once an item's Wilson interval is known, a UCB-style algorithm can then use the upper bound of this confidence interval (a code sketch follows)
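# A minimal sketch of the Wilson score interval (not from the original text): ranking by the lower bound is the conservative "average approval" choice, while a UCB-style strategy as described above would use the upper bound:
# +
import math

def wilson_interval(pos, n, z=1.96):
    """Wilson score interval for a proportion with `pos` positive ratings out of `n`."""
    if n == 0:
        return 0.0, 1.0
    p_hat = pos / n
    denom = 1 + z * z / n
    centre = p_hat + z * z / (2 * n)
    margin = z * math.sqrt(p_hat * (1 - p_hat) / n + z * z / (4 * n * n))
    return (centre - margin) / denom, (centre + margin) / denom

print(wilson_interval(90, 100))  # many ratings -> tight interval
print(wilson_interval(3, 3))     # few ratings -> wide interval, low lower bound
# -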
| 2,059 |
/Intro to Applied Data Science Capstone Project.ipynb | 0b2a909cd0738b9e421859fc30f6966a9b0b3761 | [] | no_license | jennafu/Coursea_Capstone | https://github.com/jennafu/Coursea_Capstone | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,646 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [conda env:.conda-py3]
# language: python
# name: conda-env-.conda-py3-py
# ---
# # 01__nearby_elems
#
# in this notebook, I examine the relationship between cis/trans compensation and the number of nearby regulatory elements
# +
import warnings
warnings.filterwarnings('ignore')
import itertools
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from itertools import combinations
from scipy.integrate import cumtrapz
from scipy.stats import linregress
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
# import utils
sys.path.append("../../../utils")
from plotting_utils import *
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
mpl.rcParams['figure.autolayout'] = False
# -
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
np.random.seed(2019)
QUANT_ALPHA = 0.05
# ## variables
data_f = "../../../data/02__mpra/03__results/all_processed_results.txt"
hg19_elems_f = "../../../misc/03__nearby_elems/hg19.num_elems_1Mb.bed"
mm9_elems_f = "../../../misc/03__nearby_elems/mm9.num_elems_1Mb.bed"
hg19_tss_f = "../../../misc/03__nearby_elems/hg19.num_TSSs_1Mb.bed"
mm9_tss_f = "../../../misc/03__nearby_elems/mm9.num_TSSs_1Mb.bed"
hg19_enh_f = "../../../misc/03__nearby_elems/hg19.num_enhs_1Mb.bed"
mm9_enh_f = "../../../misc/03__nearby_elems/mm9.num_enhs_1Mb.bed"
hg19_elems_closest_f = "../../../misc/03__nearby_elems/hg19_evo.tile1_only.closest_hg19_evo.txt"
mm9_elems_closest_f = "../../../misc/03__nearby_elems/mm9_evo.tile1_only.closest_mm9_evo.txt"
# ## 1. import data
data = pd.read_table(data_f)
data.head()
hg19_elems = pd.read_table(hg19_elems_f, sep="\t", header=None)
hg19_tss = pd.read_table(hg19_tss_f, sep="\t", header=None)
hg19_enh = pd.read_table(hg19_enh_f, sep="\t", header=None)
hg19_elems.columns = ["chr", "start", "end", "name", "n_elems_hg19"]
hg19_tss.columns = ["chr", "start", "end", "name", "n_tss_hg19"]
hg19_enh.columns = ["chr", "start", "end", "name", "n_enh_hg19"]
hg19_elems.head()
mm9_elems = pd.read_table(mm9_elems_f, sep="\t", header=None)
mm9_tss = pd.read_table(mm9_tss_f, sep="\t", header=None)
mm9_enh = pd.read_table(mm9_enh_f, sep="\t", header=None)
mm9_elems.columns = ["chr", "start", "end", "name", "n_elems_mm9"]
mm9_tss.columns = ["chr", "start", "end", "name", "n_tss_mm9"]
mm9_enh.columns = ["chr", "start", "end", "name", "n_enh_mm9"]
mm9_elems.head()
hg19_elems_closest = pd.read_table(hg19_elems_closest_f, header=None, names=["tss_chr", "tss_start", "tss_end",
"name", "score", "tss_strand",
"other_chr", "other_start", "other_end",
"other_id", "other_score",
"other_strand", "distance"])
hg19_elems_closest = hg19_elems_closest[["name", "other_id", "distance"]].drop_duplicates()
hg19_elems_closest.head()
mm9_elems_closest = pd.read_table(mm9_elems_closest_f, header=None, names=["tss_chr", "tss_start", "tss_end",
"name", "score", "tss_strand",
"other_chr", "other_start", "other_end",
"other_id", "other_score",
"other_strand", "distance"])
mm9_elems_closest = mm9_elems_closest[["name", "other_id", "distance"]].drop_duplicates()
# ## 2. join data w/ number of nearby elements
hg19_elems["hg19_id"] = hg19_elems["name"].str.split("__", expand=True)[1]
hg19_elems["tss_tile_num"] = hg19_elems["name"].str.split("__", expand=True)[2].str.split(";", expand=True)[0]
hg19_tss["hg19_id"] = hg19_tss["name"].str.split("__", expand=True)[1]
hg19_tss["tss_tile_num"] = hg19_tss["name"].str.split("__", expand=True)[2].str.split(";", expand=True)[0]
hg19_enh["hg19_id"] = hg19_enh["name"].str.split("__", expand=True)[1]
hg19_enh["tss_tile_num"] = hg19_enh["name"].str.split("__", expand=True)[2].str.split(";", expand=True)[0]
hg19_elems.head()
mm9_elems["mm9_id"] = mm9_elems["name"].str.split("__", expand=True)[1]
mm9_elems["tss_tile_num"] = mm9_elems["name"].str.split("__", expand=True)[2].str.split(";", expand=True)[0]
mm9_tss["mm9_id"] = mm9_tss["name"].str.split("__", expand=True)[1]
mm9_tss["tss_tile_num"] = mm9_tss["name"].str.split("__", expand=True)[2].str.split(";", expand=True)[0]
mm9_enh["mm9_id"] = mm9_enh["name"].str.split("__", expand=True)[1]
mm9_enh["tss_tile_num"] = mm9_enh["name"].str.split("__", expand=True)[2].str.split(";", expand=True)[0]
mm9_elems.head()
len(data)
# +
data = data.merge(hg19_elems[["hg19_id", "tss_tile_num", "n_elems_hg19"]], on=["hg19_id", "tss_tile_num"], how="left")
data = data.merge(hg19_tss[["hg19_id", "tss_tile_num", "n_tss_hg19"]], on=["hg19_id", "tss_tile_num"], how="left")
data = data.merge(hg19_enh[["hg19_id", "tss_tile_num", "n_enh_hg19"]], on=["hg19_id", "tss_tile_num"], how="left")
data = data.merge(mm9_elems[["mm9_id", "tss_tile_num", "n_elems_mm9"]], on=["mm9_id", "tss_tile_num"], how="left")
data = data.merge(mm9_tss[["mm9_id", "tss_tile_num", "n_tss_mm9"]], on=["mm9_id", "tss_tile_num"], how="left")
data = data.merge(mm9_enh[["mm9_id", "tss_tile_num", "n_enh_mm9"]], on=["mm9_id", "tss_tile_num"], how="left")
print(len(data))
data.head()
# -
# ## 3. look at overall numbers of nearby elems in human and mouse
# remove the 1 seq that is on chr1_random in mouse
data = data[~pd.isnull(data["n_elems_mm9"])]
# +
fig = plt.figure(figsize=(1.5, 1))
ax = sns.distplot(data["n_elems_hg19"], color=sns.color_palette("Set2")[1], label="human", hist=False)
sns.distplot(data["n_elems_mm9"], color=sns.color_palette("Set2")[0], label="mouse", hist=False)
ax.set_xlabel("number of regulatory elements within 1 Mb")
ax.set_ylabel("density")
ax.get_legend().remove()
# fig.savefig("n_elems_distplot.pdf", dpi="figure", bbox_inches="tight")
# +
fig = plt.figure(figsize=(1.5, 1))
ax = sns.distplot(data["n_tss_hg19"], color=sns.color_palette("Set2")[1], label="human", hist=False)
sns.distplot(data["n_tss_mm9"], color=sns.color_palette("Set2")[0], label="mouse", hist=False)
ax.set_xlabel("number of TSSs within 1 Mb")
ax.set_ylabel("density")
ax.get_legend().remove()
# fig.savefig("n_tss_distplot.pdf", dpi="figure", bbox_inches="tight")
# +
fig = plt.figure(figsize=(1.5, 1))
ax = sns.distplot(data["n_enh_hg19"], color=sns.color_palette("Set2")[1], label="human", hist=False)
sns.distplot(data["n_enh_mm9"], color=sns.color_palette("Set2")[0], label="mouse", hist=False)
ax.set_xlabel("number of enhancers within 1 Mb")
ax.set_ylabel("density")
ax.get_legend().remove()
# fig.savefig("n_enh_distplot.pdf", dpi="figure", bbox_inches="tight")
# -
data["mean_elems"] = data[["n_elems_hg19", "n_elems_mm9"]].mean(axis=1)
data["mean_tss"] = data[["n_tss_hg19", "n_tss_mm9"]].mean(axis=1)
data["mean_enh"] = data[["n_enh_hg19", "n_enh_mm9"]].mean(axis=1)
# ## 4. assign directional vs. compensatory status and filter
def cis_trans_status(row):
if row.cis_status_one == "significant cis effect":
if row.trans_status_one == "significant trans effect":
if "higher in human" in row.cis_status_det_one:
if "higher in human" in row.trans_status_det_one:
return "cis/trans directional"
else:
return "cis/trans compensatory"
else:
if "higher in human" in row.trans_status_det_one:
return "cis/trans compensatory"
else:
return "cis/trans directional"
else:
return "cis effect only"
else:
if row.trans_status_one == "significant trans effect":
return "trans effect only"
else:
return "no cis or trans effects"
data["cis_trans_status"] = data.apply(cis_trans_status, axis=1)
data.cis_trans_status.value_counts()
data = data[~pd.isnull(data["minimal_biotype_hg19"])]
len(data)
data_filt = data[((data["HUES64_padj_hg19"] < QUANT_ALPHA) | (data["mESC_padj_mm9"] < QUANT_ALPHA))]
len(data_filt)
data_filt_sp = data_filt.drop("orig_species", axis=1)
data_filt_sp.drop_duplicates(inplace=True)
len(data_filt_sp)
# ## filter out elements that are super close together so it doesn't bias results
hg19_elems_closest = hg19_elems_closest[hg19_elems_closest["name"] != hg19_elems_closest["other_id"]]
print(len(hg19_elems_closest))
hg19_elems_1mb = hg19_elems_closest[hg19_elems_closest["distance"].astype(int) <= 100000]  # 100 kb cutoff despite the "_1mb" name; the same cutoff is used for mm9 below
print(len(hg19_elems_1mb))
hg19_elems_1mb.head()
mm9_elems_closest = mm9_elems_closest[mm9_elems_closest["name"] != mm9_elems_closest["other_id"]]
print(len(mm9_elems_closest))
mm9_elems_1mb = mm9_elems_closest[mm9_elems_closest["distance"].astype(int) <= 100000]
print(len(mm9_elems_1mb))
mm9_elems_1mb.head()
# +
# find those to filter out in human
hg19_filter_out = []
for i, row in hg19_elems_1mb.iterrows():
name = row["name"]
other_id = row["other_id"]
if name in hg19_filter_out:
continue
else:
hg19_filter_out.append(other_id)
hg19_filter_out = list(set(hg19_filter_out))
len(hg19_filter_out)
# +
# find those to filter out in mouse
mm9_filter_out = []
for i, row in mm9_elems_1mb.iterrows():
name = row["name"]
other_id = row["other_id"]
if name in mm9_filter_out:
continue
else:
mm9_filter_out.append(other_id)
mm9_filter_out = list(set(mm9_filter_out))
len(mm9_filter_out)
# -
hg19_filter_out = pd.DataFrame(data=hg19_filter_out)
hg19_filter_out.columns = ["name"]
hg19_filter_out["hg19_id"] = hg19_filter_out["name"].str.split("__", expand=True)[1]
hg19_filter_out.head()
mm9_filter_out = pd.DataFrame(data=mm9_filter_out)
mm9_filter_out.columns = ["name"]
mm9_filter_out["mm9_id"] = mm9_filter_out["name"].str.split("__", expand=True)[1]
mm9_filter_out.head()
# ## 5. look at reg elems vs. cis/trans status
order = ["no cis or trans effects", "cis/trans compensatory", "cis effect only", "trans effect only",
"cis/trans directional"]
min_order = ["cis/trans compensatory", "cis/trans directional"]
pal = {"no cis or trans effects": sns.color_palette("Set2")[7], "cis effect only": sns.color_palette("Set2")[2],
"trans effect only": sns.color_palette("Set2")[2], "cis/trans directional": sns.color_palette("Set2")[2],
"cis/trans compensatory": sns.color_palette("Set2")[7]}
# ### all REs
len(data_filt_sp)
df = data_filt_sp[(~data_filt_sp["hg19_id"].isin(hg19_filter_out["hg19_id"])) &
(~data_filt_sp["mm9_id"].isin(mm9_filter_out["mm9_id"]))]
len(df)
# +
fig = plt.figure(figsize=(1, 1.75))
ax = sns.boxplot(data=df, x="cis_trans_status", y="mean_elems", order=min_order,
flierprops = dict(marker='o', markersize=5), palette=pal)
mimic_r_boxplot(ax)
ax.set_xticklabels(["compensatory", "directional"], rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel("# total REs within 1 Mb")
for i, l in enumerate(min_order):
sub = df[df["cis_trans_status"] == l]
n = len(sub)
print("%s median REs: %s" % (l, sub["mean_elems"].median()))
color = pal[l]
ax.annotate(str(n), xy=(i, -120), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=color, size=fontsize)
### pvals ###
vals1 = np.asarray(df[df["cis_trans_status"] == "cis/trans compensatory"]["mean_elems"])
vals2 = np.asarray(df[df["cis_trans_status"] == "cis/trans directional"]["mean_elems"])
vals1 = vals1[~np.isnan(vals1)]
vals2 = vals2[~np.isnan(vals2)]
_, pval12 = stats.mannwhitneyu(vals1, vals2, alternative="two-sided", use_continuity=False)
annotate_pval(ax, 0.2, 0.8, 400, 0, 400, pval12, fontsize-1)
ax.set_ylim((-150, 1100))
fig.savefig("Fig6G.pdf", dpi="figure", bbox_inches="tight")
# -
# ### TSSs only
# +
fig = plt.figure(figsize=(1, 1.75))
ax = sns.boxplot(data=df, x="cis_trans_status", y="mean_tss", order=min_order,
flierprops = dict(marker='o', markersize=5), palette=pal)
mimic_r_boxplot(ax)
ax.set_xticklabels(["compensatory", "directional"], rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel("# TSSs within 1 Mb")
for i, l in enumerate(min_order):
sub = df[df["cis_trans_status"] == l]
n = len(sub)
print("%s median REs: %s" % (l, sub["mean_tss"].median()))
color = pal[l]
ax.annotate(str(n), xy=(i, -120), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=color, size=fontsize)
### pvals ###
vals1 = np.asarray(df[df["cis_trans_status"] == "cis/trans compensatory"]["mean_tss"])
vals2 = np.asarray(df[df["cis_trans_status"] == "cis/trans directional"]["mean_tss"])
vals1 = vals1[~np.isnan(vals1)]
vals2 = vals2[~np.isnan(vals2)]
_, pval12 = stats.mannwhitneyu(vals1, vals2, alternative="two-sided", use_continuity=False)
annotate_pval(ax, 0.2, 0.8, 400, 0, 400, pval12, fontsize-1)
ax.set_ylim((-150, 1100))
fig.savefig("Fig6I.pdf", dpi="figure", bbox_inches="tight")
# -
# ### enhancers only
# +
fig = plt.figure(figsize=(1, 1.75))
ax = sns.boxplot(data=df, x="cis_trans_status", y="mean_enh", order=min_order,
flierprops = dict(marker='o', markersize=5), palette=pal)
mimic_r_boxplot(ax)
ax.set_xticklabels(["compensatory", "directional"], rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel("# enhancers within 1 Mb")
for i, l in enumerate(min_order):
sub = df[df["cis_trans_status"] == l]
n = len(sub)
print("%s median REs: %s" % (l, sub["mean_enh"].median()))
color = pal[l]
ax.annotate(str(n), xy=(i, -20), xycoords="data", xytext=(0, 0),
textcoords="offset pixels", ha='center', va='bottom',
color=color, size=fontsize)
### pvals ###
vals1 = np.asarray(df[df["cis_trans_status"] == "cis/trans compensatory"]["mean_enh"])
vals2 = np.asarray(df[df["cis_trans_status"] == "cis/trans directional"]["mean_enh"])
vals1 = vals1[~np.isnan(vals1)]
vals2 = vals2[~np.isnan(vals2)]
_, pval12 = stats.mannwhitneyu(vals1, vals2, alternative="two-sided", use_continuity=False)
annotate_pval(ax, 0.2, 0.8, 100, 0, 100, pval12, fontsize-1)
ax.set_ylim((-30, 150))
fig.savefig("Fig6H.pdf", dpi="figure", bbox_inches="tight")
# -
| 14,956 |
/Chapter 2/Section 2.4.ipynb
|
d4ebfce1db3d67d49857ad1d3070355c7bc9348d
|
[
"MIT"
] |
permissive
|
allenwoods/sicp-jupyter
|
https://github.com/allenwoods/sicp-jupyter
| 0 | 1 |
MIT
| 2019-05-22T05:53:17 | 2019-05-21T01:25:45 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 41,397 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## [2.4 可变数据](http://www-inst.eecs.berkeley.edu/~cs61a/sp12/book/objects.html#mutable-data)
# 我们已经看到了抽象在帮助我们应对大型系统的复杂性时如何至关重要。有效的程序整合也需要一些组织原则,指导我们构思程序的概要设计。特别地,我们需要一些策略来帮助我们构建大型系统,使之模块化。也就是说,它们可以“自然”划分为可以分离开发和维护的各个相关部分。
#
# 我们用于创建模块化程序的强大工具之一,是引入可能会随时间改变的新类型数据。这样,单个数据可以表示独立于其他程序演化的东西。对象行为的改变可能会由它的历史影响,就像世界中的实体那样。向数据添加状态是这一章最终目标:面向对象编程的要素。
#
# 我们目前引入的原生数据类型 -- 数值、布尔值、元组、范围和字符串 -- 都是不可变类型的对象。虽然名称的绑定可以在执行过程中修改为环境中不同的值,但是这些值本身不会改变。这一章中,我们会介绍一组可变数据类型。可变对象可以在程序执行期间改变。
#
# ### 2.4.1 局部状态
#
# 我们第一个可变对象的例子就是局部状态。这个状态会在程序执行期间改变。
#
# 为了展示函数的局部状态是什么东西,让我们对从银行取钱的情况进行建模。我们会通过创建叫做`withdraw`的函数来实现它,它将要取出的金额作为参数。如果账户中有足够的钱来取出,`withdraw`应该返回取钱之后的余额。否则,`withdraw`应该返回消息`'Insufficient funds'`。例如,如果我们以账户中的`$100`开始,我们希望通过调用`withdraw`来得到下面的序列:
withdraw(25)     # 75
withdraw(25)     # 50
withdraw(60)     # 'Insufficient funds'
withdraw(15)     # 35
# 观察表达式`withdraw(25)`,求值了两次,产生了不同的值。这是一种用户定义函数的新行为:它是非纯函数。调用函数不仅仅返回一个值,同时具有以一些方式修改函数的副作用,使带有相同参数的下次调用返回不同的结果。我们所有用户定义的函数,到目前为止都是纯函数,除非他们调用了非纯的内建函数。它们仍旧是纯函数,因为它们并不允许修改任何在局部环境帧之外的东西。
#
# 为了使`withdraw`有意义,它必须由一个初始账户余额创建。`make_withdraw`函数是个高阶函数,接受起始余额作为参数,`withdraw`函数是它的返回值。
withdraw = make_withdraw(100)
# `make_withdraw`的实现需要新类型的语句:`nonlocal`语句。当我们调用`make_withdraw`时,我们将名称`balance`绑定到初始值上。之后我们定义并返回了局部函数,`withdraw`,它在调用时更新并返回`balance`的值。
def make_withdraw(balance):
"""Return a withdraw function that draws down balance with each call."""
def withdraw(amount):
nonlocal balance # Declare the name "balance" nonlocal
if amount > balance:
return 'Insufficient funds'
balance = balance - amount # Re-bind the existing balance name
return balance
return withdraw
# 这个实现的新奇部分是`nonlocal`语句,无论什么时候我们修改了名称`balance`的绑定,绑定都会在`balance`所绑定的第一个帧中修改。回忆一下,在没有`nonlocal`语句的情况下,赋值语句总是会在环境的第一个帧中绑定名称。`nonlocal`语句表明,名称出现在环境中不是第一个(局部)帧,或者最后一个(全局)帧的其它地方。
#
# 我们可以将这些修改使用环境图示来可视化。下面的环境图示展示了每个调用的效果,以上面的定义开始。我们省略了函数值中的代码,以及不在我们讨论中的表达式树。
#
# 
#
# 我们的定义语句拥有平常的效果:它创建了新的用户定义函数,并且将名称`make_withdraw`在全局帧中绑定到那个函数上。
#
# 下面,我们使用初始的余额参数`20`来调用`make_withdraw`。
wd = make_withdraw(20)
# 这个赋值语句将名称`wd`绑定到全局帧中的返回函数上:
#
# 
#
# 所返回的函数,(内部)叫做`withdraw`,和定义所在位置即`make_withdraw`的局部环境相关联。名称`balance`在这个局部环境中绑定。在例子的剩余部分中,`balance`名称只有这一个绑定,这非常重要。
#
# 下面,我们求出以总数`5`调用`withdraw`的表达式的值:
wd(5)
# 名称`wd`绑定到了`withdraw`函数上,所以`withdraw`的函数体在新的环境中求值,新的环境扩展自`withdraw`定义所在的环境。跟踪`withdraw`求值的效果展示了 Python 中`nonlocal`语句的效果。
#
# 
#
# `withdraw`的赋值语句通常在`withdraw`的局部帧中为`balance`创建新的绑定。由于`nonlocal`语句,赋值运算找到了`balance`定义位置的第一帧,并在那里重新绑定名称。如果`balance`之前没有绑定到值上,那么`nonlocal`语句会产生错误。
#
# 通过修改`balance`绑定的行为,我们也修改了`withdraw`函数。下次`withdraw`调用的时候,名称`balance`会求值为`15`而不是`20`。
#
# 当我们第二次调用`wd`时,
wd(3)
# 我们发现绑定到`balance`的值的修改可在两个调用之间积累。
#
# 
#
# 这里,第二次调用`withdraw`会创建第二个局部帧,像之前一样,但是,`withdraw`的两个帧都扩展自`make_withdraw`的环境,它们都包含`balance`的绑定。所以,它们共享特定的名称绑定,调用`withdraw`具有改变环境的副作用,并且会由之后的`withdraw`调用继承。
#
# **实践指南。**通过引入`nonlocal`语句,我们发现了赋值语句的双重作用。它们修改局部绑定,或者修改非局部绑定。实际上,赋值语句已经有了两个作用:创建新的绑定,或者重新绑定现有名称。Python 赋值的许多作用使赋值语句的执行效果变得模糊。作为一个程序员,你应该用文档清晰记录你的代码,使赋值的效果可被其它人理解。
#
# ### 2.4.2 非局部赋值的好处
#
# 非局部赋值是将程序作为独立和自主的对象观察的重要步骤,对象彼此交互,但是各自管理各自的内部状态。
#
# 特别地,非局部赋值提供了在函数的局部范围中维护一些状态的能力,这些状态会在函数之后的调用中演化。和特定`withdraw`函数相关的`balance`在所有该函数的调用中共享。但是,`withdraw`实例中的`balance`绑定对程序的其余部分不可见。只有`withdraw`关联到了`make_withdraw`的帧,`withdraw`在那里被定义。如果`make_withdraw`再次调用,它会创建单独的帧,带有单独的`balance`绑定。
#
# 我们可以继续以我们的例子来展示这个观点。`make_withdraw`的第二个调用返回了第二个`withdraw`函数,它关联到了另一个环境上。
wd2 = make_withdraw(7)
# 第二个`withdraw`函数绑定到了全局帧的名称`wd2`上。我们使用星号来省略了表示这个绑定的线。现在,我们看到实际上有两个`balance`的绑定。名称`wd`仍旧绑定到余额为`12`的`withdraw`函数上,而`wd2`绑定到了余额为`7`的新的`withdraw`函数上。
#
# 
#
# 最后,我们调用绑定到`wd2`上的第二个`withdraw`函数:
wd2(6)
# 这个调用修改了非局部名称`balance`的绑定,但是不影响在全局帧中绑定到名称`wd`的第一个`withdraw`。
#
# 
#
# 这样,`withdraw`的每个实例都维护它自己的余额状态,但是这个状态对程序中其它函数不可见。在更高层面上观察这个情况,我们创建了银行账户的抽象,它管理自己的内部状态,但以一种方式对真实世界的账户进行建模:它基于自己的历史提取请求来随时间变化。
#
# ### 2.4.3 非局部赋值的代价
#
# 我们扩展了我们的计算环境模型,用于解释非局部赋值的效果。但是,非局部赋值与我们思考名称和值的方式有一些细微差异。
#
# 之前,我们的值并没有改变,仅仅是我们的名称和绑定发生了变化。当两个名称`a`和`b`绑定到`4`上时,它们绑定到了相同的`4`还是不同的`4`并不重要。我们说,只有一个`4`对象,并且它永不会改变。
#
# 但是,带有状态的函数不是这样的。当两个名称`wd`和`wd2`都绑定到`withdraw`函数时,它们绑定到相同函数还是函数的两个不同实例,就很重要了。考虑下面的例子,它与我们之前分析的那个正好相反:
wd = make_withdraw(12)
wd2 = wd
wd2(1)
wd(1)
# 这里,通过`wd2`调用函数会修改名称为`wd`的函数的值,因为两个名称都指向相同的函数。这些语句执行之后的环境图示展示了这个现象:
#
# 
#
# 两个名称指向同一个值在世界上不常见,但我们程序中就是这样。但是,由于值会随时间改变,我们必须非常仔细来理解其它名称上的变化效果,它们可能指向这些值。
#
# 正确分析带有非局部赋值代码的关键是,记住只有函数调用可以创建新的帧。赋值语句始终改变现有帧中的绑定。这里,除非`make_withdraw`调用了两次,`balance`还是只有一个绑定。
#
# **变与不变。**这些细微差别出现的原因是,通过引入修改非局部环境的非纯函数,我们改变了表达式的本质。只含有纯函数的表达式是引用透明(referentially transparent)的。如果我们将它的子表达式换成子表达式的值,它的值不会改变。
#
# 重新绑定的操作违反了引用透明的条件,因为它们不仅仅返回一个值。它们修改了环境。当我们引入任意重绑定的时候,我们就会遇到一个棘手的认识论问题:它对于两个相同的值意味着什么。在我们的计算环境模型中,两个分别定义的函数并不是相同的,因为其中一个的改变并不影响另一个。
#
# 通常,只要我们不会修改数据对象,我们就可以将复合数据对象看做其部分的总和。例如,有理数可以通过提供分子和分母来确定。但是这个观点在变化出现时不再成立了,其中复合数据对象拥有一个“身份”,不同于组成它的各个部分。即使我们通过取钱来修改了余额,某个银行账户还是“相同”的银行账户。相反,我们可以让两个银行账户碰巧具有相同的余额,但它们是不同的对象。
#
# 尽管它引入了新的困难,非局部赋值是个创建模块化编程的强大工具,程序的不同部分,对应不同的环境帧,可以在程序执行中独立演化。而且,使用带有局部状态的函数,我们就能实现可变数据类型。在这一节的剩余部分,我们介绍了一些最实用的 Python 内建数据类型,以及使用带有非局部赋值的函数,来实现这些数据类型的一些方法。
#
# ### 2.4.4 列表
#
# `list`是 Python 中最常用且灵活的序列类型。列表类似于元组,但是它是可变的。方法调用和赋值语句都可以修改列表的内容。
#
# 我们可以通过一个展示(极大简化的)扑克牌历史的例子,来介绍许多列表编辑操作。例子中的注释描述了每个方法的效果。
#
# 扑克牌发明于中国,大概在 9 世纪。早期的牌组中有三个花色,它们对应钱的三个面额。
chinese_suits = ['coin', 'string', 'myriad'] # A list literal
suits = chinese_suits # Two names refer to the same list
# 扑克牌传到欧洲(也可能通过埃及)之后,西班牙的牌组(oro)中之只保留了硬币的花色。
suits.pop() # Removes and returns the final element
suits.remove('string') # Removes the first element that equals the argument
# 然后又添加了三个新的花色(它们的设计和名称随时间而演化),
suits.append('cup') # Add an element to the end
suits.extend(['sword', 'club']) # Add all elements of a list to the end
# 意大利人把剑叫做“黑桃”:
suits[2] = 'spade' # Replace an element
# 下面是传统的意大利牌组:
suits   # ['coin', 'cup', 'spade', 'club']
# 我们现在在美国使用的法式变体修改了前两个:
# +
suits[0:2] = ['heart', 'diamond'] # Replace a slice
suits
# -
# 也存在用于插入、排序和反转列表的操作。所有这些修改操作都改变了列表的值,它们并不创建新的列表对象。
#
# **共享和身份。**由于我们修改了一个列表,而不是创建新的列表,绑定到名称`chinese_suits`上的对象也改变了,因为它与绑定到`suits`上的对象是相同的列表对象。
chinese_suits # This name co-refers with "suits" to the same list
# 列表可以使用`list`构造函数来复制。其中一个的改变不会影响另一个,除非它们共享相同的结构。
nest = list(suits) # Bind "nest" to a second list with the same elements
nest[0] = suits # Create a nested list
# 在最后的赋值之后,我们只剩下下面的环境,其中列表使用盒子和指针的符号来表示:
#
# 
#
# 根据这个环境,修改由`suits`指向的列表会影响`nest`第一个元素的嵌套列表,但是不会影响其他元素:
suits.insert(2, 'Joker') # Insert an element at index 2, shifting the rest
nest
nest[0].pop(2)
suits
# 与之类似,在`nest`的第一个元素上撤销这个修改也会影响到`suits`。
#
# 由于这个`pop`方法的调用,我们返回到了上面描述的环境。
#
# 由于两个列表具有相同内容,但是实际上是不同的列表,我们需要一种手段来测试两个对象是否相同。Python 引入了两个比较运算符,叫做`is`和`is not`,测试了两个表达式实际上是否求值为同一个对象。如果两个对象的当前值相等,并且一个对象的改变始终会影响另一个,那么两个对象是同一个对象。身份是个比相等性更强的条件。
#
# > 译者注:两个对象当且仅当在内存中的位置相同时为同一个对象。CPython 的实现直接比较对象的地址来确定。
suits is nest[0]
suits is ['heart', 'diamond', 'spade', 'club']
suits == ['heart', 'diamond', 'spade', 'club']
# 最后的两个比较展示了`is`和`==`的区别,前者检查身份,而后者检查内容的相等性。
#
# **列表推导式。**列表推导式使用扩展语法来创建列表,与生成器表达式的语法相似。
#
# 例如,`unicodedata`模块跟踪了 Unicode 字母表中每个字符的官方名称。我们可以查找与名称对应的字符,包含这些卡牌花色的字符。
from unicodedata import lookup
[lookup('WHITE ' + s.upper() + ' SUIT') for s in suits]
# 列表推导式使用序列的接口约定增强了数据处理的范式,因为列表是一种序列数据类型。
#
# **扩展阅读。**Dive Into Python 3 的[推导式](http://diveintopython3.ep.io/comprehensions.html)一章包含了一些示例,展示了如何使用 Python 浏览计算机的文件系统。这一章介绍了`os`模块,它可以列出目录的内容。这个材料并不是这门课的一部分,但是推荐给任何想要增加 Python 知识和技巧的人。
#
# **实现。**列表是序列,就像元组一样。Python 语言并不提供给我们列表实现的直接方法,只提供序列抽象,和我们在这一节介绍的可变方法。为了克服这一语言层面的抽象界限,我们可以开发列表的函数式实现,再次使用递归表示。这一节也有第二个目的:加深我们对调度函数的理解。
#
# 我们会将列表实现为函数,它将一个递归列表作为自己的局部状态。列表需要有一个身份,就像任何可变值那样。特别地,我们不能使用`None`来表示任何空的可变列表,因为两个空列表并不是相同的值(例如,向一个列表添加元素并不会添加到另一个),但是`None is None`。另一方面,两个不同的函数足以区分两个空列表,它们都将`empty_rlist`作为局部状态。
#
# 我们的可变列表是个调度函数,就像我们偶对的函数式实现也是个调度函数。它检查输入“信息”是否为已知信息,并且对每个不同的输入执行相应的操作。我们的可变列表可响应五个不同的信息。前两个实现了序列抽象的行为。接下来的两个添加或删除列表的第一个元素。最后的信息返回整个列表内容的字符串表示。
empty_rlist = None
def make_rlist(first, rest):
"""Make a recursive list from its first element and the rest."""
return (first, rest)
def first(s):
"""Return the first element of a recursive list s."""
return s[0]
def rest(s):
"""Return the rest of the elements of a recursive list s."""
return s[1]
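# The dispatch function below also uses len_rlist and getitem_rlist, which were
# defined in an earlier section of the book and are not repeated in this notebook;
# minimal versions are reproduced here so the cell can run on its own.
def len_rlist(s):
    """Return the length of recursive list s."""
    length = 0
    while s != empty_rlist:
        s, length = rest(s), length + 1
    return length
def getitem_rlist(s, i):
    """Return the element at index i (0-indexed) of recursive list s."""
    while i > 0:
        s, i = rest(s), i - 1
    return first(s)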
def make_mutable_rlist():
"""Return a functional implementation of a mutable recursive list."""
contents = empty_rlist
def dispatch(message, value=None):
nonlocal contents
if message == 'len':
return len_rlist(contents)
elif message == 'getitem':
return getitem_rlist(contents, value)
elif message == 'push_first':
contents = make_rlist(value, contents)
elif message == 'pop_first':
f = first(contents)
contents = rest(contents)
return f
elif message == 'str':
return str(contents)
return dispatch
# 我们也可以添加一个辅助函数,来从任何内建序列中构建函数式实现的递归列表。只需要以递归顺序添加每个元素。
def to_mutable_rlist(source):
"""Return a functional list with the same contents as source."""
s = make_mutable_rlist()
for element in reversed(source):
s('push_first', element)
return s
# 在上面的定义中,函数`reversed`接受并返回可迭代值。它是使用序列的接口约定的另一个示例。
#
# 这里,我们可以构造函数式实现的列表,要注意列表自身也是个函数。
s = to_mutable_rlist(suits)
type(s)
s('str')   # "('heart', ('diamond', ('spade', ('club', None))))"
# 另外,我们可以像列表`s`传递信息来修改它的内容,比如移除第一个元素。
s('pop_first')   # 'heart'
s('str')         # "('diamond', ('spade', ('club', None)))"
# 原则上,操作`push_first`和`pop_first`足以对列表做任意修改。我们总是可以清空整个列表,之后将它旧的内容替换为想要的结果。
#
# **消息传递。**给予一些时间,我们就能实现许多实用的 Python 列表可变操作,比如`extend`和`insert`。我们有一个选择:我们可以将它们全部实现为函数,这会使用现有的消息`pop_first`和`push_first`来实现所有的改变操作。作为代替,我们也可以向`dispatch`函数体添加额外的`elif`子句,每个子句检查一个消息(例如`'extend'`),并且直接在`contents`上做出合适的改变。
#
# 第二个途径叫做消息传递,它把数据值上面所有操作的逻辑封装在一个函数中,这个函数响应不同的消息。一个使用消息传递的程序定义了调度函数,每个函数都拥有局部状态,通过传递“消息”作为第一个参数给这些函数来组织计算。消息是对应特定行为的字符串。
#
# 可以想象,在`dispatch`的函数体中通过名称来枚举所有这些消息非常无聊,并且易于出现错误。Python 的字典提供了一种数据类型,会帮助我们管理消息和操作之间的映射,它会在下一节中介绍。
#
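# For example, a small sketch of the first approach: an "extend"-like operation can be
# written as an ordinary function on top of the existing messages, without adding a new
# clause to dispatch.
def extend_mutable_rlist(s, elements):
    """Append each element of an iterable to the end of functional list s,
    using only the 'len', 'pop_first' and 'push_first' messages."""
    old = [s('pop_first') for _ in range(s('len'))]  # empty the list, remembering its contents
    for element in reversed(list(elements)):         # push the new elements; they end up last
        s('push_first', element)
    for element in reversed(old):                    # push the old contents back onto the front
        s('push_first', element)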
# ### 2.4.5 字典
#
# 字典是 Python 内建数据类型,用于储存和操作对应关系。字典包含了键值对,其中键和值都可以是对象。字典的目的是提供一种抽象,用于储存和获取下标不是连续整数,而是描述性的键的值。
#
# 字符串通常用作键,因为字符串通常用于表示事物名称。这个字典字面值提供了不同罗马数字的值。
numerals = {'I': 1.0, 'V': 5, 'X': 10}
# 我们可以使用元素选择运算符,来通过键查找值,我们之前将其用于序列。
numerals['X']
# 字典的每个键最多只能拥有一个值。添加新的键值对或者修改某个键的已有值,可以使用赋值运算符来完成。
numerals['I'] = 1
numerals['L'] = 50
numerals
# 要注意,`'L'`并没有添加到上面输出的末尾。字典是无序的键值对集合。当我们打印字典时,键和值都以某种顺序来渲染,但是对语言的用户来说,不应假设顺序总是这样。
#
# 字典抽象也支持多种方法,来从整体上迭代字典中的内容。方法`keys`、`values`和`items`都返回可迭代的值。
sum(numerals.values())
# 通过调用`dict`构造函数,键值对的列表可以转换为字典。
dict([(3, 9), (4, 16), (5, 25)])
# 字典也有一些限制:
#
# + 字典的键不能是可变内建类型的对象。
# + 一个给定的键最多只能有一个值。
#
# 第一条限制被绑定到了 Python 中字典的底层实现上。这个实现的细节并不是这门课的主题。直觉上,键告诉了 Python 应该在内存中的哪里寻找键值对;如果键发生改变,键值对就会丢失。
#
# 第二个限制是字典抽象的结果,它为储存和获取某个键的值而设计。如果字典中最多只存在一个这样的值,我们只能获取到某个键的一个值。
#
# 由字典实现的一个实用方法是`get`,如果键存在的话,它返回键的值,否则返回一个默认值。`get`的参数是键和默认值。
numerals.get('A', 0)
numerals.get('V', 0)
# 字典也拥有推导式语法,和列表和生成器表达式类似。求解字典推导式会产生新的字典对象。
{x: x*x for x in range(3,6)}
# **实现。**我们可以实现一个抽象数据类型,它是一个记录的列表,与字典抽象一致。每个记录都是两个元素的列表,包含键和相关的值。
def make_dict():
"""Return a functional implementation of a dictionary."""
records = []
def getitem(key):
for k, v in records:
if k == key:
return v
def setitem(key, value):
for item in records:
if item[0] == key:
item[1] = value
return
records.append([key, value])
def dispatch(message, key=None, value=None):
if message == 'getitem':
return getitem(key)
elif message == 'setitem':
setitem(key, value)
elif message == 'keys':
return tuple(k for k, _ in records)
elif message == 'values':
return tuple(v for _, v in records)
return dispatch
# 同样,我们使用了传递方法的消息来组织我们的实现。我们已经支持了四种消息:`getitem`、`setitem`、`keys`和`values`。要查找某个键的值,我们可以迭代这些记录来寻找一个匹配的键。要插入某个键的值,我们可以迭代整个记录来观察是否已经存在带有这个键的记录。如果没有,我们会构造一条新的记录。如果已经有了带有这个键的记录,我们将这个记录的值设为新的值。
#
# 我们现在可以使用我们的实现来储存和获取值。
d = make_dict()
d('setitem', 3, 9)
d('setitem', 4, 16)
d('getitem', 3)
d('getitem', 4)
d('keys')
d('values')
# 这个字典实现并不为快速的记录检索而优化,因为每个响应`getitem`消息都必须迭代整个`records`列表。内建的字典类型更加高效。
#
# ### 2.4.6 示例:传播约束
#
# 可变数据允许我们模拟带有变化的系统,也允许我们构建新的抽象类型。在这个延伸的实例中,我们组合了非局部赋值、列表和字典来构建一个基于约束的系统,支持多个方向上的计算。将程序表达为约束是一种声明式编程,其中程序员声明需要求解的问题结构,但是抽象了问题解决方案如何计算的细节。
#
# 计算机程序通常组织为单方向的计算,它在预先设定的参数上执行操作,来产生合理的输出。另一方面,我们通常希望根据数量上的关系对系统建模。例如,我们之前考虑过理想气体定律,它通过波尔兹曼常数`k`关联了理想气体的气压`p`,体积`v`,数量`n`以及温度`t`。
# ```
# p * v = n * k * t
# ```
# 这样一个方程并不是单方向的。给定任何四个数量,我们可以使用这个方程来计算第五个。但将这个方程翻译为某种传统的计算机语言会强迫我们选择一个数量,根据其余四个计算出来。所以计算气压的函数应该不能用于计算温度,即使二者的计算通过相同的方程完成。
#
# 这一节中,我们从零开始设计线性计算的通用模型。我们定义了数量之间的基本约束,例如`adder(a, b, c)`会严格保证数学关系`a + b = c`。
#
# 我们也定义了组合的手段,使基本约束可以被组合来表达更复杂的关系。这样,我们的程序就像一种编程语言。我们通过构造网络来组合约束,其中约束由连接器连接。连接器是一种对象,它“持有”一个值,并且可能会参与一个或多个约束。
#
# 例如,我们知道华氏和摄氏温度的关系是:
# ```
# 9 * c = 5 * (f - 32)
# ```
# 这个等式是`c`和`f`之间的复杂约束。这种约束可以看做包含`adder`、`multiplier`和`contant`约束的网络。
#
# 
#
# 这张图中,我们可以看到,左边是一个带有三个终端的乘法器盒子,标记为`a`,`b`和`c`。它们将乘法器连接到网络剩余的部分:终端`a`链接到了连接器`celsius`上,它持有摄氏温度。终端`b`链接到了连接器`w`上,`w`也链接到持有`9`的盒子上。终端`c`,被乘法器盒子约束为`a`和`b`的乘积,链接到另一个乘法器盒子上,它的`b`链接到常数`5`上,以及它的`a`连接到了求和约束的一项上。
#
# 这个网络上的计算会如下进行:当连接器被提供一个值时(被用户或被链接到它的约束器),它会唤醒所有相关的约束(除了刚刚唤醒的约束)来通知它们它得到了一个值。每个唤醒的约束之后会调查它的连接器,来看看是否有足够的信息来为连接器求出一个值。如果可以,盒子会设置这个连接器,连接器之后会唤醒所有相关的约束,以此类推。例如,在摄氏温度和华氏温度的转换中,`w`、`x`和`y`会被常量盒子`9`、`5`和`32`立即设置。连接器会唤醒乘法器和加法器,它们判断出没有足够的信息用于处理。如果用户(或者网络的其它部分)将`celsis`连接器设置为某个值(比如`25`),最左边的乘法器会被唤醒,之后它会将`u`设置为`25 * 9 = 225`。之后`u`会唤醒第二个乘法器,它会将`v`设置为`45`,之后`v`会唤醒加法器,它将`fahrenheit`连接器设置为`77`。
#
# **使用约束系统。**为了使用约束系统来计算出上面所描述的温度计算,我们首先创建了两个具名连接器,`celsius`和`fahrenheit`,通过调用`make_connector`构造器。
celsius = make_connector('Celsius')
fahrenheit = make_connector('Fahrenheit')
# 之后,我们将这些连接器链接到网络中,这个网络反映了上面的图示。函数`make_converter`组装了网络中不同的连接器和约束:
def make_converter(c, f):
"""Connect c to f with constraints to convert from Celsius to Fahrenheit."""
u, v, w, x, y = [make_connector() for _ in range(5)]
multiplier(c, w, u)
multiplier(v, x, u)
adder(v, y, f)
constant(w, 9)
constant(x, 5)
constant(y, 32)
make_converter(celsius, fahrenheit)
# 我们会使用消息传递系统来协调约束和连接器。我们不会使用函数来响应消息,而是使用字典。用于分发的字典拥有字符串类型的键,代表它接受的消息。这些键关联的值是这些消息的响应。
#
# 约束是不带有局部状态的字典。它们对消息的响应是非纯函数,这些函数会改变所约束的连接器。
#
# 连接器是一个字典,持有当前值并响应操作该值的消息。约束不会直接改变连接器的值,而是会通过发送消息来改变,于是连接器可以提醒其他约束来响应变化。这样,连接器代表了一个数值,同时封装了连接器的行为。
#
# 我们可以发送给连接器的一种消息是设置它的值。这里,我们(`'user'`)将`celsius`的值设置为`25`。
celsius['set_val']('user', 25)   # prints: Celsius = 25  /  Fahrenheit = 77.0
# 不仅仅是`celsius`的值变成了`25`,它的值也在网络上传播,于是`fahrenheit`的值也发生变化。这些变化打印了出来,因为我们在构造这两个连接器的时候命名了它们。
#
# 现在我们可以试着将`fahrenheit`设置为新的值,比如`212`。
fahrenheit['set_val']('user', 212)
# 连接器报告说,它察觉到了一个矛盾:它的值是`77.0`,但是有人尝试将其设置为`212`。如果我们真的想以新的值复用这个网络,我们可以让`celsius`忘掉旧的值。
celsius['forget']('user')   # prints: Celsius is forgotten  /  Fahrenheit is forgotten
# 连接器`celsius`发现了`user`,一开始设置了它的值,现在又想撤销这个值,所以`celsius`同意丢掉这个值,并且通知了网络的其余部分。这个消息最终传播给`fahrenheit`,它现在发现没有理由继续相信自己的值为`77`。于是,它也丢掉了它的值。
#
# 现在`fahrenheit`没有值了,我们就可以将其设置为`212`:
fahrenheit['set_val']('user', 212)   # prints: Fahrenheit = 212  /  Celsius = 100.0
# 这个新值在网络上传播,并强迫`celsius`持有值`100`。我们已经使用了非常相似的网络,提供`fahrenheit`来计算`celsius`,以及提供`celsius`来计算`fahrenheit`。这个无方向的计算就是基于约束的网络的特征。
#
# **实现约束系统。**像我们看到的那样,连接器是字典,将消息名称映射为函数和数据值。我们将要实现响应下列消息的连接器:
#
# + `connector['set_val'](source, value)` 表示`source`请求连接器将当前值设置为该值。
# + `connector['has_val']()` 返回连接器是否已经有了一个值。
# + `connector['val']` 是连接器的当前值。
# + `connector['forget'](source)` 告诉连接器,`source`请求它忘掉当前值。
# + `connector['connect'](source)` 告诉连接器参与新的约束`source`。
#
# 约束也是字典,接受来自连接器的以下两种消息:
#
# + `constraint['new_val']()` 表示连接到约束的连接器有了新的值。
# + `constraint['forget']()` 表示连接到约束的连接器需要忘掉它的值。
#
# 当约束收到这些消息时,它们适当地将它们传播给其它连接器。
#
# `adder`函数在两个连接器上构造了加法器约束,其中前两个连接器必须加到第三个上:`a + b = c`。为了支持多方向的约束传播,加法器必须也规定从`c`中减去`a`会得到`b`,或者从`c`中减去`b`会得到`a`。
from operator import add, sub
def adder(a, b, c):
"""The constraint that a + b = c."""
return make_ternary_constraint(a, b, c, add, sub, sub)
# 我们希望实现一个通用的三元(三个方向)约束,它使用三个连接器和三个函数来创建约束,接受`new_val`和`forget`消息。消息的响应是局部函数,它放在叫做`constraint`的字典中。
def make_ternary_constraint(a, b, c, ab, ca, cb):
"""The constraint that ab(a,b)=c and ca(c,a)=b and cb(c,b) = a."""
def new_value():
av, bv, cv = [connector['has_val']() for connector in (a, b, c)]
if av and bv:
c['set_val'](constraint, ab(a['val'], b['val']))
elif av and cv:
b['set_val'](constraint, ca(c['val'], a['val']))
elif bv and cv:
a['set_val'](constraint, cb(c['val'], b['val']))
def forget_value():
for connector in (a, b, c):
connector['forget'](constraint)
constraint = {'new_val': new_value, 'forget': forget_value}
for connector in (a, b, c):
connector['connect'](constraint)
return constraint
# 叫做`constraint`的字典是个分发字典,也是约束对象自身。它响应两种约束接收到的消息,也在对连接器的调用中作为`source`参数传递。
#
# 无论约束什么时候被通知,它的连接器之一拥有了值,约束的局部函数`new_value`都会被调用。这个函数首先检查是否`a`和`b`都拥有值,如果是这样,它告诉`c`将值设为函数`ab`的返回值,在`adder`中是`add`。约束,也就是`adder`对象,将自身作为`source`参数传递给连接器。如果`a`和`b`不同时拥有值,约束会检查`a`和`c`,以此类推。
#
# 如果约束被通知,连接器之一忘掉了它的值,它会请求所有连接器忘掉它们的值(只有由约束设置的值会被真正丢掉)。
#
# `multiplier`与`adder`类似:
from operator import mul, truediv
def multiplier(a, b, c):
"""The constraint that a * b = c."""
return make_ternary_constraint(a, b, c, mul, truediv, truediv)
# 常量也是约束,但是它不会发送任何消息,因为它只包含一个单一的连接器,在构造的时候会设置它。
def constant(connector, value):
"""The constraint that connector = value."""
constraint = {}
connector['set_val'](constraint, value)
return constraint
# 这三个约束足以实现我们的温度转换网络。
#
# **表示连接器。**连接器表示为包含一个值的字典,但是同时拥有带有局部状态的响应函数。连接器必须跟踪向它提供当前值的`informant`,以及它所参与的`constraints`列表。
#
# 构造器`make_connector`是局部函数,用于设置和忘掉值,它响应来自约束的消息。
def make_connector(name=None):
"""A connector between constraints."""
informant = None
constraints = []
def set_value(source, value):
nonlocal informant
val = connector['val']
if val is None:
informant, connector['val'] = source, value
if name is not None:
print(name, '=', value)
inform_all_except(source, 'new_val', constraints)
else:
if val != value:
print('Contradiction detected:', val, 'vs', value)
def forget_value(source):
nonlocal informant
if informant == source:
informant, connector['val'] = None, None
if name is not None:
print(name, 'is forgotten')
inform_all_except(source, 'forget', constraints)
connector = {'val': None,
'set_val': set_value,
'forget': forget_value,
'has_val': lambda: connector['val'] is not None,
'connect': lambda source: constraints.append(source)}
return connector
# 同时,连接器是一个分发字典,用于分发五个消息,约束使用它们来和连接器通信。前四个响应都是函数,最后一个响应就是值本身。
#
# 局部函数`set_value`在请求设置连接器的值时被调用。如果连接器当前并没有值,它会设置该值并将`informant`记为请求设置该值的`source`约束。之后连接器会提醒所有参与的约束,除了请求设置该值的约束。这通过使用下列迭代函数来完成。
def inform_all_except(source, message, constraints):
"""Inform all constraints of the message, except source."""
for c in constraints:
if c != source:
c[message]()
# 如果一个连接器被请求忘掉它的值,它会调用局部函数`forget_value`,这个函数首先执行检查,来确保请求来自之前设置该值的同一个约束。如果是的话,连接器通知相关的约束来丢掉当前值。
#
# 对`has_val`消息的响应表示连接器是否拥有一个值。对`connect`消息的响应将`source`约束添加到约束列表中。
#
# 我们设计的约束程序引入了许多出现在面向对象编程的概念。约束和连接器都是抽象,它们通过消息来操作。当连接器的值由消息改变时,消息不仅仅改变了它的值,还对其验证(检查来源)并传播它的影响。实际上,在这一章的后面,我们会使用相似的字符串值的字典结构和函数值来实现面向对象系统。
| 19,959 |
/deep learning/无人车/tensorflow_old/notebooks/.ipynb_checkpoints/2-Linear-regression-checkpoint.ipynb
|
ea0dded5e93a085cb2ae7ca68b5be2c9b606ce90
|
[] |
no_license
|
wangyong199207/deeplearning
|
https://github.com/wangyong199207/deeplearning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 82,235 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This is the hw NOTES for section *04_00 Python Function Quirks*.**
# integers are immutable (not changeable)
x = 5
print(x)
y = x
print(y)
print(x)
y += 1
print(y)
print(x)
# lists are mutable (can be changed): if y equals x and we change y, then x also changes
x = [1, 2, 3]
y = x
y.append(5)
print(x)
# When you call a function and send it a few names (inputs), that action doesn't create copies of the objects that those names point to. It just creates a *new* name that points at the *same* data.
def add_to_list(mylist):
mylist.append(7)
newlist = mylist.copy()
return newlist
mylist = [1, 2, 3]
newlist = add_to_list(mylist)
print(newlist)
print(mylist)
T = [2, 4, 2]
newlist = add_to_list(T)
print(T)
mylist = [1, 2, 3]
todaylist = mylist.copy()
todaylist.append(7)
mylist
todaylist
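# NOTE: the cells that originally defined the data and the TensorFlow graph for this
# linear-regression example are not present in this copy of the notebook. The block
# below is a minimal, assumed reconstruction based on how the later cells use the
# names train_X, train_Y, X, Y, W, b, cost, learning_rate, training_epochs and
# display_step; the training data here are made-up toy values.
# +
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Training hyperparameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Toy training data (placeholder values, roughly linear with noise)
train_X = np.linspace(0.0, 10.0, 50).astype(np.float32)
train_Y = (0.5 * train_X + 1.0 + np.random.normal(scale=0.5, size=train_X.shape)).astype(np.float32)
n_samples = train_X.shape[0]
# Graph inputs and model parameters
X = tf.placeholder("float")
Y = tf.placeholder("float")
W = tf.Variable(float(np.random.randn()), name="weight")
b = tf.Variable(float(np.random.randn()), name="bias")
# Linear model and mean-squared-error cost
pred = X * W + b
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
# -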
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # gradient-descent step that minimizes the cost
# +
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# show the initial data
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()
    _ = input("Press [enter] to continue.")
plt.close()
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
    _ = input("Press [enter] to continue.")
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b)
print "Optimization Finished!"
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
# -
| 2,517 |
/DAT_DAML_DAY04_02BasicComVision_TensorFlowGG.ipynb
|
e069f7cb76e016d2a6ffd5d46093dcf7395fe024
|
[] |
no_license
|
LisaMorise/deeplearning
|
https://github.com/LisaMorise/deeplearning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 121,137 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="hrSdBaQD1PU4" colab_type="text"
# <table ><tr><td valign='center' bgcolor='white'>
# <a href="https://web.facebook.com/DAT.KUSRC/" target="_blank"><img src="https://drive.google.com/uc?id=1dNBiKikzW1-osi6lleLOgSOKQ65IIfMC" height="50px"></a>
# </td><td valign='center' bgcolor='white'>
# <a href="https://www.ku.ac.th/" target="_blank"><img src="https://drive.google.com/uc?id=1ZfGOBmxAwg8SAhyseFziyinzxBGme78a" height="80px"></a>
# </td><td valign='center' bgcolor='white'>
# <a href="https://www.tensorflow.org/" target="_blank"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/11/TensorFlowLogo.svg/1200px-TensorFlowLogo.svg.png" height="80px"></a>
# </td><td valign='center' bgcolor='white'>
# <a href="https://mike.cpe.ku.ac.th/" target="_blank"><img src="https://drive.google.com/uc?id=1s6r3iG_Slpu_NSWqdt5zBp8Z9hV0-zh6" height="50px"></a>
# </td></tr></table>
# + [markdown] id="3fsj-W926IGN" colab_type="text"
# ---
#
# <center><h1><b>Basic Computer Vision with Machine Learning</b></h1></center>
#
# ---
# * Acknowledgement: Most parts of this tutorial were extracted from [Machine Learning, Zero to Hero](https://www.youtube.com/watch?v=bemDFpNooA8), Google TensorFlow 2019.
# + id="NPC9v8gWZJiZ" colab_type="code" colab={}
print('Basic Computer Vision with Machine Learning..')
print(' Brought to you by [email protected]')
# + [markdown] id="IQnIfUdleSo7" colab_type="text"
# ## 1. How to recognize different objects?
# ---
# + [markdown] id="C8KdKvhd9pbh" colab_type="text"
# For example, look at these pictures, 'How many <font color=ff00ff>shoes</font> do we see?'
#
# <br>
# <center><img src=https://drive.google.com/uc?id=1N_fAN0zbL7nK1Y9QAYIWBcMmW2CuGQ4q></center>
#
# We might say two, right? But <font color=ff00ff>***how***</font> do we know that they are <font color=ff00ff>shoes</font>?
#
# Imagine that if somebody <font color=ff00ff>had never seen shoes before</font >! How would we tell them that despite the great difference between the <font color=00ffff>high heel</font> and the <font color=ffff00>sport</font> shoes, they are still both shoes.
#
# Maybe we would think that if it's <font color=f72e03>**red**</font>, it's a shoe. Because all we've seen are these two shoes, and they're both <font color=f72e03>red</font>.
#
# But, of course, it's <font color=ffff00>not</font> that simple. But how do we know that these two are <font color=ff00ff>shoes</font>?
# + [markdown] id="7MOPw7lY3ym3" colab_type="text"
# Because, in our life, <font color=ff00ff>we've seen lots of shoes</font>, and <font color=ff00ff>we have learned to understand</font> what makes a shoe a shoe.
#
# <br>
# <center><img src=https://drive.google.com/uc?id=1mRV8KBvFAdCkL8aGEf178wB-Boa8cozz></center>
# + [markdown] id="4ofuvYVm2Z6q" colab_type="text"
# So, it follows logically that if we <font color=ff00ff>show a computer lots of shoes</font>, it <font color=00ffff>will be able to recognize</font> what a shoe is. And that's where the dataset called <font color=ffff00>**Fashion MNIST**</font> is useful.
#
# <br>
# <center><img src=https://drive.google.com/uc?id=1_Lu5MdmnHJp-3I33njzxJRCB1USNkkym></center>
#
# This dataset has 70,000 images in 10 different categories. So, there are 7,000 examples of each category, including shoes. Hopefully, seeing 7,000 shoes is <font color=00ffff>enough</font> for a computer to learn what a shoe looks like.
#
# The images in Fashion MNIST are only 28x28 pixels. So, they are pretty small. And the less data used, the faster it is for a computer to process it.
#
# + [markdown] id="wPyYOduE40Dp" colab_type="text"
#
# <center><img src=https://drive.google.com/uc?id=12nSU-AutucBvyHF093s1yM1jiWz4CC0j></center>
#
# That being said, they still hold enough detail to recognize items of clothing. In this case, you can still see that it's a shoe.
# + [markdown] id="ODfSfYHdDQV3" colab_type="text"
# ## 2. The code explained
# ---
# + [markdown] id="DNITiiSsDdIq" colab_type="text"
# The type of code we will write is almost identical to what we did in the last tutorial. That's part of the power of <font color=ffff00>**TensorFlow**</font> that allows us to design neural network for a variety of tasks with a <font color=ff00ff>consistent</font> <font color=00ffff>programming API</font>.
# + [markdown] id="_8wqPsNW5ZeK" colab_type="text"
#
# We start by loading the data. The fashion MNIST dataset is <font color=ff00ff>built into</font> <font color=ffff00>TensorFlow</font>, so it is easy to load it with the code look like this.
# + id="TZH_CDcHEAs7" colab_type="code" colab={}
import tensorflow as tf
from tensorflow import keras
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
print('Done..')
# + [markdown] id="ynmAzTDcELPI" colab_type="text"
# The <font color=ff00ff>training images</font> are a set of 60,000 images, like our <font color=ffff00>*ankle boot*</font> here. The other 10,000 form a <font color=ff00ff>test set</font> that we can use to check how well our neural network performs.
#
# <br>
# <center><img src=https://drive.google.com/uc?id=1cP8vLir6VUsGX3CELBKjrdDSqNgzbjbV></center>
#
# The <font color=ffff00>**label**</font> is a number indicating the class of that type of clothing. So, in this case, the number <font color=ffff00>09</font> indicates an <font color=ffff00>ankle boot</font>. Why do we think it would be a number and not just the text, <font color=ffff00>'**ankle boot**'</font>?
#
# There are two main reasons: first, computers <font color=ff00ff>deal better</font> with <font color=00ff00>numbers</font>; but perhaps more importantly, there's the issue of bias. If we label it as <font color=ffff00>'**ankle boot**'</font>, we're already showing a <font color=00ffff>bias</font> towards the English language. So, by using a number, we can point to a text description in any language, as shown in the above picture.
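# + [markdown]
# For example, because the label is just an index, mapping it to a human-readable name in any language is a simple lookup. A minimal sketch using the standard Fashion MNIST class names (the same list that appears later in this notebook):

# +
class_names_en = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                  'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
label = 9                      # the numeric label stored in the dataset
print(class_names_en[label])   # -> Ankle boot
# -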
# + [markdown] id="vJsniOiHH27I" colab_type="text"
# When looking at the neural network design, it's always good to explore the <font color=ff00ff>*input*</font> values and the <font color=ff00ff>*output*</font> values first.
#
# <br>
# <center><img src=https://drive.google.com/uc?id=1KX_oNlGn6gfygVE9KQf-QmhAr6ULKck8></center>
#
# Here we can see that our neural network is a little <font color=ff00ff>more complex</font> than the one we have seen before. Our <font color=00ffff>first</font> layer takes an input of shape 28x28, which is the size of the image. Our <font color=00ffff>last</font> layer has 10 outputs, which is the number of different items of clothing represented in our dataset.
#
# Our neural network will act as a kind of <font color=ffff00>**filter**</font>, which takes in a 28x28 set of pixels and outputs 1 of 10 values.
# + [markdown] id="UEIVC1ZmJwsJ" colab_type="text"
# So what about this number, <font color=00ffff>*128*</font>? What does it do?
# <br>
# <center><img src=https://drive.google.com/uc?id=1fQwd1JWqX2FouEY2Z-Vuf0s_iSxGRIyI></center>
# + [markdown] id="zDld4Mzf8PgS" colab_type="text"
# <br>
# <center><img src=https://drive.google.com/uc?id=1btOZmXxqv4OcIHc5fVHoImRBalyZC93k></center>
#
# Think of it like this, we're going to have 128 <font color=ff00ff>functions</font> (or <font color=00ffff>filters</font>), each one of which has parameters inside of it. Let's call these <font color=00ffff>**f0**</font> through <font color=00ffff>**f127**</font>. What we want is that when the pixels of the shoes get fed into them, one by one, that the combination of all of these functions will <font color=ff00ff>output</font> the <font color=ffff00>correct</font> value, in this case, the number <font color=00ffff>**9**</font>.
#
# In order to do that, the computer will need to <font color=00ff00>figure out</font> the <font color=ffff00>parameters</font> inside of these functions to get that result. And it will then <font color=ff00ff>extend</font> this to all of the other items of clothing in the dataset. The logic is, once it has done this, then it <font color=ff00ff>should be able to</font> recognize items of clothing.
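# + [markdown]
# As a rough sketch of that idea (NumPy, with made-up parameter values): each of the 128 "functions" is just a weighted sum of the 784 input pixels plus a bias, so the whole layer boils down to one matrix multiplication.

# +
import numpy as np
pixels = np.random.rand(28 * 28)       # a flattened 28x28 image (placeholder values)
W1 = np.random.randn(28 * 28, 128)     # the parameters inside f0 .. f127
b1 = np.random.randn(128)
outputs = np.maximum(0, pixels @ W1 + b1)   # one output per function (relu applied)
print(outputs.shape)                   # (128,)
# -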
# + [markdown] id="rY1-vka8MUJn" colab_type="text"
# <br>
# <center><img src=https://drive.google.com/uc?id=15-Lfo0b9sjosn0gGJmepbi-NxMtH4ZWa></center>
#
# For the <font color=00ffff>loss function</font> and the <font color=ffff00>optimizer function</font>: the neural network will be initialized with <font color=00ff00>random</font> values.
#
# The <font color=00ffff>**loss**</font> function will then measure how good or how bad the results were. And then with the <font color=ffff00>**optimizer**</font>, it will generate new parameters for the functions to see if it can do better.
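# + [markdown]
# A tiny numeric sketch of what "measuring how good or bad the results were" means for the loss used later in this notebook (sparse categorical crossentropy): the loss is small when the predicted probability of the true class is high, and large when it is low.

# +
import numpy as np
true_label = 9
confident_right = np.array([0.01] * 9 + [0.91])   # most probability on class 9
confident_wrong = np.array([0.91] + [0.01] * 9)   # most probability on class 0
print(-np.log(confident_right[true_label]))   # ~0.09 -> small loss
print(-np.log(confident_wrong[true_label]))   # ~4.6  -> large loss
# -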
# + [markdown] id="yev-epDVNbYX" colab_type="text"
# <br>
# <center><img src=https://drive.google.com/uc?id=183NI_5XXu3kDe8bhG8ZfvirOiwNa68cW></center>
#
# For the <font color=ff00ff>**activation** functions</font>, the first one is on the layer of 128 functions, and it's called <font color=ffff00>**relu**</font>, or <font color=ffff00>rectified linear unit</font>. What it really does is as simple as returning a value if it's greater than zero. So if that function has zero or less as output, it just gets <font color=ff00ff>***filtered*** out</font>.
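# + [markdown]
# A one-line NumPy sketch of that behaviour: relu passes positive values through unchanged and replaces everything else with zero.

# +
import numpy as np
values = np.array([-2.0, -0.5, 0.0, 0.5, 3.0])
print(np.maximum(values, 0))   # [0.  0.  0.  0.5 3. ]
# -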
# + [markdown] id="Eh9lTIprPMCi" colab_type="text"
# <br>
# <center><img src=https://drive.google.com/uc?id=1iVpijNCvrWxvYMIebWR760ZgUY9t2skd></center>
#
# The <font color=ffff00>**softmax**</font> has the effect of <font color=ff00ff>picking</font> the biggest number in a set. The output layer in this neural network has 10 items in it representing the probability that we are looking at the specific item of clothing. So, in this case, it has a high probability that it is the item <font color=00ffff>09</font>, which is our ankle boot. So, instead of searching through to find the largest, what <font color=ffff00>*softmax*</font> does is, it sets it to 1 and the rest to 0. So, all we have to do is to find the 1.
#
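# + [markdown]
# Strictly speaking, softmax turns the 10 raw scores into probabilities that sum to 1; "setting the biggest to 1" is then just taking the argmax of that distribution, which is what the prediction code later in this notebook does. A small NumPy sketch:

# +
import numpy as np
scores = np.array([1.0, 2.0, 0.5, 5.0])            # raw outputs (made-up values)
probs = np.exp(scores) / np.sum(np.exp(scores))    # softmax
print(probs.round(3))     # highest probability on the last entry
print(np.argmax(probs))   # 3 -> index of the "winning" class
# -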
# + [markdown] id="iB-2LB_SQg2t" colab_type="text"
# <br>
# <center><img src=https://drive.google.com/uc?id=1BJQFwaoJRGUUr_C1uqTndVSJzXJjxJRQ></center>
#
# <font color=ff00ff>Training</font> the model is quite simple, we just <font color=ffff00>fit</font> the <font color=ffff00>training images</font> to the <font color=ffff00>training lables</font>. This time, we'll try it for just 5 epochs.
#
# Remember earlier that we had 10,000 images and labels that we didn't train with? These are images that the model hasn't previously seen, so we can use them to test how well our model performs. We can do that test by passing them to the <font color=00ffff>`evaluate( )`</font> method, as seen in the above picture.
#
# And then, finally, we can get predictions back for new images, by calling <font color=00ffff>`model.predict( )`</font>.
#
# + [markdown] id="OnkitOB5SsZS" colab_type="text"
# ## 3. Runing the code
#
# Reference: https://www.tensorflow.org/tutorials/keras/classification
#
# + [markdown] id="KeouDBHfIFRj" colab_type="text"
# ### Load the fashion MNIST dataset
# + id="nxaO13aySR8y" colab_type="code" outputId="d9876d9e-42f3-4b90-f1aa-f06f646fc4ec" colab={"base_uri": "https://localhost:8080/", "height": 175}
# run this code block again, and examine what train_images and train_labels
# look like, if you have not done it yet
import tensorflow as tf
from tensorflow import keras
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
print('Done..')
# + id="s5wY83YWX0AG" colab_type="code" outputId="259964e8-1f15-42dd-fdf7-453785e1bb02" colab={"base_uri": "https://localhost:8080/", "height": 34}
type(train_images)
# + id="kfI2zEilluU-" colab_type="code" outputId="7d9bdbb6-c7d6-42dd-ddef-73f2b9246a17" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_images.shape
# + id="Kr5yRotrcftI" colab_type="code" outputId="9bd304e4-c417-4486-9678-2beada969cab" colab={"base_uri": "https://localhost:8080/", "height": 34}
test_images.shape
# + id="9jdnnYUjX7Ju" colab_type="code" colab={}
train_images[0]
# + id="-h33tptbY20t" colab_type="code" outputId="a90b4ee8-64b6-4b1d-e76c-a76c89b684bf" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_labels[0]
# + [markdown] id="Etvp8bjYJrwm" colab_type="text"
# ### Preprocess the data
#
# Scale these values to a range of 0 to 1 before feeding them to the neural network model. To do so, divide the values by 255.
# + id="BzxiakNRJpBu" colab_type="code" outputId="9db6cd8b-224b-43fc-cda9-b52fc051d7c3" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_images = train_images / 255.0
test_images = test_images / 255.0
print('Done..')
# + id="cEmVQaNOYFYq" colab_type="code" colab={}
train_images[0]
# + [markdown] id="eF6rbsJ5IRLp" colab_type="text"
# ### Plot the data
# + id="9nTH5CPWWjWT" colab_type="code" outputId="ff7d6254-a591-42ac-a804-120b356d62ec" colab={"base_uri": "https://localhost:8080/", "height": 589}
import matplotlib.pyplot as plt
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i])
plt.xlabel(class_names[train_labels[i]])
plt.show()
# + [markdown] id="WwJqSzqDIYCp" colab_type="text"
# ### Build the model
# + colab_type="code" id="_UbvWwMtH6sG" outputId="c6ca8523-d00b-45f8-af50-cf6959d355d0" colab={"base_uri": "https://localhost:8080/", "height": 210}
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer=tf.compat.v1.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
print('Done..')
# + [markdown] id="PMoAxNBbJMbd" colab_type="text"
# ### Evaluate the accuracy
# + id="3feJdUUhJLm9" colab_type="code" outputId="83cb681f-999b-438c-bc9b-7f3e7a95fb9f" colab={"base_uri": "https://localhost:8080/", "height": 70}
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f'\nTest accuracy: {test_acc*100:.2f}%')
# + [markdown] id="fERwnKx6KXWh" colab_type="text"
# ### Make the predictions
# + id="HYCs3CeJKbLM" colab_type="code" colab={}
predictions = model.predict(test_images)
# + [markdown] id="76wEP7X7K_6k" colab_type="text"
# A prediction is an array of 10 numbers. They represent the model's <font color=ff00ff>confidence</font> that the image corresponds to each of the 10 different articles of clothing.
# + id="3DCUluNNLGRb" colab_type="code" outputId="5ca8f454-72b5-42df-a550-bbd76df53cdd" colab={"base_uri": "https://localhost:8080/", "height": 70}
predictions[0]
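# + [markdown]
# To turn that confidence array into a single predicted class, take the index of the highest confidence and look up its name:

# +
import numpy as np
predicted_class = np.argmax(predictions[0])
print(predicted_class, class_names[predicted_class])   # most likely: 9 Ankle boot
# -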
# + id="iHxi3LopKmyq" colab_type="code" outputId="d3901641-d1c6-4887-9c9f-67185200bf8f" colab={"base_uri": "https://localhost:8080/", "height": 34}
import numpy as np
import matplotlib.pyplot as plt
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'cyan'
else:
color = 'orange'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
print('Done..')
# + [markdown] id="rgjgZtyQLX02" colab_type="text"
# Let's look at the 0th image, predictions, and prediction array. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percentage (out of 100) for the predicted label.
# + id="iK7ls1CcLT-z" colab_type="code" outputId="c459d0f3-59ae-4f47-e890-567f01279c91" colab={"base_uri": "https://localhost:8080/", "height": 211}
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# + id="CoX6fzgJLo6d" colab_type="code" colab={}
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# + [markdown] id="mLMdgfwPL2Lq" colab_type="text"
# Let's plot several images with their predictions. Note that the model can be wrong even when very confident.
# + id="r84o26QEL3Hh" colab_type="code" colab={}
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 7
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# + [markdown] id="vDombmZJt66r" colab_type="text"
#
#
# ---
#
#
# + [markdown] id="-bsqaqaecAKK" colab_type="text"
# ##4. Your exercise
#
# Complete the following code block for image classification problem with the old MNIST dataset.
# + [markdown] id="sihtg_OgcJN9" colab_type="text"
# <center><img src=https://miro.medium.com/max/1650/1*XdCMCaHPt-pqtEibUfAnNw.png width="520px">
# <br>
# MNIST Dataset and Number Classification <sup><a href=https://www.katacoda.com/basiafusinska/courses/tensorflow-getting-started/tensorflow-mnist-beginner>[1]<a></sup></center>
# + [markdown] id="cuDsFHhhcVjd" colab_type="text"
# ###Downloading the MNIST data
#
# The MNIST dataset is one of the most common datasets used for image classification and accessible from many different sources. In fact, Tensorflow and Keras allow us to import and download the MNIST dataset directly from their API.
# + id="neIQbtKpfl--" colab_type="code" colab={}
import tensorflow as tf
(train_images, train_labels), (test_images, test_labels) = \
tf.keras.datasets.mnist.load_data()
print('Done')
# + [markdown] id="KnQervoyf-jI" colab_type="text"
# The MNIST database contains 60,000 training images and 10,000 testing images taken from American Census Bureau employees and American high school students <sup><a href=https://en.wikipedia.org/wiki/MNIST_database target=_blank>[2]</a></sup>.
#
# The train_images and test_images parts contain greyscale RGB codes (from 0 to 255), while the train_labels and test_labels parts contain labels from 0 to 9 which represent which digit each image actually shows.
# + id="nY52O7GghUF3" colab_type="code" colab={}
# your code to explore the data here
train_images.shape
#train_images[0]
#train_labels[0]
#plt.imshow(train_images[0])
#plt.show()
# + id="C4aJbkxkT7sc" colab_type="code" colab={}
# + [markdown] id="dBJyr-0Zghf4" colab_type="text"
# To visualize these numbers, we can get help from matplotlib.
# + id="ZTOEXovGgzvW" colab_type="code" colab={}
# your code here
# + [markdown] id="x-YsxCszheGq" colab_type="text"
# ###Normalizing the images
#
# We normalize our data, as is generally required for neural network models. We can achieve this by dividing the RGB codes by 255 (which is the maximum RGB code minus the minimum RGB code). This can be done with the following code:
# + id="0noggVzshyPR" colab_type="code" colab={}
# Normalize the RGB codes by dividing them by the max RGB value.
# your code here
print('train_images shape:', train_images.shape)
print('Number of images in train_images', train_images.shape[0])
print('Number of images in test_images', test_images.shape[0])
# + id="SGT5UlEbmHCP" colab_type="code" colab={}
train_images[0]
# + [markdown] id="7iIrnfaTiFx7" colab_type="text"
# ###Building, compiling and fitting the model
# + id="APKsNpYpiOya" colab_type="code" colab={}
# your code here
# + [markdown] id="6wz8hfOzjUHE" colab_type="text"
# ###Evaluating the model
# + id="wqtKS0yQjWWn" colab_type="code" colab={}
# your code here
# + [markdown] id="-qAzdbfYW65m" colab_type="text"
# ###Making the prediction
# + id="rsvBrPm8W7T6" colab_type="code" colab={}
predictions = model.predict(test_images)
# + id="9o9_wTIqXoAm" colab_type="code" colab={}
# modify the above plot_image() and plot_value_array we have already seen here
print('Done..')
# + id="RpgrLWYfX_j3" colab_type="code" colab={}
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
| 21,708 |
/BIOSCI544_logistic_map2.ipynb
|
bb84d8fb3dfb3f7f1b8962ff3f8fde1d60dc2b9a
|
[] |
no_license
|
cohmathonc/BIOSCI544
|
https://github.com/cohmathonc/BIOSCI544
| 0 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 618,253 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="JdS2eHBeZs8x" colab_type="text"
# [](https://colab.research.google.com/github/cohmathonc/BIOSCI544/blob/master/BIOSCI544_logistic_map2.ipynb)
# + [markdown] id="kL9XCZ12uH5G" colab_type="text"
# # Logistic Map
# + id="hqCVh3Gvua7u" colab_type="code" colab={}
# %matplotlib inline
from ipywidgets import interact, interactive, fixed, interact_manual
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from matplotlib.widgets import TextBox
from scipy.signal import periodogram
# + [markdown] id="DpoW878m8QFi" colab_type="text"
# $x_{n+1}=r(x_n)(1-x_n)$
# + id="6Z6tTYpfvY5Q" colab_type="code" colab={}
def update_x(x_previous, r):
x_next = r * x_previous * (1.0-x_previous)
return x_next
def eval_n(r, x0=0.1, n_max=100):
x = np.zeros(n_max)
x[0] = x0
for n in range(1,n_max):
x[n] = update_x(x[n-1],r)
return x
# + id="8YbZmp8Oxege" colab_type="code" colab={}
r_steps = 500
r_range_min = 2.9
r_range_max = 4
n_max = 1000
rs = np.linspace(r_range_min, r_range_max, r_steps)
x = np.zeros((r_steps, n_max))
for i, r in enumerate(rs):
x[i,:] = eval_n(r, x0=0.1, n_max=n_max)
# + id="Y02IveeL5fpD" colab_type="code" colab={}
def cobweb_plot(r,nstep):
    # Cobweb diagram of the logistic map: the left panel plots the trajectory x_n over time,
    # the right panel traces the iterates against y = r*x*(1-x) and y = x.
    # (Relies on logisticIter, which is defined in a later cell.)
    x=np.linspace(0,1,100)
fig, (ax1,ax2) = plt.subplots(1, 2,figsize=(15,10))
ax2.plot(x,r*x*(1-x))
ax2.plot(x,x)
X=np.linspace(0,1,10)[1:]
mycolors=['b','g','r','c','m','y','k','w']
if nstep>0:
Y=logisticIter(r,0,nstep,0.1)
plt.vlines(Y[0],0,Y[1])
k=0
ax1.plot(Y)
for i in range(1,len(Y)-1):
c=mycolors[k]
ax2.hlines(Y[i],Y[i-1],Y[i],color=c)
ax2.vlines(Y[i],Y[i],Y[i+1],color=c)
if i%2==0:
k=k+1
if k>=len(mycolors):
k=0
# + id="SpbVDQlY6s8V" colab_type="code" outputId="34adeef3-6112-4e52-e2ae-4811967adcf1" colab={"base_uri": "https://localhost:8080/", "height": 671, "referenced_widgets": ["3ebf5c6e79714435bb680ba2a9b8a889", "ead03009fece4a13baa2af61c81c443b", "bbaa30b1065b40ceb3685b31b4a6ccb9", "e1aeb00cc1ad423499c4864f3f8feb0c", "0303db412b874b83b5ec31020b57d709", "896ff7c3898a41df94be84f0369d2501", "851a6b0b754a40458b765bb57a84426b", "a5aa996f491445c9a303b08d450171a4", "3a7567b530b9461d936682264f83898c"]}
interact(cobweb_plot,r=widgets.FloatText(value=2,step=0.1),nstep=widgets.IntText(value=0,step=1));
# + id="yF7Y4KJo7xY6" colab_type="code" colab={}
# + id="UlQCV1saEMIg" colab_type="code" colab={}
logistic =lambda r,p: r*p*(1-p)
def logisticIter(r,tmin,tmax,p=0.1):
x=[p]
for t in range(tmax):
p=logistic(r,p)
x.append(p)
return x[tmin:]
# + id="ZElbHbzrF8jy" colab_type="code" colab={}
def plot_logistic(r):
s = logisticIter(r,0,100,p=0.1)
fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(15,10))
ax1.plot(s,lw=2)
ax1.set_xlabel('time')
ax1.set_ylabel('X',size=20)
ax1.set_xlim(0,100)
ax1.set_ylim(0,1)
s=logisticIter(r,0,16000,p=0.1)[1400:];
freq,P=periodogram(s);
freq=freq[1:];P=np.log(P[1:]);
ax2.plot(1/freq,P)
ax2.set_ylim(-20,0)
ax2.set_xlim(0,18)
ax2.set_ylabel(r"$log(PSD)$")
ax2.set_xlabel(r"$period$")
# + colab_type="code" id="eBlv-34GM3l7" outputId="4a9ec4a9-cd46-4e7a-9489-6b114325c5e6" colab={"base_uri": "https://localhost:8080/", "height": 661, "referenced_widgets": ["a88275cb12be4206813fa50fdb64bacc", "5b32c34a718c4f3f9c10b81f7029be30", "80bbed00de8749e2bceb4943303e393d", "3fa5d92816a1471c957649d508ae5904", "6bf51c33abe24f9eaa828de5b7c309e4", "f4260ed5172c40978efebd5f7897b9a1"]}
interact(plot_logistic,r=widgets.FloatText(value=1,step=0.005));
# + id="u-4f_U6YGHG4" colab_type="code" outputId="8bb4a8ed-5b73-4c8c-e10b-bb81b623106c" colab={"base_uri": "https://localhost:8080/", "height": 661, "referenced_widgets": ["08737a564d7b4e69a57fc2b0341d90fc", "deb39898f17f4b79b93f064f56489c45", "c168af2bfa79461ebaaee4cd73412919", "6db959d988254780829e89cab1f5ccfd", "76a70d9af9ca453baaafb14784812f38", "0a9c31e22dc146489d3ba7a70d040ade"]}
rvalues=[0.8,1,3,3.82842712,3.44948974,3.73817237,3.62655316,3.70164076,3.54409035,3.56440726]
interact(plot_logistic,r=widgets.Dropdown(options=rvalues,value=1));
# + id="gIsuh--7HL4J" colab_type="code" outputId="c84cc31c-97c6-475b-b8ff-a9e1ef778953" colab={"base_uri": "https://localhost:8080/", "height": 592}
# first 100 iterations discarded to allow the solution to stabilize (bifurcation diagram)
plt.figure(figsize=(20,10))
plt.plot(rs, x[:, 100:], '.k', markersize=0.5)
plt.show()
# + [markdown] id="B9HVVywTE6XV" colab_type="text"
#
titanic['Num_family_members'] = titanic['SibSp'] + titanic['Parch']
test['Num_family_members'] = test['SibSp'] + test['Parch']
titanic['large_family'] = titanic['Num_family_members'] >= 4
test['large_family'] = test['Num_family_members'] >= 4
make_probability_chart(titanic, "Survived", "Num_family_members")
# -
# ### 3H.) Cabin
# +
# Replace each cabin with its deck (the first letter of the cabin code); missing cabins stay NaN for now.
titanic['Deck'] = titanic['Cabin'].str[0]
test['Deck'] = test['Cabin'].str[0]
make_probability_chart(titanic, "Survived", "Deck")
# -
# ### 3I.) Fare
# Create a stacked histogram to show survival by fare.
f, ax = plt.subplots(figsize=(12, 8))
titanic['Fare'].plot.hist(bins=range(0, 300, 10))
age_plot = titanic[titanic['Survived'] == 1]['Fare'].plot.hist(
bins=range(0, 300, 10))
age_plot.legend(['Died', 'Survived'])
age_plot.set_title('Survival by Fare')
age_plot.set_xlabel('Fare')
plt.show()
# ### 3J.) Embarked
f, ax = plt.subplots(figsize=(12, 8))
sns.countplot(
titanic['Embarked']).set_title('Passenger Frequency by Embarked Location')
make_probability_chart(titanic, "Survived", "Embarked")
# ### 3K.) Ticket
titanic['Ticket_Frequency'] = titanic['Ticket'].map(
titanic['Ticket'].value_counts())
test['Ticket_Frequency'] = test['Ticket'].map(test['Ticket'].value_counts())
# ## 4.) Filling in Missing Values
# ### 4A.) Figure out what is missing
print("Training Data")
print(titanic.isnull().sum())
print('\n')
print("Testing Data")
print(test.isnull().sum())
# ### 4B.) Age
titanic['Age'].isnull().sum() # Save for later.
# ### 4C.) Deck
# +
# Fill missing decks with a space placeholder; its dummy column ('Deck_ ') is dropped later.
titanic['Deck'] = titanic['Deck'].fillna(' ')
test['Deck'] = test['Deck'].fillna(' ')
# There is no deck 'T' on the Titanic, so replacing that with ' ':
titanic[titanic['Deck'] == 'T'] = titanic[titanic['Deck'] == 'T'].replace(
'T', ' ')
test[test['Deck'] == 'T'] = test[test['Deck'] == 'T'].replace('T', ' ')
# -
# ### 4D.) Fare
# +
# Fill the one NA in test with the fare median
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
def reshape_fare(x):
    # Compress the long right tail of Fare: log1p followed by a 10th root, shifted so the result starts at 0
    if x <= 0:
        return 0
    else:
        return np.log1p(x)**.1 - 1
titanic['Fare_transformed'] = titanic['Fare'].apply(lambda x: reshape_fare(x))
test['Fare_transformed'] = test['Fare'].apply(lambda x: reshape_fare(x))
titanic['Fare_transformed'].hist()
test['Fare_transformed'].hist()
# -
# ### 4E.) Embarked
titanic['Embarked'].value_counts()
# According to Encyclopedia Titanica, both of the null passengers embarked at Southampton.
# https://www.encyclopedia-titanica.org/titanic-survivor/amelia-icard.html
titanic['Embarked'].fillna('S', inplace=True)
# ### 4F.) Sanity Check
# Sanity check to make sure all missing values are taken care of. Cabin won't be in any of the models, so we're good.
titanic.isnull().sum()
test.isnull().sum()
test.shape
# +
from sklearn.preprocessing import OneHotEncoder
original_variables = titanic[[
'Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'
]]
original_variables.head()
original_variables = pd.get_dummies(original_variables)
f, ax = plt.subplots(figsize=(12, 8))
sns.heatmap(original_variables.corr(), cmap='Blues', annot=True)
f.autofmt_xdate()
# -
# ## 5.) Model Preparations
# ### 5A.) Engineer Features
print(titanic['Salutation'].value_counts())
titanic['Salutation'].replace('Dr', 'Mr', inplace=True)
titanic['Salutation'].replace('Rev', 'Mr', inplace=True)
titanic['Salutation'].replace('Major', 'Mr', inplace=True)
titanic['Salutation'].replace('Col', 'Mr', inplace=True)
titanic['Salutation'].replace('Mlle', 'Miss', inplace=True)
titanic['Salutation'].replace('Mme', 'Mrs', inplace=True)
titanic['Salutation'].replace('Capt', 'Mr', inplace=True)
titanic['Salutation'].replace('Countess', 'Mrs', inplace=True)
titanic['Salutation'].replace('Ms', 'Miss', inplace=True)
titanic['Salutation'].replace('Sir', 'Mr', inplace=True)
titanic['Salutation'].replace('Jonkheer', 'Mr', inplace=True)
titanic['Salutation'].replace('Don', 'Mr', inplace=True)
titanic['Salutation'].replace('Dona', 'Mrs', inplace=True)
titanic['Salutation'].replace('Lady', 'Mrs', inplace=True)
# ### 5B.) Drop Columns
titanic.info()
print(test['Salutation'].value_counts())
test['Salutation'].replace('Dr', 'Mr', inplace=True)
test['Salutation'].replace('Rev', 'Mr', inplace=True)
test['Salutation'].replace('Major', 'Mr', inplace=True)
test['Salutation'].replace('Col', 'Mr', inplace=True)
test['Salutation'].replace('Mlle', 'Miss', inplace=True)
test['Salutation'].replace('Mme', 'Mrs', inplace=True)
test['Salutation'].replace('Capt', 'Mr', inplace=True)
test['Salutation'].replace('Countess', 'Mrs', inplace=True)
test['Salutation'].replace('Ms', 'Miss', inplace=True)
test['Salutation'].replace('Sir', 'Mr', inplace=True)
test['Salutation'].replace('Jonkheer', 'Mr', inplace=True)
test['Salutation'].replace('Don', 'Mr', inplace=True)
test['Salutation'].replace('Dona', 'Mrs', inplace=True)
test['Salutation'].replace('Lady', 'Mrs', inplace=True)
# Dropping columns not needed in the model.
X_train = titanic.drop([
'Survived', 'PassengerId', 'Name', 'Cabin', 'Ticket', 'Num_family_members'
], 1) # 1 is to specify columns
y_train = titanic['Survived']
X_test = test.drop(
['PassengerId', 'Cabin', 'Name', 'Ticket', 'Num_family_members'], 1)
X_train['Pclass'] = X_train['Pclass'].astype("category")
X_test['Pclass'] = X_test['Pclass'].astype("category")
# ### 5C.) Add Dummy Variables
# Fit and transform categorical variables to integers
X = pd.get_dummies(X_train,
columns=['Pclass', 'Sex', 'Embarked', 'Deck', 'Salutation'])
X_test = pd.get_dummies(
X_test, columns=['Pclass', 'Sex', 'Embarked', 'Deck', 'Salutation'])
column_names = list(X.columns)
from sklearn.impute import KNNImputer
imputer = KNNImputer()
X = pd.DataFrame(imputer.fit_transform(X), columns=column_names)
X_test = pd.DataFrame(imputer.fit_transform(X_test), columns=column_names)
y = titanic['Survived']
logreg_sm = sm.Logit(y.astype(float), X.astype(float)).fit()
print(logreg_sm.summary())
X['large_family'] = X['large_family'].map({False: 0, True: 1})
X_test['large_family'] = X_test['large_family'].map({False: 0, True: 1})
X['Child'] = X['Child'].map({False: 0, True: 1})
X_test['Child'] = X_test['Child'].map({False: 0, True: 1})
X['Elder'] = X['Elder'].map({False: 0, True: 1})
X_test['Elder'] = X_test['Elder'].map({False: 0, True: 1})
# ### 5D.) Check for Multicolinearity
# +
# VIF dataframe
vif_data = pd.DataFrame()
vif_data["feature"] = X.columns
# calculating VIF for each feature
vif_data["VIF"] = [
variance_inflation_factor(X.values, i) for i in range(len(X.columns))
]
print(vif_data)
# +
# VIF dataframe
vif_data = pd.DataFrame()
vif_data["feature"] = X_test.columns
# calculating VIF for each feature
vif_data["VIF"] = [
variance_inflation_factor(X_test.values, i)
for i in range(len(X_test.columns))
]
print(vif_data)
# +
X = X.drop([
'Fare', 'Salutation_Mr', 'Deck_ ', 'Sex_female', 'Embarked_C', 'Pclass_3'
], 1)
X_test = X_test.drop([
'Fare', 'Salutation_Mr', 'Deck_ ', 'Sex_female', 'Embarked_C', 'Pclass_3'
], 1)
# VIF dataframe
vif_data = pd.DataFrame()
vif_data["feature"] = X.columns
# calculating VIF for each feature
vif_data["VIF"] = [
variance_inflation_factor(X.values, i) for i in range(len(X.columns))
]
print(vif_data)
# -
assert len(X_test.columns) == len(X.columns)
column_names = list(X.columns)
# ### 5E.) Normalizing Features
X = StandardScaler().fit_transform(X)
X_test = StandardScaler().fit_transform(X_test)
# ### 5F.) Split Titanic Data into Training and Validation Model
X_train, X_validation, y_train, y_validation = train_test_split(
X, titanic['Survived'], test_size=0.2, random_state=42)
# ## 6.) Modeling and Verifying Model
# ### 6A.) Logistic Regression
# +
logreg = LogisticRegression(max_iter=10000,
penalty='l2',
class_weight='balanced')
logreg.fit(X_train, y_train)
y_pred_log = logreg.predict(X_validation)
print(classification_report(y_validation, y_pred_log))
# Predicted class labels (for accuracy) and class-1 probabilities (for the ROC curve)
y_pred_labels = logreg.predict(X_validation)
y_pred_proba = logreg.predict_proba(X_validation)[:, 1]
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_proba)
print("Accuracy score:", accuracy_score(y_validation, y_pred_labels))
# -
make_roc_auc_chart("Logistic Regression", y_validation, y_pred_proba)
# Hyperparameter tuning: Logistic Regression
# +
# Create the parameter grid
lr = LogisticRegression(max_iter=10000, class_weight='balanced')
param_grid = {
    'C': [.01, .03, .1, .3, 1, 3, 10, 30, 100, 300, 1000, 3000, 10000]
}  # inverse regularization strengths to search over
logreg_cv = GridSearchCV(estimator=lr,
param_grid=param_grid,
n_jobs=-1,
verbose=5,
return_train_score=True,
cv=5,
scoring='roc_auc')
y_pred_gridsearch = logreg_cv.fit(X_train, y_train).predict(X_validation)
# Generate ROC curve values from the tuned model's predicted probabilities
y_pred_proba = logreg_cv.predict_proba(X_validation)[:, 1]
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_proba)
print(classification_report(y_validation, y_pred_gridsearch))
print("Accuracy score:", accuracy_score(y_validation, y_pred_gridsearch))
make_roc_auc_chart("Logistic Regression", y_validation, y_pred_proba)
# -
print(logreg_cv.fit(X, y).predict(X_test).sum())
assert logreg_cv.fit(X, y).predict(X_test).sum() < 200
# ### 6B.) Random Forest
# +
random_forest = RandomForestClassifier(n_estimators=1000, random_state=42)
random_forest.fit(X_train, y_train)
y_pred_rf = random_forest.predict(X_validation)
print(classification_report(y_validation, y_pred_rf))
# Predicted class labels (for accuracy) and class-1 probabilities (for the ROC curve)
y_pred_labels = random_forest.predict(X_validation)
y_pred_proba = random_forest.predict_proba(X_validation)[:, 1]
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_proba)
print("Accuracy score:", accuracy_score(y_validation, y_pred_labels))
# -
plt.figure(figsize=(12, 8))
# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Random Forest')
plt.show()
# +
# Create the parameter grid
rf = RandomForestClassifier()
param_grid = {
'n_estimators': [100],
'max_depth': [x for x in range(3, 10)],
'min_samples_leaf': [x for x in range(3, 20)],
'max_features': ['sqrt', 'log2', .2]
}
rf_cv = GridSearchCV(rf,
param_grid=param_grid,
cv=5,
n_jobs=-1,
scoring='roc_auc',
verbose=5)
y_pred_gridsearch = rf_cv.fit(X_train, y_train).predict(X_validation)
# Generate ROC curve values from the tuned model's predicted probabilities
fpr, tpr, thresholds = roc_curve(y_validation, rf_cv.predict_proba(X_validation)[:, 1])
print(classification_report(y_validation, y_pred_gridsearch))
print("Accuracy score:", accuracy_score(y_validation, y_pred_gridsearch))
# -
y_pred_proba = rf_cv.predict_proba(X_validation)[:, 1]
make_roc_auc_chart("Random Forest", y_validation, y_pred_proba)
feature_importances = pd.DataFrame(
zip(list(column_names), random_forest.feature_importances_),
columns=("Feature", "Importance")).sort_values(by="Importance",
ascending=False)[0:10]
f, ax = plt.subplots(figsize=(8, 8))
plt.title("Feature Importances")
plt.xlabel("Importance")
plt.ylabel("Feature")
plt.barh(feature_importances["Feature"], feature_importances["Importance"])
f.autofmt_xdate()
plt.show()
# ### 6C.) K-Nearest Neighbors
# +
param_grid = {'n_neighbors': [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25]}
knn = sklearn.neighbors.KNeighborsClassifier()
knn_gridsearch = GridSearchCV(knn,
param_grid=param_grid,
n_jobs=-1,
verbose=5,
return_train_score=True,
cv=5)
y_pred = knn_gridsearch.fit(X_train, y_train).predict(X_validation)
y_pred_proba = knn_gridsearch.predict_proba(X_validation)[:, 1]
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_proba)
print(classification_report(y_validation, y_pred))
print("Accuracy score:", accuracy_score(y_validation, y_pred))
make_roc_auc_chart("K Nearest Neighbors", y_validation, y_pred_proba)
# -
knn_gridsearch.best_params_
# ### 6D.) Gradient Boosting Classifier
# +
param_grid = {
'learning_rate': np.linspace(.01, .3, 30),
'max_depth': [x for x in range(2, 20)],
'n_estimators': [100],
'max_features': [.2, 'sqrt', 'log2'],
'warm_start': [True]
}
gbm = GradientBoostingClassifier()
gbm_random_search = RandomizedSearchCV(gbm,
param_distributions=param_grid,
scoring='roc_auc',
n_jobs=-1,
verbose=5,
n_iter=100,
cv=5,
return_train_score=True)
predictions = gbm_random_search.fit(X_train, y_train).predict(X_validation)
y_pred_proba = gbm_random_search.predict_proba(X_validation)[:, 1]
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_proba)
print(classification_report(y_validation, predictions))
print("Accuracy score:", accuracy_score(y_validation, predictions))
make_roc_auc_chart("Gradient Boosting Classifier", y_validation, y_pred_proba)
# -
# ### 6E.) Support Vector Machine (RBF kernel)
# +
from sklearn.svm import SVC
param_grid = {'C': [.01, .03, .1, .3, 1, 3, 10, 30]}
svc = SVC(kernel='rbf')
svc_cv = GridSearchCV(estimator=svc,
param_grid=param_grid,
n_jobs=-1,
verbose=5,
return_train_score=True,
cv=5,
scoring='roc_auc')
predictions = svc_cv.fit(X_train, y_train).predict(X_validation)
# SVC is trained without probability estimates, so use decision_function scores for the ROC curve
y_pred_scores = svc_cv.decision_function(X_validation)
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_scores)
print(classification_report(y_validation, predictions))
print("Accuracy score:", accuracy_score(y_validation, predictions))
make_roc_auc_chart("Support Vector Machine", y_validation, y_pred_scores)
# -
# ### 6F.) XGBoost
# +
# Instantiate the XGBClassifier: xg_cl
xg_cl = xgb.XGBClassifier(objective='binary:logistic', n_estimators=100)
# Create the parameter grid: gbm_param_grid
xgb_param_grid = {
'colsample_bytree': np.arange(.3, .7, .05),
'n_estimators': [100],
'max_depth': np.arange(2, 20),
    'eta': [.001, .003, .01, .03, .1, .3, 1]
}
# Perform grid search: grid_mse
xgb_cv = GridSearchCV(param_grid=xgb_param_grid,
estimator=xg_cl,
scoring="roc_auc",
cv=5,
verbose=1,
n_jobs=-1)
predictions = xgb_cv.fit(X_train, y_train).predict(X_validation)
y_pred_proba = xgb_cv.predict_proba(X_validation)[:, 1]
fpr, tpr, thresholds = roc_curve(y_validation, y_pred_proba)
print(classification_report(y_validation, predictions))
print("Accuracy score:", accuracy_score(y_validation, predictions))
make_roc_auc_chart("XGBoost", y_validation, y_pred_proba)
# +
# Use the fitted best estimator from the grid search (xg_cl itself was never fitted here)
feature_importances = pd.DataFrame(
    zip(list(column_names), xgb_cv.best_estimator_.feature_importances_),
    columns=("Feature", "Importance")).sort_values(by="Importance",
                                                   ascending=False)[0:10]
f, ax = plt.subplots(figsize=(8, 8))
plt.title("Feature Importances")
plt.xlabel("Importance")
plt.ylabel("Feature")
plt.barh(feature_importances["Feature"], feature_importances["Importance"])
f.autofmt_xdate()
plt.show()
# -
print(logreg_cv.best_params_)
print(rf_cv.best_params_)
print(knn_gridsearch.best_params_)
print(gbm_random_search.best_params_)
print(svc_cv.best_params_)
print(xgb_cv.best_params_)
# ## 7.) Conclusion
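# The six tuned models below are combined with scikit-learn's StackingClassifier:
# their cross-validated predictions become the inputs of a (default logistic-regression) meta-learner.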
classifier1 = LogisticRegression(C=logreg_cv.best_params_['C'], max_iter=10000)
classifier2 = RandomForestClassifier(
max_depth=rf_cv.best_params_['max_depth'],
min_samples_leaf=rf_cv.best_params_['min_samples_leaf'],
max_features=rf_cv.best_params_['max_features'],
n_estimators=2000,
random_state=42)
classifier3 = KNeighborsClassifier(
n_neighbors=knn_gridsearch.best_params_['n_neighbors'])
classifier4 = GradientBoostingClassifier(
n_estimators=2000,
max_features=gbm_random_search.best_params_['max_features'],
max_depth=gbm_random_search.best_params_['max_depth'],
learning_rate=gbm_random_search.best_params_['learning_rate'],
random_state=42)
classifier5 = SVC(C=svc_cv.best_params_['C'])
classifier6 = xgb.XGBClassifier(
objective='binary:logistic',
n_estimators=2000,
colsample_bytree=xgb_cv.best_params_['colsample_bytree'],
eta=xgb_cv.best_params_['eta'],
max_depth=xgb_cv.best_params_['max_depth'])
sclf = StackingClassifier(estimators=[('logreg', classifier1),
                                      ('rf', classifier2),
                                      ('knn', classifier3),
                                      ('gbm', classifier4),
                                      ('svc', classifier5),
                                      ('xgb', classifier6)]).fit(X, titanic['Survived'])
final_predictions = sclf.fit(X, y).predict(X_test)
# +
y_sclf = sclf.fit(X_train, y_train).predict(X_validation)
y_sclf_proba = sclf.predict_proba(X_validation)[:, 1]  # reuse the fit from the line above
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_validation, y_sclf_proba)
print(classification_report(y_validation, y_sclf))
print("Accuracy score:", accuracy_score(y_validation, y_sclf))
make_roc_auc_chart("Stacked Classifier", y_validation, y_pred_proba)
# -
pd.DataFrame(final_predictions,
index=test['PassengerId'],
columns=['Survived']).to_csv("Predictions.csv")
final_predictions.sum() / len(final_predictions)
print(logreg_cv.fit(X, y).predict(X_test).sum())
print(rf.fit(X, y).predict(X_test).sum())
print(knn.fit(X, y).predict(X_test).sum())
print(gbm.fit(X, y).predict(X_test).sum())
print(svc.fit(X, y).predict(X_test).sum())
print(xg_cl.fit(X, y).predict(X_test).sum())
print(final_predictions.sum())
| 23,074 |
/Three Month Study/AutoEncoding Varitional Bayes/.ipynb_checkpoints/VAE-checkpoint.ipynb
|
ce55f368386c86309520fc80e2c0f6a01cb09014
|
[] |
no_license
|
YifanXu1999/AI-Learning
|
https://github.com/YifanXu1999/AI-Learning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,603 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from scipy.stats import norm
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import MNIST
from torchvision.utils import make_grid
class VAE(nn.Module):
def __init__(self, latent_dim=20, hidden_dim=500):
super(VAE, self).__init__()
self.encoder_l1 = nn.Linear(784, hidden_dim)
self.encoder_mean = nn.Linear(hidden_dim, latent_dim)
self.encoder_logvar = nn.Linear(hidden_dim, latent_dim)
self.decoder_l1 = nn.Linear(latent_dim, hidden_dim)
self.decoder_output = nn.Linear(hidden_dim, 784)
def encode(self, x_in):
x = F.relu(self.encoder_l1(x_in.view(-1, 784)))
mean = self.encoder_mean(x)
logvar = self.encoder_logvar(x)
return mean, logvar
def decode(self, z):
z = F.relu(self.decoder_l1(z))
        x_out = torch.sigmoid(self.decoder_output(z))
return x_out.view(-1, 1, 28, 28)
def sample(self, mu, log_var):
        # Reparameterization trick: z = mu + standard deviation * eps, with eps ~ N(0, I)
        eps = torch.randn_like(mu)
        sd = torch.exp(log_var * 0.5)
        z = mu + sd * eps
return z
def forward(self, x_in):
z_mean, z_logvar = self.encode(x_in)
z = self.sample(z_mean, z_logvar)
x_out = self.decode(z)
return x_out, z_mean, z_logvar
# +
# Loss function
def criterion(x_out,x_in,z_mu,z_logvar):
    bce_loss = F.binary_cross_entropy(x_out, x_in, reduction='sum')
kld_loss = -0.5 * torch.sum(1 + z_logvar - (z_mu ** 2) - torch.exp(z_logvar))
loss = (bce_loss + kld_loss) / x_out.size(0) # normalize by batch size
return loss
# -
model = VAE()
# Optimizer
optimizer = torch.optim.Adam(model.parameters())
# Data loaders
trainloader = DataLoader(
MNIST(root='./data',train=True,download=True,transform=transforms.ToTensor()),
batch_size=128,shuffle=True)
testloader = DataLoader(
MNIST(root='./data',train=False,download=True,transform=transforms.ToTensor()),
batch_size=128,shuffle=True)
# +
# Training
def train(model,optimizer,dataloader,epochs=15):
losses = []
for epoch in range(epochs):
        print(f'Epoch {epoch + 1}/{epochs}')
        for images,_ in dataloader:
            x_in = images
optimizer.zero_grad()
x_out, z_mu, z_logvar = model(x_in)
loss = criterion(x_out,x_in,z_mu,z_logvar)
loss.backward()
optimizer.step()
            losses.append(loss.item())
return losses
train_losses = train(model,optimizer,trainloader)
plt.figure(figsize=(10,5))
plt.plot(train_losses)
plt.show()
# -
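# As a quick check of the trained decoder, we can sample latent vectors from the prior N(0, I) and decode them into images. This is only a minimal sketch (it assumes the `model` above has finished training and reuses the `make_grid` import); it is not part of the original training loop.
# +
# Sample 64 latent vectors from the standard normal prior and decode them
with torch.no_grad():
    z = torch.randn(64, 20)        # latent_dim=20, as in the VAE definition above
    samples = model.decode(z)      # shape: (64, 1, 28, 28)
# Arrange the decoded digits in an 8x8 grid and display them
grid = make_grid(samples, nrow=8)  # make_grid returns a (3, H, W) tensor
plt.figure(figsize=(6, 6))
plt.imshow(grid.permute(1, 2, 0))
plt.axis('off')
plt.show()
# -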
| 3,049 |
/Case_Study_2/Filtering.ipynb
|
5d4dea05b60697318a781a7e0d166e7ac49b592d
|
[] |
no_license
|
Yousef497/Professional-Data-Analyst-Track
|
https://github.com/Yousef497/Professional-Data-Analyst-Track
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 12,736 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3. Logistic regression
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_context("notebook", font_scale=1.4)
sns.set_style("whitegrid")
import imp
compomics_import = imp.load_source('compomics_import', '../compomics_import.py')
from IPython.core.display import HTML
css_file = '../my.css'
HTML(open(css_file, "r").read())
# **Logistic regression** (or **logit regression**) is a classification learning algorithm where the target (or **label** in this case) is **a class**. This is somewhat confusing, as the term 'regression' is normally used for modeling a continuous target, as with linear regression. However, as logistic regression is derived from linear regression (as we will see), the term logistic regression is still accepted.
#
# The logistic regression algorithm only works for **two class** data sets ($y \in \{0,1\})$ and assumes the data is **linearly separable**. Logistic regression is a popular algorithm in bioinformatics as it can learn accurate predictive models for large data sets very fast.
#
# As we will see logistic regression models predict **class probabilities** rather than classes. These probabilities can be seen as an estimation of the confidence the model has in a prediction.
#
# Let's open a data set:
# +
dataset = pd.read_csv("logistic.csv")
sns.lmplot(x="x1", y="y", data=dataset, fit_reg=False, height=5, scatter_kws={"s": 80})
plt.show()
# -
# Here $y$ is the label and can be one of two classes: 0 and 1. Just as with linear regression, we will try to model the relationship between the feature $x_1$ and $y$.
#
# So how does classification differ from regression? After all we could just try to fit a linear model
#
# $$f(x,\theta) = \theta^{\prime} x=\theta_0x_0 + \theta_1x_1$$
#
# to the data. This would compute a linear function $f(x,\theta)$ that shows the linear relation between $x_1$ and $y$.
#
# We can then apply a **threshold** to map the output of $f(x,\theta)$ onto class 0 or 1. For instance $y=1$ if $f(x) \geq 0.5$ and $y=0$ otherwise:
# +
from sklearn import metrics
sns.lmplot(x="x1", y="y", data=dataset, fit_reg=True, height=5, scatter_kws={"s": 80})
plt.show()
# -
# This linear fit achieves $R^2=0.88$, which would seem good, although judging from the plot we have to admit it doesn't look right.
#
# We can obtain a much better fit by using the non-linear **logistic function** $g(z)$ instead of the linear function to better model our data.
#
# This model is formalized as
#
# $$ f(x,\theta)=g(\theta^{\prime} x),$$
#
# with
#
# $$g(z)=\frac{1}{1+e^{-z}}.$$
#
# The following plot shows how $f(x,\theta)$ non-linearly transforms $\theta^{\prime} x$ into a value between zero and one (which are the class probabilities):
plt.figure(figsize=(4,4))
compomics_import.plot_logistic([2, -0.5])
plt.show()
# Since we assume the data to be **linearly separable instead of linearly related**, we also have to adapt the cost function $J(\theta)$ accordingly. The cost for the logistic regression learning algorithm is
#
# $$J(\theta)=-[\frac{1}{n}\sum\limits_{i=1}^ny^{(i)}log(f(x^{(i)},\theta))+(1-y^{(i)})log(1- f(x^{(i)},\theta))].$$
#
# Let's take a closer look at this cost function.
#
# We know that $y^{(i)}$ is either 0 or 1. If $y^{(i)}=1$ then the cost function $J(\theta)$ is incremented by
#
# $$-log(f(x^{(i)},\theta)).$$
#
# Similarly, if $y^{(i)}=0$ then the cost function $J(\theta)$ is incremented by
#
# $$-log(1- f(x^{(i)},\theta)).$$
#
# To understand this we can plot these increments as a function of $f(x,\theta)$:
compomics_import.plot_lr_cost()
plt.show()
# The blue curve shows the increment to the cost function for $y=1$. In this case if $f(x,\theta)$ is large then the increment should be small. If instead $f(x,\theta)$ is small then the increment should be large. We can see that the increment shown in the blue curve does indeed satisfy these criteria. The green curve shows the increment for $y=0$. In this case if $f(x,\theta)$ is small then the increment should be small. If instead $f(x,\theta)$ is large then the increment should be large.
#
# We use the gradient descent optimization algorithm to find the optimal values for $\theta$ given a train set. Similarly to linear regression the increments or decrements in each iteration are computed as:
#
# $$\theta_0:=\theta_0-\alpha\frac{1}{n}\sum\limits_{i=1}^n(f(x^{(i)},\theta)-y^{(i)})x_0^{(i)},$$
#
# $$\theta_1:=\theta_1-\alpha\frac{1}{n}\sum\limits_{i=1}^n(f(x^{(i)},\theta)-y^{(i)})x_1^{(i)},$$
#
# with $\alpha$ again the learning rate that controls the convergence of the gradient descent iterations.
#
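# As an illustration only, the cost function and the gradient descent updates above can be written directly in NumPy. The sketch below (with ad-hoc names `X_gd`, `y_gd`, `g_z` so as not to clash with later variables) fits the single-feature data set from this notebook; it is not how scikit-learn fits the model.
# +
# Toy batch gradient descent for logistic regression (illustration only)
X_gd = np.c_[np.ones(len(dataset)), dataset['x1'].values]  # prepend the x0 = 1 column
y_gd = dataset['y'].values
theta = np.zeros(2)
alpha = 0.1

def g_z(z):
    return 1 / (1 + np.exp(-z))

for _ in range(5000):
    f = np.clip(g_z(X_gd @ theta), 1e-10, 1 - 1e-10)           # f(x, theta) for all points
    cost = -np.mean(y_gd * np.log(f) + (1 - y_gd) * np.log(1 - f))
    theta -= alpha * (X_gd.T @ (f - y_gd)) / len(y_gd)         # update theta_0 and theta_1

print("theta:", theta, "final cost:", cost)
# -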
# In scikit-learn we can use the module `LogisticRegression` to compute the optimal logistic function for our data to obtain a much better fit of the data:
# +
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(C=10000)
model.fit(dataset[['x1']],dataset['y'])
predictions = model.predict_proba(dataset[['x1']])[:,1]
print("R-squared = {}".format(metrics.r2_score(dataset['y'],predictions)))
# -
# Now $R^2=0.96$. When we plot this logistic function we can see why the fit is much better:
# +
def logistic(theta0,theta1,x):
return 1/(1+(np.exp(-1*(theta0+(x*theta1)))))
sns.lmplot(x="x1", y="y", data=dataset, fit_reg=False, height=5, scatter_kws={"s": 80})
plt.plot(np.arange(-7,3,0.1),logistic(model.intercept_,model.coef_[0],np.arange(-7,3,0.1)),lw=4,color='green')
plt.show()
# -
# In fact, the function value $y$ of the logistic fit will always be a value between 0 and 1 and estimates the probability of a data point belonging to class 1. Again we can apply a threshold to map the output of $f(x,\theta)$ onto a class.
#
# When we have $m$ features (independent variables) in our data set the model function for logistic regression becomes
#
# $$f(x,\theta)=g(\theta_0x_0 + \theta_1x_1 + \theta_2x_2+...+\theta_mx_m) = g(\theta^{\prime} x),$$
#
# and the updates during gradient descent are computed as
#
# $$\theta_j:=\theta_j-\alpha\frac{1}{n}\sum\limits_{i=1}^n(f(x^{(i)},\theta)-y^{(i)})x_j^{(i)}.$$
#
# Let's take a look at an example for $m=2$. We load a data set with two features and apply logistic regression to compute a probabilistic decision boundary:
# +
dataset2D = pd.read_csv("dataset2D.csv")
X = dataset2D.copy()
y = X.pop('y')
model = LogisticRegression(C=100000)
model.fit(X,y)
score = model.score(X, y)
plt.title("accuracy = %.2f" % score)
compomics_import.plot_decision_boundary(model,X,y)
plt.show()
# -
# The green data points have $y=0$ and the blue points have $y=1$. This scatter plot suggests the following rule for classifying new feature vectors:
# New feature vectors are classified with high confidence as class 1 if they are located in the green zone; classified with high confidence as class 0 if they are in the blue zone; and classified with lower confidence (depending on whether it is more green or more blue).
#
# In scikit-learn the module `LogisticRegression` contains a method `predict_proba()` that predicts the probabilities for the classes in a data set given a trained model:
predictions = model.predict_proba(X)
print(predictions[:10])
# **Multiclass** or **multinomial** classification is the problem of classifying data points into one of more than two classes. Logistic regression computes **binary** classification models that can only separate two classes. Most machine learning classification learning algorithms compute binary classification models, but there exist also learning algorithms that can compute multiclass models.
#
# Here we describe two popular methods that combine binary classifiers to produce a multiclass classifier: **one-against-all** and **one-against-one**.
#
# Suppose we have a data set with $H$ classes. The one-against-all (or one-against-rest) method involves training a single classifier for each class $h_i$, with the data points that belong to class $h_i$ labeled as positive and all other data points labeled as negative.
#
# This method requires the binary classifier to compute a real-valued confidence score for its decision, rather than just a class label. Discrete class labels alone can lead to ambiguities, where multiple classes are predicted for a single sample.
#
# When a test point is presented each binary classifier is applied and the class $h_i$ associated with the classifier that computes the highest confidence is the predicted class for the test point.
#
# The one-against-one method involves training
#
# $$\frac{H(H-1)}{2}$$
#
# binary classifiers where each model is trained on the data points that belong to a pair of classes from the original train set, and must learn to distinguish these two classes.
#
# When a test point is presented each of these binary classifiers is applied and the class that got the highest number of positive predictions (potentially taking confidence into account) gets predicted by the combined classifier.
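# Both schemes are available in scikit-learn as meta-estimators. The following minimal sketch (on a small synthetic three-class data set, for illustration only) wraps logistic regression with each of them:
# +
from sklearn.datasets import make_classification
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier

# A toy data set with three classes
X_mc, y_mc = make_classification(n_samples=300, n_features=4, n_informative=3,
                                 n_redundant=0, n_classes=3, random_state=0)

# One-against-all: trains H = 3 binary classifiers, one per class
ovr = OneVsRestClassifier(LogisticRegression(C=10000)).fit(X_mc, y_mc)
# One-against-one: trains H(H-1)/2 = 3 binary classifiers, one per pair of classes
ovo = OneVsOneClassifier(LogisticRegression(C=10000)).fit(X_mc, y_mc)

print("one-vs-rest accuracy:", ovr.score(X_mc, y_mc))
print("one-vs-one accuracy:", ovo.score(X_mc, y_mc))
# -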
| 9,266 |
/CartPole_Keyboard.ipynb
|
4cc925b8ac05e9998a7b5c9d6164ee100f04d6eb
|
[] |
no_license
|
homedy/RLExamples
|
https://github.com/homedy/RLExamples
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 6,290 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: hcad_pred
# language: python
# name: hcad_pred
# ---
# + [markdown] papermill={"duration": 0.046028, "end_time": "2020-09-13T12:43:26.149853", "exception": false, "start_time": "2020-09-13T12:43:26.103825", "status": "completed"}
# # Harris County Appraisal District (HCAD) real and personal property data
# Harris County is the [third most populous](https://en.wikipedia.org/wiki/List_of_the_most_populous_counties_in_the_United_States) county in the USA. Its appraisal district (HCAD) publishes a fantastic dataset with each appraised property's characteristics (appraised value, fixtures, features, ...) on a yearly basis. In this notebook we explore these data for the year 2016 to understand what is available and to select variables that can help us answer whether a given property was appraised fairly.
#
# [HCAD data](https://pdata.hcad.org/download/2016.html) consists of several text files, grouped in zipped files as follows:
#
# 1. Real_acct_owner.zip
# * **Real_acct.txt**: account including owner name, owner mailing address, $\color{red}{values}$, $\color{red}{site~address}$, and legal descriptions.
# * **Real_neighborhood_code.txt**: $\color{red}{neighborhood~code}$, group code and description
# * **Parcel_tiebacks.txt**
# * **Permits.txt**: an account including permit type, permit description, and status.
# * **Owners.txt**: multiple owners.
# * **Deeds.txt**: deed information.
#
#
# 2. Real_building_land.zip
# * **Building_res.txt**: $\color{red}{all~residential~information}$
# * **Building_other.txt**: all other real properties, such as commercial and information for income producing properties including occupancy rates and operating income.
# * **Exterior.txt**: $\color{red}{general~data~about~buildings~and~sub~areas,~(style~or~use,~size,~year~built).}$
# * **Fixture.txt**: $\color{red}{characteristics~of~the~building}$. This includes bedrooms, fireplace, bathrooms, stories for residential. Also contains wall height, elevators, and other descriptions for commercial property.
# * **Extra_features.txt**: extra features for an account. This includes wood deck, $\color{red}{pool}$, storage shed, detached garage, etc. This also contains information on cracked slabs and pools.
# * **Structural_elem1.txt**: $\color{red}{Single~Family}$, Multi Family, Condos, Town homes. $\color{red}{Home~Information}$ (CDU, Grade Adjustment, Physical Condition).
# * **Structural_elem2.txt**: Commercial and exempt Properties. These files contain structural elements of a property. This includes information like $\color{red}{physical~condition,~grade,~exterior~wall,~and~foundation~type}$.
# * **Land.txt**: land use, acreage, and land units.
# * **Land_ag.txt**: agricultural and timber land information including land use, acreage, and land units.
#
#
# 3. Real_jur_exempt.zip
# * **Jur_exempt.txt**: Lists the jurisdictions and exemptions associated with an account and the tax rates.
# * **Jur_exemption_cd**: Lists the exemption code associated with an account.
# * **Jur_exemption_dscr**: Lists the jurisdictions and their exemption description.
# * **Jur_tax_district_exempt_value.txt**: Lists the jurisdictions and their exemption values.
# * **Jur_tax_district_percent_rate**: Lists the Taxing district percent rates.
# * **Jur_value.txt**: Lists the jurisdictions and values associated with an account.
#
#
# 4. PP_files.zip
# * **T_business_acct.txt**: account, including owner name, owner mailing address, values, site address, and legal descriptions, all values and etc.
# * **T_business_detail.txt**: account, items, description and item values.
# * **T_jur_exempt.txt**: Lists the jurisdictions and exemptions associated with an account and the tax rates.
# * **T_jur_value.txt**: Lists the jurisdictions and values associated with an account.
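# The exact layout of these text files (delimiter, encoding, column names) is not documented in this notebook, so the snippet below is only a sketch of how one of them might be read with pandas; the path, separator, encoding, and column name are assumptions to be checked against the actual download.
# +
import pandas as pd

real_acct = pd.read_csv('Real_acct_owner/real_acct.txt',  # assumed extraction path
                        sep='\t',                         # assumed tab-delimited
                        encoding='latin-1',               # assumed encoding
                        dtype={'acct': str},              # keep account numbers as strings (column name assumed)
                        low_memory=False)
real_acct.head()
# -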
if len(sent) >= 10:
q_sequence = build_sequence(length, sent)
out_list += q_sequence
return out_list
# + colab={} colab_type="code" id="B6-SxutKpGDn"
all_data_words = []
for i in range(len(all_data)):
sent = tokenize(all_data[i].lower())
all_data_words += sent
all_data_words = list(set(all_data_words))
# + colab={} colab_type="code" id="waWg5fDtjEae"
# add words not in the embeddings
words_to_load = 50000
PAD_IDX = 0
UNK_IDX = 1
SOS_IDX = 2
EOS_IDX = 3
import numpy as np
# reserve the first four indices for the special tokens <pad>, <unk>, <s>, and </s>
wiki_path ='/Users/cyian/Desktop/NYU/FALL2018/DS-GA1011_NLP/project/'
with open(wiki_path+'/wiki-news-300d-1M.vec') as f:
loaded_embeddings_ft_en = np.zeros((words_to_load+4, 300))
words_ft_en = {}
idx2words_ft_en = {}
ordered_words_ft_en = []
ordered_words_ft_en.extend(['<pad>', '<unk>', '<s>', '</s>'])
loaded_embeddings_ft_en[0,:] = np.zeros(300)
loaded_embeddings_ft_en[1,:] = np.random.normal(size = 300)
loaded_embeddings_ft_en[2,:] = np.random.normal(size = 300)
loaded_embeddings_ft_en[3,:] = np.random.normal(size = 300)
for i, line in enumerate(f):
if i >= words_to_load:
break
s = line.split()
loaded_embeddings_ft_en[i+4, :] = np.asarray(s[1:])
words_ft_en[s[0]] = i+4
idx2words_ft_en[i+4] = s[0]
ordered_words_ft_en.append(s[0])
length = len(np.setdiff1d(all_data_words, ordered_words_ft_en))
tmp_embeddings = np.zeros((length, 300))
for idx, word in enumerate(np.setdiff1d(all_data_words, ordered_words_ft_en)):
words_ft_en[word] = idx+words_to_load+4
idx2words_ft_en[idx+words_to_load+4] = word
tmp_embeddings[idx, :] = np.random.normal(size = 300)
loaded_embeddings_ft_en = np.concatenate((loaded_embeddings_ft_en, tmp_embeddings), axis = 0)
words_ft_en['<pad>'] = PAD_IDX
words_ft_en['<unk>'] = UNK_IDX
words_ft_en['<s>'] = SOS_IDX
words_ft_en['</s>'] = EOS_IDX
idx2words_ft_en[PAD_IDX] = '<pad>'
idx2words_ft_en[UNK_IDX] = '<unk>'
idx2words_ft_en[SOS_IDX] = '<s>'
idx2words_ft_en[EOS_IDX] = '</s>'
ordered_words_ft_en = list(words_ft_en.keys())
# + colab={} colab_type="code" id="zckTfH8kkuCl"
import nltk
EMBEDDING_DIM = 300
MAX_SEQUENCE_LENGTH = 30
# + colab={} colab_type="code" id="BH1d63qvk4Zd"
all_d = data_generator(all_data,MAX_SEQUENCE_LENGTH)
train_d = data_generator(train_data,MAX_SEQUENCE_LENGTH)
val_d = data_generator(val_data,MAX_SEQUENCE_LENGTH)
# + colab={} colab_type="code" id="qiKjFifmlEbv"
from keras.optimizers import RMSprop
def data_loader(data_input):
X = []
y = []
y_ind = []
for i in range(len(data_input)):
X.append([words_ft_en[x] if x in ordered_words_ft_en else UNK_IDX for x in data_input[i].split()[:-1]])
sub = data_input[i].split()[-1]
if sub in ordered_words_ft_en:
y.append(loaded_embeddings_ft_en[words_ft_en[sub]])
y_ind.append(words_ft_en[sub])
else:
y.append(loaded_embeddings_ft_en[UNK_IDX])
y_ind.append(UNK_IDX)
return np.array(X), np.array(y), np.array(y_ind)
# train_input2, train_label2, train_label_idx2 = data_loader(train_d)
# val_input2, val_label2, val_label_idx2 = data_loader(val_d)
# + colab={} colab_type="code" id="-H5SVX22tSfw"
# plk.dump(train_input2, open(path + '/train_input2.p', 'wb'))
# plk.dump(train_label2, open(path + '/train_label2.p', 'wb'))
# plk.dump(train_label_idx2, open(path + '/train_label_idx2.p', 'wb'))
# plk.dump(val_input2, open(path+ '/val_input2.p', 'wb'))
# plk.dump(val_label2, open(path +'/val_label2.p', 'wb'))
# plk.dump(val_label_idx2, open(path +'/val_label_idx2.p', 'wb'))
# + colab={} colab_type="code" id="_d4Yq-xdtlPf"
def save_file(lines, filename):
data = '\n'.join(lines)
file = open(filename, 'w')
file.write(data)
file.close()
out_filename = 'Balance sheet.txt'
save_file(all_data, out_filename)
# + colab={} colab_type="code" id="L_BqVmGWt0RQ"
# load doc into memory
def load_file(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
# + colab={} colab_type="code" id="55cJfxEUvIaV"
train_input = plk.load(open(path + '/train_input.p', 'rb'))
train_label = plk.load(open(path + '/train_label.p', 'rb'))
train_label_idx = plk.load(open(path + '/train_label_idx.p', 'rb'))
val_input = plk.load(open(path + '/val_input.p', 'rb'))
val_label = plk.load(open(path + '/val_label.p', 'rb'))
val_label_idx = plk.load(open(path + '/val_label_idx.p', 'rb'))
# + colab={"base_uri": "https://localhost:8080/", "height": 340} colab_type="code" id="goK1JYz0k7tl" outputId="21f3f004-ff11-4426-861b-b7fccbe8a7cc"
# sequences = tokenizer.texts_to_sequences(lines)
# vocab_size = len(tokenizer.word_index) + 1
# MAX_NUM_WORDS = words_to_load
# tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
# tokenizer.fit_on_texts(lines)
# sequences = tokenizer.texts_to_sequences(lines)
# word_index = tokenizer.word_index
# data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
# X, y = data[:,:-1], data[:,-1]
vocab_size = loaded_embeddings_ft_en.shape[0]
# y = to_categorical(y, num_classes=vocab_size)
# seq_length = X.shape[1]
train_label_cate = to_categorical(train_label_idx, vocab_size)
val_label_cate = to_categorical(val_label_idx, vocab_size)
# define model
model = Sequential()
embedding_layer = Embedding(vocab_size,
EMBEDDING_DIM,
weights=[loaded_embeddings_ft_en],
input_length=MAX_SEQUENCE_LENGTH-1,
trainable=True)
model.add(embedding_layer)
model.add(Bidirectional(GRU(300, return_sequences=True)))
model.add(Bidirectional(GRU(300)))
model.add(Dense(100, activation='tanh'))
model.add(Dense(train_label_cate.shape[1], activation='softmax'))
#import the checkpoint to save current model
filepath=path+"/GRU_2.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
earlystopper = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
callbacks_list = [checkpoint, earlystopper]
# compile model
# rms = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# fit the model
model.fit(train_input, train_label_cate, validation_data =(val_input, val_label_cate), batch_size= 128, epochs=10, callbacks=callbacks_list)
# save the model to file
model.save(path+'/model_all_2.h5')
# save the tokenizer
# plk.dump(tokenizer, open('tokenizer.pkl', 'wb'))
# + colab={} colab_type="code" id="B5VUFsVIvHEE"
from random import randint
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
# load doc into memory
def load_doc(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
# generate a sequence from a language model
def generate_seq(model, seed_text_idx, n_words):
bleu_score = []
result = list()
target_text = lines[seed_text_idx]
seed_text = ' '.join(target_text.split()[:5])
target_text_test = ' '.join(target_text.split()[5:])
in_text = seed_text
for _ in range(n_words):
# encode the text as integer
# encoded = tokenizer.texts_to_sequences([in_text])[0]
encoded = [words_ft_en[x] if x in ordered_words_ft_en else UNK_IDX for x in in_text.split()]
# truncate sequences to a fixed length
encoded = pad_sequences([encoded], maxlen=MAX_SEQUENCE_LENGTH-1, truncating='pre')
print(encoded)
# predict probabilities for each word
yhat = model.predict_classes(encoded, verbose=0)
        # map the predicted word index back to a word
out_word = idx2words_ft_en[yhat[0]]
if yhat[0] == EOS_IDX:
break
# append to input
in_text += ' ' + out_word
result.append(out_word)
seq = ' '.join(result)
ret_seq = seed_text + ' '+seq
bleu_score = sentence_bleu(seq, target_text_test)
return ret_seq, target_text, bleu_score
# + colab={"base_uri": "https://localhost:8080/", "height": 1788} colab_type="code" executionInfo={"elapsed": 25322, "status": "ok", "timestamp": 1544394547931, "user": {"displayName": "Yiyan Chen", "photoUrl": "", "userId": "08805131014141803120"}, "user_tz": 300} id="_p46F4FLsR3L" outputId="42600c43-c05f-4075-ae1b-2d21141888a9"
#test 1
# load cleaned text sequences
in_filename = 'Balance sheet.txt'
doc = load_file(in_filename)
lines = ['<s> '+x+ ' </s>' for x in doc.split('\n') if x != '']
# seq_length = len(lines[0].split()) - 1
# load the model
model = load_model(path+'/model_all.h5')
# # load the tokenizer
# tokenizer = plk.load(open('tokenizer.pkl', 'rb'))
# select a seed text
seed_text_idx = randint(0,len(lines))
# generate new text
generated, target, bleu = generate_seq(model, seed_text_idx, 50)
print(target)
print(generated)
print('BLEU score is: ', bleu)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 275, "status": "ok", "timestamp": 1544394049474, "user": {"displayName": "Yiyan Chen", "photoUrl": "", "userId": "08805131014141803120"}, "user_tz": 300} id="A_JhLOOJxTZ4" outputId="f01abe0e-2509-4b09-e2b5-b5b59a3d37d9"
train_label.shape
# + colab={} colab_type="code" id="5TPvIGOwSlnA"
from keras.models import Model
from keras.layers import Dense, Input, GRU
from keras.layers.embeddings import Embedding
word_dim = 50
num_tokens = 15000
# Define the layers
word_vec_input = Input(shape=(word_dim,))
decoder_inputs = Input(shape=(None,))
decoder_embed = Embedding(input_dim=num_tokens, output_dim=word_dim, mask_zero=True)
decoder_gru_1 = GRU(word_dim, return_sequences=True, return_state=False)
decoder_gru_2 = GRU(word_dim, return_sequences=True, return_state=True)
decoder_dense = Dense(num_tokens, activation='softmax')
# Connect the layers
embedded = decoder_embed(decoder_inputs)
gru_1_output = decoder_gru_1(embedded, initial_state=word_vec_input)
gru_2_output, state_h = decoder_gru_2(gru_1_output)
decoder_outputs = decoder_dense(gru_2_output)
# Define the model that will be used for training
training_model = Model([word_vec_input, decoder_inputs], decoder_outputs)
# Also create a model for inference (this returns the GRU state)
decoder_model = Model([word_vec_input, decoder_inputs], [decoder_outputs, state_h])
# + colab={} colab_type="code" id="k-MdE2TrCvdt"
# create a weight matrix for words in training docs
embedding_matrix = np.zeros((vocab_size, 300))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 305, "status": "ok", "timestamp": 1544393977800, "user": {"displayName": "Yiyan Chen", "photoUrl": "", "userId": "08805131014141803120"}, "user_tz": 300} id="q_pEEfaRC9pA" outputId="f521cb5d-0b4f-497b-afe1-a3070a392fdd"
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 240, "status": "ok", "timestamp": 1544394001670, "user": {"displayName": "Yiyan Chen", "photoUrl": "", "userId": "08805131014141803120"}, "user_tz": 300} id="unUWD6tjDZrK" outputId="9fc6d2af-cade-4bae-9f41-59edbd7c6100"
train_label_idx
# + colab={} colab_type="code" id="Yjbo5uvkDiKq"
}
building_res_comps = building_res_comps.loc[:, cols]
# + papermill={"duration": 0.08035, "end_time": "2020-09-13T12:43:36.691318", "exception": false, "start_time": "2020-09-13T12:43:36.610968", "status": "completed"}
building_res_comps.head()
# + papermill={"duration": 0.070089, "end_time": "2020-09-13T12:43:36.831523", "exception": false, "start_time": "2020-09-13T12:43:36.761434", "status": "completed"}
building_res_comps.shape
# + [markdown] papermill={"duration": 0.047353, "end_time": "2020-09-13T12:43:36.924596", "exception": false, "start_time": "2020-09-13T12:43:36.877243", "status": "completed"}
# # Describe and clean the columns
#
# Now we must describe each column by answering:
#
# * Meaning
# * Descriptive statistics or value counts
# * Data type
#
# There is no explicit document provided by HCAD explaining all the variables, but most are easy to guess from their names.
# + [markdown] papermill={"duration": 0.046935, "end_time": "2020-09-13T12:43:37.025829", "exception": false, "start_time": "2020-09-13T12:43:36.978894", "status": "completed"}
# ## Find duplicated rows
# + papermill={"duration": 0.746274, "end_time": "2020-09-13T12:43:37.819310", "exception": false, "start_time": "2020-09-13T12:43:37.073036", "status": "completed"}
cond0 = building_res_comps.duplicated()
building_res_comps.loc[cond0, :]
# + [markdown] papermill={"duration": 0.047921, "end_time": "2020-09-13T12:43:37.917997", "exception": false, "start_time": "2020-09-13T12:43:37.870076", "status": "completed"}
# ## Account numbers: acct
# These are the account numbers or id that the property has in HCAD system. In previous steps we selected accounts for free-standing single-family homes, with a single building per property, thus the account values in the comps DataFrame should be unique.
# + papermill={"duration": 0.190309, "end_time": "2020-09-13T12:43:38.163933", "exception": false, "start_time": "2020-09-13T12:43:37.973624", "status": "completed"}
assert building_res_comps['acct'].is_unique
# + papermill={"duration": 0.071121, "end_time": "2020-09-13T12:43:38.285914", "exception": false, "start_time": "2020-09-13T12:43:38.214793", "status": "completed"}
building_res_comps['acct'].dtype
# + papermill={"duration": 0.073122, "end_time": "2020-09-13T12:43:38.405337", "exception": false, "start_time": "2020-09-13T12:43:38.332215", "status": "completed"}
acct_min = building_res_comps['acct'].min()
acct_max = building_res_comps['acct'].max()
print(f'Minimum value: {acct_min}')
print(f'Maximum value: {acct_max}')
# + papermill={"duration": 0.156872, "end_time": "2020-09-13T12:43:38.608655", "exception": false, "start_time": "2020-09-13T12:43:38.451783", "status": "completed"}
print(f"The number of missing values is: {sum(building_res_comps['acct'].isnull())}")
# + [markdown] papermill={"duration": 0.048407, "end_time": "2020-09-13T12:43:38.704145", "exception": false, "start_time": "2020-09-13T12:43:38.655738", "status": "completed"}
# ## Property quality description: dscr
# This is the current property quality description. It is an ordered categorical variable that can take values: 'Excellent', 'Superior', 'Good', 'Average', 'Low', 'Very Low', and 'Poor'.
# + papermill={"duration": 0.072293, "end_time": "2020-09-13T12:43:38.824798", "exception": false, "start_time": "2020-09-13T12:43:38.752505", "status": "completed"}
building_res_comps['dscr'].head()
# + papermill={"duration": 0.073445, "end_time": "2020-09-13T12:43:38.945948", "exception": false, "start_time": "2020-09-13T12:43:38.872503", "status": "completed"}
order = ['Excellent', 'Superior', 'Good', 'Average', 'Low', 'Very Low', 'Poor']
building_res_comps['dscr'] = building_res_comps['dscr'].cat.reorder_categories(order, ordered=True)
# + papermill={"duration": 0.067852, "end_time": "2020-09-13T12:43:39.063394", "exception": false, "start_time": "2020-09-13T12:43:38.995542", "status": "completed"}
building_res_comps['dscr'].head()
# + papermill={"duration": 0.072347, "end_time": "2020-09-13T12:43:39.184749", "exception": false, "start_time": "2020-09-13T12:43:39.112402", "status": "completed"}
building_res_comps['dscr'].value_counts(normalize=True)
# + papermill={"duration": 0.143608, "end_time": "2020-09-13T12:43:39.378129", "exception": false, "start_time": "2020-09-13T12:43:39.234521", "status": "completed"}
print(f"The number of missing values is: {sum(building_res_comps['dscr'].isnull())}")
# + [markdown] papermill={"duration": 0.049427, "end_time": "2020-09-13T12:43:39.489768", "exception": false, "start_time": "2020-09-13T12:43:39.440341", "status": "completed"}
# ## Date erected: date_erected
# Refers to the year the property was built. Data should be positive integers up to 2015 (year before these data were issued).
# + papermill={"duration": 0.067424, "end_time": "2020-09-13T12:43:39.606994", "exception": false, "start_time": "2020-09-13T12:43:39.539570", "status": "completed"}
building_res_comps['date_erected'].head()
# + papermill={"duration": 0.116904, "end_time": "2020-09-13T12:43:39.773470", "exception": false, "start_time": "2020-09-13T12:43:39.656566", "status": "completed"}
building_res_comps['date_erected'].value_counts(bins=11, normalize=True)
# + papermill={"duration": 0.095812, "end_time": "2020-09-13T12:43:39.919060", "exception": false, "start_time": "2020-09-13T12:43:39.823248", "status": "completed"}
building_res_comps['date_erected'].describe()
# + papermill={"duration": 0.147963, "end_time": "2020-09-13T12:43:40.125550", "exception": false, "start_time": "2020-09-13T12:43:39.977587", "status": "completed"}
print(f"The number of missing values is: {sum(building_res_comps['date_erected'].isnull())}")
# + [markdown] papermill={"duration": 0.051775, "end_time": "2020-09-13T12:43:40.227508", "exception": false, "start_time": "2020-09-13T12:43:40.175733", "status": "completed"}
# ## Year remodeled: yr_remodel
# When was the last year the property was remodeled? In principle there shouldn't be properties remodeled before they were erected. Let's check that assumption.
# + papermill={"duration": 0.083743, "end_time": "2020-09-13T12:43:40.362939", "exception": false, "start_time": "2020-09-13T12:43:40.279196", "status": "completed"}
cond0 = building_res_comps['yr_remodel'] < building_res_comps['date_erected']
remo_before_build = building_res_comps['yr_remodel'].loc[cond0]
remo_before_build_vc = remo_before_build.value_counts()
remo_before_build_vc
# + [markdown] papermill={"duration": 0.05116, "end_time": "2020-09-13T12:43:40.467087", "exception": false, "start_time": "2020-09-13T12:43:40.415927", "status": "completed"}
# Let's assign NaNs to these values.
# + papermill={"duration": 0.073571, "end_time": "2020-09-13T12:43:40.592032", "exception": false, "start_time": "2020-09-13T12:43:40.518461", "status": "completed"}
replace = {num: np.nan for num in remo_before_build_vc.index}
# + papermill={"duration": 0.101552, "end_time": "2020-09-13T12:43:40.744590", "exception": false, "start_time": "2020-09-13T12:43:40.643038", "status": "completed"}
building_res_comps['yr_remodel'].replace(replace, inplace=True)
# + papermill={"duration": 0.114971, "end_time": "2020-09-13T12:43:40.912286", "exception": false, "start_time": "2020-09-13T12:43:40.797315", "status": "completed"}
building_res_comps['yr_remodel'].value_counts(bins=11, normalize=True, sort=False)
# + papermill={"duration": 0.11671, "end_time": "2020-09-13T12:43:41.087936", "exception": false, "start_time": "2020-09-13T12:43:40.971226", "status": "completed"}
building_res_comps['yr_remodel'].describe()
# + papermill={"duration": 0.162067, "end_time": "2020-09-13T12:43:41.328777", "exception": false, "start_time": "2020-09-13T12:43:41.166710", "status": "completed"}
print(f"The number of missing values is: {sum(building_res_comps['yr_remodel'].isnull())}")
# + [markdown] papermill={"duration": 0.053409, "end_time": "2020-09-13T12:43:41.439604", "exception": false, "start_time": "2020-09-13T12:43:41.386195", "status": "completed"}
# ## Areas
# There are a few columns in the comps DataFrame that account for different property areas. Some of their descriptions can be found on [HCAD definitions file](https://pdata.hcad.org/Desc/Definition_help.pdf):
#
# 1. im_sq_ft: Improvement area.
# 2. base_ar: Base area. Building Area for commercial properties; Sum of the areas of all the base areas of a commercial property. Residential properties do not use base area.
# 3. act_ar: Actual area. For residential properties this is the base area of the house.
# 4. heat_ar: Living Area or Building sqft of a residential building.
# 5. gross_ar: Usually equal to Actual area, except in 1+1/2 story houses. Commercial properties do not use Gross Area.
# 6. eff_ar: Effective area. This is used in determining the market value of the improvements, but there is no clear definition in the HCAD website.
#
# From this list we gather that we can safely drop the `base_ar` as we are focused on residential properties. Also the `heat_ar` should be no larger than the `act_ar`, e.g. the house porch is not included in the `heat_ar` but it is included in the `act_ar`.
# + papermill={"duration": 0.096647, "end_time": "2020-09-13T12:43:41.589294", "exception": false, "start_time": "2020-09-13T12:43:41.492647", "status": "completed"}
cols = list(building_res_comps.columns)
cols.remove('base_ar')
building_res_comps = building_res_comps.loc[:, cols]
# + papermill={"duration": 0.071707, "end_time": "2020-09-13T12:43:41.716628", "exception": false, "start_time": "2020-09-13T12:43:41.644921", "status": "completed"}
all(building_res_comps['heat_ar'] > building_res_comps['act_ar'])
# + [markdown] papermill={"duration": 0.052645, "end_time": "2020-09-13T12:43:41.822667", "exception": false, "start_time": "2020-09-13T12:43:41.770022", "status": "completed"}
# We should also consider dropping all the properties that have zero in all of these area columns.
# + papermill={"duration": 0.082065, "end_time": "2020-09-13T12:43:41.963257", "exception": false, "start_time": "2020-09-13T12:43:41.881192", "status": "completed"}
cond0 = building_res_comps['im_sq_ft'] == 0
cond1 = building_res_comps['im_sq_ft'] == building_res_comps['act_ar']
cond2 = building_res_comps['im_sq_ft'] == building_res_comps['heat_ar']
cond3 = building_res_comps['im_sq_ft'] == building_res_comps['gross_ar']
cond4 = building_res_comps['im_sq_ft'] == building_res_comps['eff_ar']
# + papermill={"duration": 0.124449, "end_time": "2020-09-13T12:43:42.142571", "exception": false, "start_time": "2020-09-13T12:43:42.018122", "status": "completed"}
building_res_comps = building_res_comps.loc[~(cond0 & cond1 & cond2 & cond3 & cond4), :]
# + papermill={"duration": 0.077034, "end_time": "2020-09-13T12:43:42.277362", "exception": false, "start_time": "2020-09-13T12:43:42.200328", "status": "completed"}
building_res_comps.shape
# + [markdown] papermill={"duration": 0.053854, "end_time": "2020-09-13T12:43:42.383965", "exception": false, "start_time": "2020-09-13T12:43:42.330111", "status": "completed"}
# ### im_sq_ft: Improvement area
# + papermill={"duration": 0.075312, "end_time": "2020-09-13T12:43:42.513284", "exception": false, "start_time": "2020-09-13T12:43:42.437972", "status": "completed"}
from src.data.utils import fix_area_column
# + papermill={"duration": 0.588078, "end_time": "2020-09-13T12:43:43.154829", "exception": false, "start_time": "2020-09-13T12:43:42.566751", "status": "completed"}
building_res_comps = fix_area_column(building_res_comps, 'im_sq_ft')
# + [markdown] papermill={"duration": 0.055534, "end_time": "2020-09-13T12:43:43.271166", "exception": false, "start_time": "2020-09-13T12:43:43.215632", "status": "completed"}
# ### Actual area: act_ar
# + papermill={"duration": 0.192992, "end_time": "2020-09-13T12:43:43.518554", "exception": false, "start_time": "2020-09-13T12:43:43.325562", "status": "completed"}
building_res_comps = fix_area_column(building_res_comps, 'act_ar')
# + [markdown] papermill={"duration": 0.054883, "end_time": "2020-09-13T12:43:43.626719", "exception": false, "start_time": "2020-09-13T12:43:43.571836", "status": "completed"}
# ### Living area: heat_ar
# + papermill={"duration": 0.605935, "end_time": "2020-09-13T12:43:44.286793", "exception": false, "start_time": "2020-09-13T12:43:43.680858", "status": "completed"}
building_res_comps = fix_area_column(building_res_comps, 'heat_ar')
# + [markdown] papermill={"duration": 0.056005, "end_time": "2020-09-13T12:43:44.399011", "exception": false, "start_time": "2020-09-13T12:43:44.343006", "status": "completed"}
# ### Gross area: gross_ar
# + papermill={"duration": 0.205447, "end_time": "2020-09-13T12:43:44.660704", "exception": false, "start_time": "2020-09-13T12:43:44.455257", "status": "completed"}
building_res_comps = fix_area_column(building_res_comps, 'gross_ar')
# + [markdown] papermill={"duration": 0.056241, "end_time": "2020-09-13T12:43:44.771058", "exception": false, "start_time": "2020-09-13T12:43:44.714817", "status": "completed"}
# ### Effective area: eff_ar
# + papermill={"duration": 0.574328, "end_time": "2020-09-13T12:43:45.401271", "exception": false, "start_time": "2020-09-13T12:43:44.826943", "status": "completed"}
building_res_comps = fix_area_column(building_res_comps, 'eff_ar')
# + [markdown] papermill={"duration": 0.05717, "end_time": "2020-09-13T12:43:45.515959", "exception": false, "start_time": "2020-09-13T12:43:45.458789", "status": "completed"}
# ## Perimeter
# Let's say that all perimeter values less than 50 ft are bad, and replace them with NaNs.
# + papermill={"duration": 0.07829, "end_time": "2020-09-13T12:43:45.651012", "exception": false, "start_time": "2020-09-13T12:43:45.572722", "status": "completed"}
cond0 = building_res_comps['perimeter'] < 50
# + papermill={"duration": 0.182005, "end_time": "2020-09-13T12:43:45.889100", "exception": false, "start_time": "2020-09-13T12:43:45.707095", "status": "completed"}
print(f"Number of bad perimeter values: {sum(cond0)}")
# + papermill={"duration": 0.109701, "end_time": "2020-09-13T12:43:46.059831", "exception": false, "start_time": "2020-09-13T12:43:45.950130", "status": "completed"}
bad_perimeter_vc = building_res_comps.loc[cond0, :]['perimeter'].value_counts()
bad_perimeter_vc
# + papermill={"duration": 0.074826, "end_time": "2020-09-13T12:43:46.214016", "exception": false, "start_time": "2020-09-13T12:43:46.139190", "status": "completed"}
replace = {num: np.nan for num in bad_perimeter_vc.index}
# + papermill={"duration": 0.09967, "end_time": "2020-09-13T12:43:46.371757", "exception": false, "start_time": "2020-09-13T12:43:46.272087", "status": "completed"}
building_res_comps['perimeter'].replace(replace, inplace=True)
# + papermill={"duration": 0.135587, "end_time": "2020-09-13T12:43:46.571165", "exception": false, "start_time": "2020-09-13T12:43:46.435578", "status": "completed"}
building_res_comps['perimeter'].describe()
# + papermill={"duration": 0.166622, "end_time": "2020-09-13T12:43:46.794207", "exception": false, "start_time": "2020-09-13T12:43:46.627585", "status": "completed"}
print(f"The number of missing perimeter values is: {sum(building_res_comps['perimeter'].isnull())}")
# + [markdown] papermill={"duration": 0.059368, "end_time": "2020-09-13T12:43:46.922470", "exception": false, "start_time": "2020-09-13T12:43:46.863102", "status": "completed"}
# # Export building_res_comps
# That was a lot of work! Let's save it as a pickle file and continue the data conditioning in the next notebook.
# + papermill={"duration": 0.075113, "end_time": "2020-09-13T12:43:47.062289", "exception": false, "start_time": "2020-09-13T12:43:46.987176", "status": "completed"}
from src.data.utils import save_pickle
# + papermill={"duration": 0.074985, "end_time": "2020-09-13T12:43:47.201476", "exception": false, "start_time": "2020-09-13T12:43:47.126491", "status": "completed"}
save_fn = ROOT_DIR / 'data/raw/2016/building_res_comps.pickle'
# + papermill={"duration": 0.127475, "end_time": "2020-09-13T12:43:47.410869", "exception": false, "start_time": "2020-09-13T12:43:47.283394", "status": "completed"}
save_pickle(building_res_comps, save_fn)
# + [markdown] papermill={"duration": 0.062963, "end_time": "2020-09-13T12:43:47.539107", "exception": false, "start_time": "2020-09-13T12:43:47.476144", "status": "completed"}
# # Export unique account numbers of interest: one_bld_in_acct
# + papermill={"duration": 0.083903, "end_time": "2020-09-13T12:43:47.685824", "exception": false, "start_time": "2020-09-13T12:43:47.601921", "status": "completed"}
save_fn = ROOT_DIR / 'data/raw/2016/one_bld_in_acct.pickle'
# + papermill={"duration": 0.088494, "end_time": "2020-09-13T12:43:47.833020", "exception": false, "start_time": "2020-09-13T12:43:47.744526", "status": "completed"}
save_pickle(one_bld_in_acct, save_fn)
| 32,437 |
/.ipynb_checkpoints/DataExplorationALL-checkpoint.ipynb
|
1bb2167f841803194405c7b1cf31e94c7da8599d
|
[] |
no_license
|
ChiruGit/Capstone
|
https://github.com/ChiruGit/Capstone
| 1 | 3 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,711,834 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv("AllInput.csv")
data[1:4]
data.describe()
data[data['SenatorName']=='Jim F. Barksdale']
len(data)
selectdata3 = data[['Democratic','Republican','Other','No of opponents','SenatorName','% Votes','Wiki?','Google?',' #Orgs','#Events','#Articles']]
selectdata3.describe()
#'Occupation1'
selectdata = data[['Democratic','Republican','Other','No of opponents','SenatorName','% Votes','Wiki?','Google?',' #Orgs','#Events','#Articles']]
selectdata2 = data[['Party','% Votes','Wiki?','Google?',' #Orgs','#Events','#Articles']]
import seaborn as sns
sns.set(style="ticks")
# +
sns.set(font_scale = 2.5, font='Times New Roman')
sns.pairplot(selectdata2, hue="Party")
#sns.pairplot(selectdata2)
# -
sns.pairplot(selectdata, hue="SenatorName")
#selectdata = data[['Democratic','Republican','Other','No of opponents','SenatorName','% Votes','Wiki?','Google?',' #Orgs','#Events','#Articles']]
selectdatacluster = selectdata.drop(['Democratic','Republican','Other','No of opponents','% Votes','Wiki?','Google?','SenatorName'], axis=1)
selectdatacluster.head()
import random
random.uniform(0, 1)
# +
testcluster = np.array(selectdatacluster,dtype=float)
for i in range(0,len(testcluster)):
testcluster[i][0] = float(testcluster[i][0]) + random.uniform(0, 0.25)
testcluster[i][1] = testcluster[i][1] + random.uniform(0, 0.25)
testcluster[i][2] = testcluster[i][2] + random.uniform(0, 0.25)
# +
from sklearn.cluster import KMeans
import random
# run kmeans algorithm (this is the most traditional use of k-means)
kmeans = KMeans(init='random', # initialization
n_clusters=6, # number of clusters
n_init=1, # number of different times to run k-means
n_jobs=-1)
kmeans.fit(testcluster)
# visualize the data
centroids = kmeans.cluster_centers_
plt.plot(testcluster[:, 0], testcluster[:, 1], 'r.', markersize=8) #plot the data
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='+', s=200, linewidths=3, color='k') # plot the centroids
plt.title('K-means clustering of jittered engagement counts')
plt.xlabel('#Orgs (jittered)')
plt.ylabel('#Events (jittered)')
plt.grid()
plt.show()
# -
selectdatacluster = selectdatacluster.astype(float)
selectdatacluster.head()
selectdatacluster[0:1]
# +
from sklearn.cluster import KMeans
import random
#len(selectdatacluster)
for i in range(0,len(selectdatacluster)):
#print()
selectdatacluster[i:i+1] = selectdatacluster[i:i+1] + random.uniform(0, 0.5)
#selectdatacluster[:i]['#Events'] = selectdatacluster[:i]['#Events'] + random.uniform(0, 0.25)
#selectdatacluster[:i]['#Articles'] = selectdatacluster[:i]['#Articles'] + random.uniform(0, 0.25)
# -
selectdatacluster[0:3]
selectdata
# +
df = selectdata['% Votes'] *100
#df.head()
df2 = selectdata
df2['% Votes'] = df
df2.head()
# -
df3 = df2[' #Orgs'].astype(float)
df2[' #Orgs'] = df3
df2.head()
df3 = df2['#Events'].astype(float)
df2['#Events'] = df3
df2.head()
df3 = df2['#Articles'].astype(float)
df2['#Articles'] = df3
df2.head()
df2[1:2][' #Orgs'] + random.uniform(0, 0.25)
df2[' #Orgs'] = selectdatacluster[' #Orgs']
df2['#Events'] = selectdatacluster['#Events']
df2['#Articles'] = selectdatacluster['#Articles']
df2.head()
#fig = plt.figure(figsize=(20,20))
df2.columns
hist = df2[['NumOrgs','NumEvents','NumArticles','Wiki','Google']].hist(bins=5, figsize=(15,15), color = '#5bc8e0')
plt.show()
# +
np.random.seed(19680801)
fig = plt.figure(figsize=(12,8))
N = 50
#colors = np.random.rand(N)
area = (30 * np.random.rand(N))**2 # 0 to 15 point radii
colors = df2[' #Orgs']
# c=colors,
scatter=plt.scatter(df2['#Events'], df2['% Votes'], s=200,alpha=0.5, c = colors,cmap='cool')
fig.suptitle('Scatter Plot with Orgs as colorbar', fontsize=30)
plt.xlabel('Events', fontsize=30)
plt.ylabel('% Votes', fontsize=30)
plt.colorbar(scatter)
plt.show()
# +
np.random.seed(196801)
# c=colors,'#5fd0e6'
colors = df2['#Events']
fig = plt.figure(figsize=(12,8))
#scatter = plt.scatter(df2[' #Orgs'], df2['% Votes'], s=150,alpha=0.5, c= colors)
scatter = plt.scatter(df2[' #Orgs'], df2['% Votes'], s=150,alpha=0.5,c= colors, cmap= 'winter')
fig.suptitle('Scatter Plot with Events as Colorbar', fontsize=30)
plt.ylabel('% Votes', fontsize = 30)
plt.xlabel(' #Orgs', fontsize = 30)
plt.colorbar(scatter)
plt.show()
# +
np.random.seed(196801)
# c=colors,'#5fd0e6'
colors = df2['#Articles']
fig = plt.figure(figsize=(12,8))
scatter = plt.scatter(df2[' #Orgs'], df2['% Votes'], s=150,alpha=0.25, c= colors,cmap = 'winter')
fig.suptitle('Scatter Plot with Articles as Colorbar', fontsize=30)
plt.ylabel('% Votes', fontsize=30)
plt.xlabel(' #Orgs', fontsize=30)
plt.colorbar(scatter)
plt.show()
# +
np.random.seed(196801)
colors = df2[' #Orgs'] + df2['#Events']
fig = plt.figure(figsize=(12,8))
scatter = plt.scatter(df2['#Articles'], df2['% Votes'], s=200,alpha=0.5, c=colors, cmap = 'summer')
fig.suptitle('Scatter Plot with Orgs+Events as Colorbar' , fontsize = 30)
plt.ylabel('% Votes', fontsize = 30)
plt.xlabel('#Articles', fontsize = 30)
plt.colorbar(scatter)
plt.show()
# -
df2.columns
# +
np.random.seed(196801)
colors = df2['NumOrgs'] + df2['NumEvents']
fig = plt.figure(figsize=(12,8))
scatter = plt.scatter(df2['Wiki'], df2['Votes'], s=200,alpha=0.5, c=colors, cmap = 'winter')
fig.suptitle('Scatter Plot with Orgs+Events as Colorbar' , fontsize = 30)
plt.ylabel('% Votes', fontsize = 30)
plt.xlabel('Wiki', fontsize = 30)
plt.colorbar(scatter)
plt.show()
# +
np.random.seed(196801)
colors = df2['NumOrgs'] + df2['NumEvents']
fig = plt.figure(figsize=(12,8))
scatter = plt.scatter(df2['Google'], df2['Votes'], s=200,alpha=0.5, c=colors, cmap = 'winter')
fig.suptitle('Scatter Plot with Orgs+Events as Colorbar' , fontsize = 30)
plt.ylabel('% Votes', fontsize = 30)
plt.xlabel('Google', fontsize = 30)
plt.colorbar(scatter)
plt.show()
# +
def doKmeans(X, nclust=3):
model = KMeans(nclust)
model.fit(X)
clust_labels = model.predict(X)
cent = model.cluster_centers_
return (clust_labels, cent)
clust_labels, cent = doKmeans(selectdatacluster,7)
kmeans = pd.DataFrame(clust_labels)
#selectdatacluster.insert((selectdatacluster.shape[1]),'kmeans',kmeans)
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
scatter = ax.scatter(selectdatacluster['#Events'],selectdatacluster['#Articles'],
c=kmeans[0],s=100)
ax.set_title('K-Means Clustering')
ax.set_xlabel('#Events')
ax.set_ylabel('#Articles')
plt.colorbar(scatter)
# -
selectdatacluster.drop('kmeans', axis=1, errors='ignore')  # 'kmeans' is only present if the insert above is uncommented
selectdatacluster
df2.head()
selectdata = data[['Democratic','Republican','Other','No of opponents','SenatorName','% Votes','Wiki?','Google?',' #Orgs','#Events','#Articles']]
selectdata
type(selectdata)
selectdata.head()
df_select = selectdata
df_select.head()
# +
df_select.rename(columns={'Wiki?':'Wiki'},inplace=True)
df_select.rename(columns={'Google?':'Google'},inplace=True)
df_select.rename(columns={'% Votes':'Votes'},inplace=True)
df_select.rename(columns={' #Orgs':'NumOrgs'},inplace=True)
df_select.rename(columns={'#Events':'NumEvents'},inplace=True)
df_select.rename(columns={'#Articles':'NumArticles'},inplace=True)
df_select.head()
# +
#df_select.Wiki.replace(('yes', 'no'), (1, 0,), inplace=True)
#> 30 = 1; 10-30 = 2; 5-10 = 3; 0-5 = 4
#pd.cut(df_select['Votes'], bins=[0, 5, 10, 30], include_lowest=True)
#df_select['Votes'] = pd.cut(df_select['Votes'], bins=[0, 0.05, 0.10, 0.30, 1], include_lowest=True, labels=['0', '1', '2', '3'])
# -
df_select.head()
df1 = df_select
df1.head()
### Don't run this code
df1['Wiki'] = df1.Wiki.map(dict(Yes=1, No=0))
df1['Google'] = df1.Google.map(dict(Yes=1, No=0))
df1.head(10)
df1.info() # now our data looks better!!
#One-hot encoding
#Useful link: http://pbpython.com/categorical-encoding.html
df2 = df1
#df2 = df2.drop(['SenatorName'], axis=1)
df2.head()
#df2 = pd.get_dummies(df1, columns=["SenatorName", "Occupation1"], prefix=["Senator", "Occ"]).head()
df2.describe()
#df2['Votes'] = df2['Votes'].astype('int')
df2.info()
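# The commented-out `get_dummies` call above is what one-hot encoding would look like here; a minimal sketch of its output (the `Senator_` prefix is illustrative):
# +
encoded = pd.get_dummies(df1, columns=['SenatorName'], prefix=['Senator'])
print(encoded.shape)  # one extra 0/1 column per senator name
encoded.head()
# -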
# ### Random Forest
# +
#https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/
#Using H20
#https://github.com/h2oai/h2o-tutorials/blob/master/tutorials/gbm-randomforest/GBM_RandomForest_Example.py
# Example from the Git shows H2O implementation of Random forest that would not need one-hot encoding
#import h2o
#import os
#Other useful Links: https://www.blopig.com/blog/2017/07/using-random-forests-in-python-with-scikit-learn/
# -
#df2=df2.drop('Prediction',axis=1)
df2.columns
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn import metrics as mt
from sklearn.model_selection import ShuffleSplit
# we want to predict the X and y data as follows:
if 'Votes' in df2:
y = df2['Votes'].values # get the labels we want
#del df_select['elf'] # get rid of the class label
X= df2[df2.columns.difference(['Votes','SenatorName','No of opponents','Democratic', 'Republican', 'Other'])].values
#X = df_select.values # use everything else to predict!
## X and y are now numpy matrices, by calling 'values' on the pandas data frames we
# have converted them into simple matrices to use with scikit learn
# create cross validation iterator
#cv_object = ShuffleSplit(n_splits=2)
# +
#https://www.blopig.com/blog/2017/07/using-random-forests-in-python-with-scikit-learn/
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, random_state=42)
#scaler = StandardScaler().fit(X_train)
#X_train_scaled = pd.DataFrame(scaler.transform(X_train), index=X_train.index.values, columns=X_train.columns.values)
#X_test_scaled = pd.DataFrame(scaler.transform(X_test), index=X_test.index.values, columns=X_test.columns.values)
# -
#http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=150, oob_score=True, random_state=0)
rf.fit(X_train, y_train)
# +
from sklearn.metrics import r2_score
from scipy.stats import spearmanr, pearsonr
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
test_score = r2_score(y_test, predicted_test)
spearman = spearmanr(y_test, predicted_test)
pearson = pearsonr(y_test, predicted_test)
print(f'Out-of-bag R-2 score estimate: {rf.oob_score_:>5.3}')
print(f'Test data R-2 score: {test_score:>5.3}')
print(f'Test data Spearman correlation: {spearman[0]:.3}')
print(f'Test data Pearson correlation: {pearson[0]:.4}')
print(f'Test data Pearson correlation: {pearson}')
# -
# Out-of-bag R-2 score estimate: 0.317
# Test data R-2 score: 0.459
# Test data Spearman correlation: 0.692
# Test data Pearson correlation: 0.696
# +
from sklearn.metrics import mean_squared_error, r2_score
print(mean_squared_error(y_test, predicted_test))
# +
#http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from sklearn.ensemble import RandomForestRegressor
for i in range(200,220):
rf = RandomForestRegressor(n_estimators=i, oob_score=True, random_state=0)
rf.fit(X_train, y_train)
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
test_score = r2_score(y_test, predicted_test)
spearman = spearmanr(y_test, predicted_test)
pearson = pearsonr(y_test, predicted_test)
print()
print(i,pearson[0],mean_squared_error(y_test, predicted_test))
#0.82001,0.01567
# +
import math
print(math.sqrt(0.0041227453111599656)*7512322)
print(((7512322-482355.7367047406)/7512322)*100)
# -
d = {'Actual %Votes': y_test*100, 'Predicted %Votes':predicted_test*100 }
results = pd.DataFrame(data=d)
results
len(results)
df2=df2.drop('Prediction',axis=1)
df2["Prediction"] = ""
df2.head()
predicted = rf.predict(X)
len(predicted)
len(df2)
df2["Prediction"] = predicted
df2.tail()
selectdatacluster2 = df2.drop(['Votes','Wiki','Google','NumOrgs','NumArticles','Prediction'], axis=1)
selectdatacluster2.head()
#from sklearn.cluster import KMeans
#import random
#len(selectdatacluster)
#selectdatacluster2 = df2["Votes","Prediction"]
for i in range(0,len(selectdatacluster2)):
#print()
selectdatacluster2[i:i+1] = selectdatacluster2[i:i+1] + random.uniform(0, 0.25)
#selectdatacluster[:i]['#Events'] = selectdatacluster[:i]['#Events'] + random.uniform(0, 0.25)
#selectdatacluster[:i]['#Articles'] = selectdatacluster[:i]['#Articles'] + random.uniform(0, 0.25)
selectdatacluster2.head()
df2['NumEvents'] = selectdatacluster2['NumEvents']
#df2['Prediction'] = selectdatacluster2['Prediction']
df2.head()
# +
np.random.seed(19680801)
fig = plt.figure(figsize=(12,8))
N = 50
#colors = np.random.rand(N)
area = (30 * np.random.rand(N))**2 # 0 to 15 point radii
# c=colors,
plt.scatter(df2['NumEvents'], df2['Votes'], s=400,alpha=0.5, c = '#ed0082')
plt.scatter(df2['NumEvents'], df2['Prediction'], s=200,alpha=0.8, c = '#000000')
fig.suptitle('Scatter Plot')
plt.xlabel('Events')
plt.ylabel('% Votes')
plt.show()
# -
d = results.drop([1,])
d = d.drop([2,])
d.head()
df2.head()
dfwiki = df2[df2['Wiki']==1]
#dfnowiki = df2[df2['Wiki']==0]['Votes','Prediction','Wiki']
dfwiki = dfwiki[['Votes','Prediction','Wiki']]
dfwiki.head()
dfnowiki = df2[df2['Wiki']==0]
dfnowiki = dfnowiki[['Votes','Prediction','Wiki']]
#dfwiki.head()
dfnowiki.head()
# +
np.random.seed(19601)
colors = df2['Wiki']
fig = plt.figure(figsize=(17,17))
#plt.scatter(df2[df2['State']=='Florida']['Votes'], df2[df2['State']=='Florida']['Prediction'], s=200,alpha=0.5, c='#4c2373')
plt.scatter(dfnowiki['Votes'], dfnowiki['Prediction'], s=200,alpha=0.5, c='Blue', cmap = 'winter', label='No_Wiki')
plt.scatter(dfwiki['Votes'], dfwiki['Prediction'], s=200,alpha=0.5, c='Red', cmap = 'winter', label='Wiki')
#plt.scatter(df2['Votes'], df2['Prediction'], s=200,alpha=0.5, c='#4c2373')
fig.suptitle('Scatter Plot - Predicted Vs Actual',fontsize=30)
plt.ylabel('Predicted % Votes',fontsize=30)
plt.xlabel('Actual % Votes',fontsize=30)
plt.axis('equal')
#plt.yticks(np.arange(0.0, 1.1, 0.1))
#plt.xticks(np.arange(0.0, 1.1, 0.1))
plt.legend(fontsize=30)
plt.plot( [0,1],[0,1] )
plt.show()
# -
# ## Random Forest iteration 2
#df2=df2.drop('Prediction',axis=1)
df2.columns
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn import metrics as mt
from sklearn.model_selection import ShuffleSplit
# we want to predict the X and y data as follows:
if 'Votes' in df2:
y = df2['Votes'].values # get the labels we want
#del df_select['elf'] # get rid of the class label
X= df2[df2.columns.difference(['Votes','SenatorName'])].values
#X = df_select.values # use everything else to predict!
## X and y are now numpy matrices, by calling 'values' on the pandas data frames we
# have converted them into simple matrices to use with scikit learn
# create cross validation iterator
#cv_object = ShuffleSplit(n_splits=2)
# +
#https://www.blopig.com/blog/2017/07/using-random-forests-in-python-with-scikit-learn/
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, random_state=42)
#scaler = StandardScaler().fit(X_train)
#X_train_scaled = pd.DataFrame(scaler.transform(X_train), index=X_train.index.values, columns=X_train.columns.values)
#X_test_scaled = pd.DataFrame(scaler.transform(X_test), index=X_test.index.values, columns=X_test.columns.values)
# -
#http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=150, oob_score=True, random_state=0)
rf.fit(X_train, y_train)
# +
from sklearn.metrics import r2_score
from scipy.stats import spearmanr, pearsonr
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
test_score = r2_score(y_test, predicted_test)
spearman = spearmanr(y_test, predicted_test)
pearson = pearsonr(y_test, predicted_test)
print(f'Out-of-bag R-2 score estimate: {rf.oob_score_:>5.3}')
print(f'Test data R-2 score: {test_score:>5.3}')
print(f'Test data Spearman correlation: {spearman[0]:.3}')
print(f'Test data Pearson correlation: {pearson[0]:.4}')
print(f'Test data Pearson correlation: {pearson}')
# +
from sklearn.metrics import mean_squared_error, r2_score
print(mean_squared_error(y_test, predicted_test))
# -
df2["Prediction"] = ""
df2.head()
predicted = rf.predict(X)
len(predicted)
len(df2)
df2["Prediction"] = predicted
df2.tail()
# +
#df2.loc[(df2['Votes']<60.0 & df2['Votes']>20.0) & (df2['Prediction']<10.0)]
#df2.loc[df2[(df2['Votes']<60.0)]['Votes']>20.0]
#df2.loc[df2[(df2['Votes']<60.0)]['Votes']>20.0]
#df2['Senators', 'State'] = data['SenatorName', 'State']
df2['State'] = data['State']
df = df2[(df2['Votes'] >= .20) & (df2['Votes'] <= .60)]
df4 = df[(df['Prediction'] <0.10)]
df4
# +
df5 = df2[(df2['Votes'] >= .50) & df2['Wiki']==1]
df6 = df5[(df5['Prediction'] <.45)]
df6
#'Brian', 'Schatz','Mike', 'Crapo',' John', 'Hoeven','Ron', 'Wyden','Patrick', 'Leahy','Patty', 'Murray','Russ', 'Feingold')
# -
df5 = df2[(df2['Votes'] >= .60) & df2['Wiki']==1]
df6 = df5[(df5['Prediction'] <.65)]
df6
df8 = df2[(df2['Votes'] >= .40) & df2['Wiki']==1]
df8
len(df8)
df8
df7 = df2[(df2['SenatorName'] =='Kamala Harris')]
df7
np.random.seed(19601)
# c=colors,
fig = plt.figure(figsize=(7,7))
plt.scatter(d['Actual %Votes'], d['Predicted %Votes'], s=200,alpha=0.5, c='#4c2373')
fig.suptitle('Scatter Plot')
plt.ylabel('Predicted % Votes')
plt.xlabel('Actual % Votes')
plt.show()
np.random.seed(19601)
# c=colors,
plt.scatter(results['Actual %Votes'], results['Predicted %Votes'], s=200,alpha=0.5, c='#4c2373')
fig.suptitle('Scatter Plot')
plt.ylabel('Predicted % Votes')
plt.xlabel('Actual % Votes')
plt.show()
# ### Hyperparameter optimization
# +
#https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74
from sklearn.model_selection import KFold
# `bank_regression` from the linked tutorial is not defined in this notebook,
# so the X and y matrices built above are used instead.
seed = 0
n_splits = 4
yhat = np.zeros(y.shape)
cv = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
for max_depth in range(1, 100)[::20]:
    for train_index, test_index in cv.split(X, y):
        clf = RandomForestRegressor(max_depth=max_depth, n_estimators=5, random_state=seed, n_jobs=2)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        clf.fit(X_train, y_train)
        yhat[test_index] = clf.predict(X_test)
    print("max_depth: %.f, MSE: %.4f, R^2: %0.4f" % (max_depth, mean_squared_error(y, yhat), r2_score(y, yhat)))
# +
from sklearn import metrics as mt
# accuracy_score is a classification metric and raises an error for a continuous target;
# R^2 of the cross-validated predictions from the loop above is reported instead.
total_score = mt.r2_score(y, yhat)
print(total_score)
# -
# ### METHOD 2 for Random Forest
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn import metrics as mt
from sklearn.model_selection import ShuffleSplit
# we want to predict the X and y data as follows:
if 'Votes' in df2:
y = df2['Votes'].values # get the labels we want
#del df_select['elf'] # get rid of the class label
X= df2[df2.columns.difference(['Votes','SenatorName','State'])].values  # drop the string columns as well as the target
#X = df_select.values # use everything else to predict!
## X and y are now numpy matrices, by calling 'values' on the pandas data frames we
# have converted them into simple matrices to use with scikit learn
# create cross validation iterator
from sklearn.model_selection import KFold
cv_object = KFold(n_splits=2, shuffle=True, random_state=0)  # the target is continuous, so stratification does not apply
# -
cv_object
# +
from sklearn.preprocessing import StandardScaler
# we want to normalize the features based upon the mean and standard deviation of each column.
# However, we do not want to accidentally use the testing data to find out the mean and std (this would be snooping)
# to Make things easier, let's start by just using whatever was last stored in the variables:
## X_train , y_train , X_test, y_test (they were set in a for loop above)
for train_indices, test_indices in cv_object.split(X,y):
# I will create new variables here so that it is more obvious what
# the code is doing (you can compact this syntax and avoid duplicating memory,
# but it makes this code less readable)
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
# -
X_train
print(train_indices, test_indices)
# +
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
RFR = RandomForestRegressor()
RFR.fit(X_train, y_train) # train object
y_hat = RFR.predict(X_test)
# -
# accuracy_score / confusion_matrix / classification_report are classification metrics
# and fail on a continuous target; regression metrics are reported instead.
print('RandomForest R^2:', mt.r2_score(y_test, y_hat))
print('RandomForest MSE:', mt.mean_squared_error(y_test, y_hat))
# +
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
X, y = make_regression(n_features=4, n_informative=2,
                       random_state=0, shuffle=False)
regr = RandomForestRegressor(max_depth=2, random_state=0)
regr.fit(X, y)
print(regr.feature_importances_)
#[ 0.17339552  0.81594114  0.          0.01066333]
print(regr.predict([[0, 0, 0, 0]]))
#[-2.50699856]
# -
# #### KNN
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn import metrics as mt
from sklearn.model_selection import ShuffleSplit
# we want to predict the X and y data as follows:
if 'Votes' in df_select:
y = df_select['Votes'].values # get the labels we want
#del df_select['elf'] # get rid of the class label
X= df_select[df_select.columns.difference(['Votes','SenatorName','State'])].values  # keep only numeric predictors for scaling
#X = df_select.values # use everything else to predict!
## X and y are now numpy matrices, by calling 'values' on the pandas data frames we
# have converted them into simple matrices to use with scikit learn
# create cross validation iterator
from sklearn.model_selection import KFold
cv_object = KFold(n_splits=2, shuffle=True, random_state=0)  # continuous target, so plain KFold instead of StratifiedKFold
# -
cv_object
X
# +
from sklearn.preprocessing import StandardScaler
# we want to normalize the features based upon the mean and standard deviation of each column.
# However, we do not want to accidentally use the testing data to find out the mean and std (this would be snooping)
# to Make things easier, let's start by just using whatever was last stored in the variables:
## X_train , y_train , X_test, y_test (they were set in a for loop above)
for train_indices, test_indices in cv_object.split(X,y):
# I will create new variables here so that it is more obvious what
# the code is doing (you can compact this syntax and avoid duplicating memory,
# but it makes this code less readable)
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
# scale attributes by the training set
scl_obj = StandardScaler()
scl_obj.fit(X_train) # find scalings for each column that make this zero mean and unit std
# the line of code above only looks at training data to get mean and std and we can use it
# to transform new feature data
X_train_scaled = scl_obj.transform(X_train) # apply to training
X_test_scaled = scl_obj.transform(X_test)
# -
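# The cell above stops at scaling and never fits a model; below is a minimal sketch (an addition for illustration) of a nearest-neighbour fit on the scaled fold. Since 'Votes' is continuous, KNeighborsRegressor is used rather than the classifier imported above, and n_neighbors=5 is an arbitrary choice.
# +
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import r2_score
knn = KNeighborsRegressor(n_neighbors=5)
knn.fit(X_train_scaled, y_train)
y_hat_knn = knn.predict(X_test_scaled)
print('KNN R^2 on the held-out fold:', r2_score(y_test, y_hat_knn))
# -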
# ## NLP
# +
import wikipediaapi
import pywikibot
import json
import wikipedia
import matplotlib
import numpy as np
import pandas as pd
import nltk
from nltk import word_tokenize
from nltk.book import *
from nltk.corpus import PlaintextCorpusReader
from urllib import request
import math
import copy
import pandas as pd
# +
#Get single page
wiki_wiki = wikipediaapi.Wikipedia('en')
req = ('Russ Feingold','Patty Murray','Patrick Leahy','Pat Toomey','Ron Wyden','Robert Murphy','Rob Portman','John Hoeven','Chuck Schumer','Kelly Ayotte','Roy Blunt','Jerry Moran','Chuck Grassley','Todd Young ','Mike Crapo','Brian Schatz','Johnny Isakson','Marco Rubio','Richard Blumenthal','John Boozman','Lisa Murkowski','Richard Shelby')
page_py = []
j=0
for i in req:
page_py.append(wiki_wiki.page(i))
print("Page - Exists: %s" % page_py[j].exists())
j=j+1
# -
#Print page summary
j=0
for i in req:
print("Page - Title: %s" % page_py[j].title)
print("Page - Title: %s" % page_py[j].fullurl)
j=j+1
page_py[0]
page_py[0].summary[0:400]
# +
#Get full text
wiki_wiki = wikipediaapi.Wikipedia(
language='en',
extract_format=wikipediaapi.ExtractFormat.WIKI
)
p_wiki = wiki_wiki.page("Test 1")
print(page_py[5].text)
# +
#tokenize
#bk_split = bky.content.split()
j=0
tokenlist = []
for i in req:
tokens = nltk.word_tokenize(page_py[j].text)
tokenlist.append(tokens)
j=j+1
print(tokenlist[2])
# -
# ## Brown Corpus train and test data
#
print(train_data[0])  # train_data is built from the treebank split in the "Regular POS tagger" cell below
# +
from nltk.classify import NaiveBayesClassifier
from nltk.tag.sequential import ClassifierBasedPOSTagger
nbt = ClassifierBasedPOSTagger(train=train_data,
classifier_builder=NaiveBayesClassifier.
train)
#print("Naive bayes on sentence1:",nbt.tag(tokens))
# -
# ## Regular POS tagger
from nltk.corpus import treebank
data = treebank.tagged_sents()
train_data = data[:3500]
test_data = data[3500:]
# +
j=0
pos_tag_list = []
for i in req:
pos_tag_list.append(nltk.pos_tag(tokenlist[j]))
j=j+1
pos_tag_list[0]
# -
type(pos_tag_list)
# +
j=0
#sentences = []
nouns = []
for i in req:
sentences = nltk.sent_tokenize(page_py[j].text) #tokenize sentences
nouns.append([])
for sentence in sentences:
for word,pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS'):
nouns[j].append(word)
j=j+1
nouns[5]
# +
nouns2 = []
j=0
for i in req:
nouns2.append([])
for w in nouns[j]:
nouns2[j].append(w.lower())
j=j+1
nouns2[3]
# -
# ## Frequency distribution on all senators (Higher votes but bad prediction)
# +
fdist = []
j=0
for i in req:
#fdist.append([])
fdist.append(nltk.FreqDist(nouns2[j]))
print(fdist[j].most_common(10))
j=j+1
# -
fdist[0]
nouns2[1][0]
# +
#nouns
#listOfStrings = ('Brian', 'Schatz','Mike', 'Crapo',' John', 'Hoeven','Ron', 'Wyden','Patrick', 'Leahy','Patty', 'Murray','Russ', 'Feingold')
j=0
list2 = []
for i in req:
for w in range(0,len(nouns2[j])):
if(nouns2[j][w] in i.lower()):
print("Token is name")
else:
list2.append(nouns2[j][w])
j=j+1
print(list2)
#if 'at' in listOfStrings :
# -
fdist3 = nltk.FreqDist(list2)
fdist3.most_common(25)
#Selected list
#Senate,committee,act,bill,president,senator,house,election,republican,congress
print(len(nouns))
print(len(nouns2))
token = '%'
print(nltk.pos_tag(token))
print("Naive bayes on sentence1:",nbt.tag(token))
# ### Term Frequency per document across all Senators for the selected words:
# senate, act, committee, election, senator, bill, president, house, republican, congress
# +
#https://stevenloria.com/tf-idf/
import math
from textblob import TextBlob as tb
def tf(word, blob):
return blob.words.count(word) / len(blob.words)
def n_containing(word, bloblist):
return sum(1 for blob in bloblist if word in blob.words)
def idf(word, bloblist):
return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))
def tfidf(word, blob, bloblist):
return tf(word, blob) * idf(word, bloblist)
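# A tiny self-contained check of the helpers above (the three documents are made up for illustration): tf counts a word within one blob, while this idf variant discounts words that appear in most blobs, so a term unique to a document scores highest, a term shared by most documents scores near zero, and a term present in every document can even go slightly negative.
# +
doc1 = tb("the senate passed the bill")
doc2 = tb("the committee reviewed the bill")
doc3 = tb("voters followed the election")
bloblist = [doc1, doc2, doc3]
for word in ("senate", "bill", "the"):
    print(word, round(tfidf(word, doc1, bloblist), 4))
# -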
# +
import wikipediaapi
import pywikibot
import json
from time import sleep
##Not found wikidata errors either due to Ballotpedia or multiple wikipedia
#"Ron Crumpton", ,"Clair Van Steenwyk", "Pam Keith","Jim Barksdale","Pro-Life ","Andrea Zopp","D.J. Smith","Patrick Wiesner","Jim Gray","John Kennedy","Dave Wallace","Cori Bush","Chris Rey","Don Elijah Eckhart","Kevin Stine","Misty K. Snow","Mohammad Said","Arturo Reyes",
## Tested
#
site = pywikibot.Site("en", "wikipedia")
req = ["Loretta Sanchez","Ron Unz","Kamala Harris","Duf Sundheim","Phil Wyman","Tom Del Beccaro","Greg Conlon","Steve Stokes","George C.Yang","Karen Roseberry","Gail K.Lightfoot","Massie Munroe","Pamela Elizondo","Tom Palzer","Don Krampe","Eleanor García","Jarrell Williamson","Von Hougo","President Cristina Grappo","Jerry J.Laws","Mark Matthew Herd","John Thompson Parker","Ling Ling Shi","Herbert G.Peters","Emory Peretz Rodgers","Mike Beitiks","Clive Grey","Jason Hanania","Paul Merritt","Jason Kraus","Don J.Grundmann","Scott A.Vineberg","Tim Gildersleeve","Gar Myers","Billy Falling","Ric M.Llewellyn","Alexis Stuart","Richard Shelby","Jonathan McConnell","John Martin","Marcus Bowman","Shadrack McGill","Charles Nana","Lisa Murkowski","Bob Lochner","Paul Kendall","Thomas Lamb","Ray Metcalfe","Edgar Blatchford","Cean Stevens","John McCain","Kelli Ward","Alex Meluskey","Sean Webster","Ann Kirkpatrick","Alex Bello","Gary Swing","Merissa Hamilton","John Boozman","Curtis Coleman","Conner Eldridge","Michael Bennet (Incumbent) ","Darryl Glenn","Jack Graham","Robert Blaha","Jon Keyser","Ryan Frazier","Richard Blumenthal","Dan Carter","Richard Lion","Jeffery Russell","Andrew Rule","John M. Traceski ","Marco Rubio","Carlos Beruff","Dwight Young","Ernie Rivera","Patrick Murphy","Alan Grayson","Rocky De La Fuente ","Reginald Luster","Derrick Grayson","Mary Kay","Jim F. Barksdale","Cheryl Copeland","John Coyne","Johnny Isakson","Allen Buckley","Brian Schatz","Makani Christensen","Miles Shiratori","Tutz Honeychurch","Joy Allison","John Giuffre","Mike Crapo","Jerry Sturgill","Ray J. Writz ","Mark Kirk","James T. Marter","Tammy Duckworth","Napoleon Harris","Todd Young ","Marlin Stutzman ","Baron Hill ","Chuck Grassley","Patty Judge","Rob Hogg","Tom Fiegen","Bob Krause","Jerry Moran","Monique Singh-Bey","Rand Paul","James Gould","Stephen Slaughter","Sellus Wilder","Ron Leach","Tom Recktenwald","Grant Short","Jeff Kender","Rory Houlihan","Foster Campbell","Chris Van Hollen","Donna Edwards","Freddie Dickson","Theresa Scaldaferri","Violet Staley","Lih Young","Charles Smith","Ralph Jaffe","Blaine Taylor","Ed Tinus","Kathy Szeliga","Chris Chaffee","Chrys Kefalas", "Richard Douglas","Sean Connor","Lynn Richardson","John Graziani","Greg Holmes","Mark McNicholas","Joe Hooe","Anthony Seda","Richard Shawver","Garry Yarrington","Margaret Flowers","Roy Blunt","Kristi Nichols","Ryan Luethy","Bernie Mowinski","Jason Kander", "Chief Wana Dubie","Robert Mack","Jonathan Dine","Herschel Young","Fred Ryman","Catherine Cortez Masto ","Allen Rheinhart ","Liddo Susan O'Briant ","Bobby Mahendra ","Joe Heck","Sharron Angle","Thomas Heck","Eddie Hamilton","D'Nese Davis","Bill Tarbell","Robert Leeds","Juston Preble","Carlo Poliak","Kelly Ayotte","Jim Rubens","Tom Alciere","Gerald Beloin","Stanley Emanuel","Maggie Hassan","Chuck Schumer","Wendy Long","Robin Laverne Wilson","Alex Merced","Richard Burr","Greg Brannon","Paul Wright","Larry Holmquist","Deborah Ross", "Kevin Griffin","Ernest Reeves","John Hoeven","Eliot Glassheim","Robert Marquette","Rob Portman", "Ted Strickland","P.G. 
Sittenfeld","Kelli Prather","Joe DeMare","Robert Murphy","Dax Ewbank","Ron Wyden", "Paul Weaver","Mark Callahan","Sam Carpenter","Faye Stewart","Dan Laschober","Steven Reynolds","Marvin Sandnes","Pat Toomey","Katie McGinty","Joe Sestak","John Fetterman","Joseph Vodvarka", "Jonathan Swinton","Patrick Leahy","Cris Ericson","Patty Murray","Chris Vance","Eric John Markus","Phil Cornell","Scott Nazarino","Mike Luke","Donna Rae","Ted Cummings","Sam Wright","Uncle Mover","Jeremy Teuton","Thor Amundson","Chuck Jackson","Pano Churchill","Zach Haller","Alex Tsimerman","Russ Feingold","Scott Harbach"]
page = []
item = []
names = []
for i in range(0,len(req)):
#sleep(0.1)
print(req[i])
#try:
page.append(pywikibot.Page(site, req[i]))
if(page[i].exists()):
item.append(pywikibot.ItemPage.fromPage(page[i]))
names.append(req[i])
#print(req[i], item[i])
else:
print("Page not found", i)
#except NoPage:
# print("No wikidata page found")
# -
len(names)
# +
#Get single page
wiki_wiki = wikipediaapi.Wikipedia('en')
#req = ('Russ Feingold','Patty Murray','Patrick Leahy','Pat Toomey','Ron Wyden','Robert Murphy','Rob Portman','John Hoeven','Chuck Schumer','Kelly Ayotte','Roy Blunt','Jerry Moran','Chuck Grassley','Todd Young ','Mike Crapo','Brian Schatz','Johnny Isakson','Marco Rubio','Richard Blumenthal','John Boozman','Lisa Murkowski','Richard Shelby')
page_py = []
for i in range(0,len(names)):
page_py.append(wiki_wiki.page(names[i]))
print("Page - Exists: %s" % page_py[i].exists())
# +
#new cell
#Print page summary
for i in range(0,len(names)):
print("Page - Title: %s" % page_py[i].title)
print("Page - Title: %s" % page_py[i].fullurl)
# -
tokenlist = []
for i in range(0,len(names)):
tokens = nltk.word_tokenize(page_py[i].text)
tokenlist.append(tokens)
print(tokenlist[2])
# +
pos_tag_list = []
for i in range(0,len(names)):
pos_tag_list.append(nltk.pos_tag(tokenlist[i]))
pos_tag_list[0]
# +
nouns = []
for i in range(0,len(names)):
sentences = nltk.sent_tokenize(page_py[i].text) #tokenize sentences
nouns.append([])
for sentence in sentences:
for word,pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS'):
nouns[i].append(word)
nouns[5]
# +
#Lower Case
nouns2 = []
for i in range(0,len(names)):
nouns2.append([])
for w in nouns[i]:
nouns2[i].append(w.lower())
nouns2[3]
# +
def tf(word, blob):
return blob.words.count(word) / len(blob.words)
def n_containing(word, bloblist):
return sum(1 for blob in bloblist if word in blob.words)
def idf(word, bloblist):
return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))
def tfidf(word, blob, bloblist):
return tf(word, blob) * idf(word, bloblist)
# +
#page_py[j].text
listOfStrings = ['congress','republican','election','house','senator','president','bill','act','committee','senate']
termfreq = []
for i in range(0,len(names)):
termfreq.append([])
blob = tb(page_py[i].text)
for w in listOfStrings:
if(w in blob.words.lower()):
termfreq[i].append(tf(w, blob))
else:
termfreq[i].append(0)
print("Not a frequent word")
termfreq[0]
# -
termfreq[1]
# +
freqDataframe = pd.DataFrame(data=termfreq)
freqDataframe.head()
# -
freqDataframe['SenatorName'] = names
freqDataframe.head()
freqDataframe.columns = ['congress','republicanword','election','house','senator','president','bill','act','committee','senate','SenatorName']
freqDataframe.head()
len(freqDataframe)
df2[df2['SenatorName']=='Jim F. Barksdale']
result = pd.merge(df2,
freqDataframe,
on='SenatorName', how='left')
result.head(20)
results2 = result[result['Wiki']==1]
len(results2)
#len(result)
results2.describe()
blob1 = tb(page_py[0].text)
type(blob1)
dir(blob1)
results2.head()
# ## Random forest on Subset of Data - Higher votes candidates
finaldata = results2[['Democratic','Republican','republicanword','Other','No of opponents','SenatorName','Votes','Wiki','Google','NumOrgs','NumEvents','NumArticles','congress','election','house','senator','president','bill','act','committee','senate']]
finaldata.head()
finaldata.describe()
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn import metrics as mt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import KFold
# we want to predict the X and y data as follows:
if 'Votes' in finaldata:
y = finaldata['Votes'].values # get the labels we want
#del df_select['elf'] # get rid of the class label
X= finaldata[finaldata.columns.difference(['Votes','SenatorName'])].values
# create cross validation iterator
#cv_object = StratifiedKFold(n_splits=2)
kf = KFold(n_splits = 5, shuffle=True)
# +
#from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
for train_indices, test_indices in kf.split(X,y):
# I will create new variables here so that it is more obvious what
# the code is doing (you can compact this syntax and avoid duplicating memory,
# but it makes this code less readable)
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
# -
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=150, oob_score=True, random_state=0)
model = rf.fit(X_train, y_train)
model
# +
from sklearn.metrics import r2_score
from scipy.stats import spearmanr, pearsonr
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
test_score = r2_score(y_test, predicted_test)
spearman = spearmanr(y_test, predicted_test)
pearson = pearsonr(y_test, predicted_test)
print(f'Out-of-bag R-2 score estimate: {rf.oob_score_:>5.3}')
print(f'Test data R-2 score: {test_score:>5.3}')
print(f'Test data Spearman correlation: {spearman[0]:.3}')
print(f'Test data Pearson correlation: {pearson[0]:.3}')
# +
from sklearn.metrics import mean_squared_error, r2_score
print(mean_squared_error(y_test, predicted_test))
# -
# ## Feature importance Random Forest for candidates with Wiki pages - using NLP features
print(importances)
print(indices)
# +
importances = model.feature_importances_
# Sort feature importances in descending order
indices = np.argsort(importances)[::-1]
# Rearrange feature names so they match the sorted feature importances
feature_cols = finaldata.columns.difference(['Votes','SenatorName'])  # the columns actually used to build X
names = [feature_cols[i] for i in indices]
#color = finaldata.columns
fig = plt.figure(figsize=(15,15))
# Barplot: Add bars
plt.bar(range(X_train.shape[1]), importances[indices],
color=('#DC143C','#DC143C','#DC143C','#DC143C','#DC143C','#DC143C','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9','#A9A9A9'))
# Add feature names as x-axis labels
color = ()
plt.xticks(range(X_train.shape[1]), names, rotation=75, fontsize = 20)
plt.yticks(fontsize = 20)
## Create plot title
plt.title("Feature Importance", fontsize=30)
# Show plot
plt.show()
#importances
#indices
#names
# -
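# Impurity-based importances (above) can favour high-cardinality features; below is a sketch of permutation importance on the held-out fold as a cross-check (requires scikit-learn >= 0.22, so this is an optional addition rather than part of the original workflow):
# +
from sklearn.inspection import permutation_importance
feature_cols = finaldata.columns.difference(['Votes', 'SenatorName'])
perm = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=0)
for idx in perm.importances_mean.argsort()[::-1][:10]:
    print(feature_cols[idx], round(perm.importances_mean[idx], 4))
# -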
finaldata.columns[9]
# ## Feature exploration on important features
# +
np.random.seed(196806)
#colors = df2['NumOrgs'] + df2['NumEvents']
fig = plt.figure(figsize=(12,8))
scatter = plt.scatter(finaldata['congress'],finaldata['Votes'], s=200,alpha=0.5, c='Red', cmap = 'winter')
fig.suptitle('Scatter Plot - % Votes vs "congress" term frequency', fontsize = 30)
plt.ylabel('% Votes', fontsize = 30)
plt.xlabel('congress (term frequency)', fontsize = 30)
#plt.colorbar(scatter)
plt.show()
# -
# ## Predict for whole data and plot
finaldata["Prediction"] = ""
finaldata.head()
predicted = rf.predict(X)
len(predicted)
len(finaldata)
finaldata["Prediction"] = predicted
finaldata.tail()
# +
np.random.seed(19605)
# c=colors,#4c2373
fig = plt.figure(figsize=(15,15))
#plt.scatter(df2[df2['State']=='Florida']['Votes'], df2[df2['State']=='Florida']['Prediction'], s=200,alpha=0.5, c='#4c2373')
plt.scatter(finaldata['Votes'], finaldata['Prediction'], s=200,alpha=0.5, c='Red')
fig.suptitle('Scatter Plot - Predicted Vs Actual', fontsize = 30)
plt.ylabel('Predicted % Votes', fontsize = 30)
plt.xlabel('Actual % Votes', fontsize = 30)
plt.axis('equal')
#plt.yticks(np.arange(0.0, 1.1, 0.1))
#plt.xticks(np.arange(0.0, 1.1, 0.1))
plt.plot( [0,1],[0,1] )
plt.show()
# -
df9 = finaldata[(finaldata['Votes'] <0.35)]
df10 = df9[(df9['Prediction']> 0.4)]
df10
max(df9['Prediction'])
df9
df2.head()
dffinal = df2[df2['Wiki']==0]
dffinal.head()
len(dffinal)
#After dropping state
dffinal = dffinal.drop('State', axis=1)
dffinal.head()
finaldata.head()
len(finaldata)
# +
ultimatedata = dffinal[['Wiki','Prediction','Votes']]
ultimatedata2 = finaldata[['Wiki','Prediction','Votes']]
#ultimatedataframe.append(ultimatedata2, ignore_index=True)
ultimatedataframe=pd.concat([ultimatedata,ultimatedata2])
ultimatedataframe.head()
#ultimatedataframe = pd.concat([dffinal['Wiki','Prediction','Votes'], finaldata['Wiki','Prediction','Votes']], axis=1, sort=False)
# -
len(ultimatedataframe)
# +
np.random.seed(19605)
#color=ultimatedataframe['Wiki']
fig = plt.figure(figsize=(15,15))
#plt.scatter(df2[df2['State']=='Florida']['Votes'], df2[df2['State']=='Florida']['Prediction'], s=200,alpha=0.5, c='#4c2373')
#plt.scatter(ultimatedataframe['Votes'],ultimatedataframe['Prediction'], s=100,alpha=0.5, c=color, cmap = 'cool')
plt.scatter(ultimatedata['Votes'], ultimatedata['Prediction'], s=100,alpha=0.5, c='blue', cmap = 'cool', label = 'No_WikiData_No_NLP')
plt.scatter(ultimatedata2['Votes'], ultimatedata2['Prediction'], s=100,alpha=0.5, c='red', cmap = 'summer', label = 'Yes_WikiData_Yes_NLP')
fig.suptitle('Scatter Plot of Predicted Vs Actual',fontsize = 30)
plt.ylabel('Predicted % Votes', fontsize = 30)
plt.xlabel('Actual % Votes', fontsize = 30)
plt.axis('equal')
#plt.yticks(np.arange(0.0, 1.1, 0.1))
#plt.xticks(np.arange(0.0, 1.1, 0.1))
plt.plot( [0,1],[0,1] )
#plt.legend((ultimatedataframe[ultimatedataframe['Wiki']==0]['Wiki'], ultimatedataframe[ultimatedataframe['Wiki']==1]['Wiki']), ('label1', 'label2'))
plt.legend(fontsize=30)
plt.show()
# -
# ## Descriptive Stats
# +
#dffinal[]
dff = dffinal[(dffinal['Votes'] >= .20) & (dffinal['Votes'] <= .425)]
dfff = dff[(dff['Prediction'] <0.20)]
dfff
# -
result.head()
import seaborn as sns
sns.set(style="ticks")
sns.pairplot(result, hue="Wiki")
select1 = result[['Democratic','Republican','Other','No of opponents','SenatorName','Votes', 'Wiki', 'Google', 'NumOrgs', 'NumEvents', 'NumArticles']]
sns.set(style="ticks")
sns.pairplot(select1, hue="Wiki")
result.columns
fig = plt.figure(figsize=(20,20))
hist = select1.hist(bins=5, figsize=(15,15), color = '#5bc8e0')
plt.show()
# ## Cross validation
#
#df2 = df2.drop('Prediction',axis=1)
#df2.head()
#len(df2)
finaldata=finaldata.drop('Prediction',axis=1)
finaldata.head()
#len(finaldata)
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn import metrics as mt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import KFold
# we want to predict the X and y data as follows:
if 'Votes' in finaldata:
y = finaldata['Votes'].values # get the labels we want
#del df_select['elf'] # get rid of the class label
X= finaldata[finaldata.columns.difference(['Votes','SenatorName'])].values
#X = df_select.values # use everything else to predict!
## X and y are now numpy matrices, by calling 'values' on the pandas data frames we
# have converted them into simple matrices to use with scikit learn
# create cross validation iterator
#cv_object = StratifiedKFold(n_splits=2)
kf = KFold(n_splits = 5, shuffle=True)
# +
from sklearn.preprocessing import StandardScaler
# we want to normalize the features based upon the mean and standard deviation of each column.
# However, we do not want to accidentally use the testing data to find out the mean and std (this would be snooping)
# to Make things easier, let's start by just using whatever was last stored in the variables:
## X_train , y_train , X_test, y_test (they were set in a for loop above)
#for train_indices, test_indices in cv_object.split(X,y):
for train_indices, test_indices in kf.split(X,y):
# I will create new variables here so that it is more obvious what
# the code is doing (you can compact this syntax and avoid duplicating memory,
# but it makes this code less readable)
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
# -
rf = RandomForestRegressor(n_estimators=150, oob_score=True, random_state=0)
rf.fit(X_train, y_train)
# +
from sklearn.metrics import r2_score
from scipy.stats import spearmanr, pearsonr
predicted_train = rf.predict(X_train)
predicted_test = rf.predict(X_test)
test_score = r2_score(y_test, predicted_test)
spearman = spearmanr(y_test, predicted_test)
pearson = pearsonr(y_test, predicted_test)
print(f'Out-of-bag R-2 score estimate: {rf.oob_score_:>5.3}')
print(f'Test data R-2 score: {test_score:>5.3}')
print(f'Test data Spearman correlation: {spearman[0]:.3}')
print(f'Test data Pearson correlation: {pearson[0]:.3}')
# +
from sklearn.metrics import mean_squared_error, r2_score
print(mean_squared_error(y_test, predicted_test))
# -
finaldata["Prediction"] = ""
finaldata.head()
predicted = rf.predict(X)
len(predicted)
len(finaldata)
finaldata["Prediction"] = predicted
finaldata.tail()
finaldata.head()
# +
np.random.seed(19608)
# c=colors,#4c2373
fig = plt.figure(figsize=(4,3))
#plt.scatter(df2[df2['State']=='Florida']['Votes'], df2[df2['State']=='Florida']['Prediction'], s=200,alpha=0.5, c='#4c2373')
plt.scatter(finaldata['Votes'], finaldata['Prediction'], s=200,alpha=0.5, c='Red')
fig.suptitle('Scatter Plot - Predicted Vs Actual', fontsize = 10)
plt.ylabel('Predicted % Votes', fontsize = 10)
plt.xlabel('Actual % Votes', fontsize = 10)
plt.axis('equal')
#plt.yticks(np.arange(0.0, 1.1, 0.1))
#plt.xticks(np.arange(0.0, 1.1, 0.1))
plt.plot( [0,1],[0,1] )
plt.show()
# -
ultimatedata = dffinal[['Wiki','Prediction','Votes']]
ultimatedata2 = finaldata[['Wiki','Prediction','Votes']]
# +
np.random.seed(19605)
#color=ultimatedataframe['Wiki']
fig = plt.figure(figsize=(15,15))
#plt.scatter(df2[df2['State']=='Florida']['Votes'], df2[df2['State']=='Florida']['Prediction'], s=200,alpha=0.5, c='#4c2373')
#plt.scatter(ultimatedataframe['Votes'],ultimatedataframe['Prediction'], s=100,alpha=0.5, c=color, cmap = 'cool')
plt.scatter(ultimatedata['Votes'], ultimatedata['Prediction'], s=100,alpha=0.5, c='blue', cmap = 'cool', label = 'No_WikiData_No_NLP')
plt.scatter(ultimatedata2['Votes'], ultimatedata2['Prediction'], s=100,alpha=0.5, c='red', cmap = 'summer', label = 'Yes_WikiData_Yes_NLP')
fig.suptitle('Scatter Plot of Predicted Vs Actual',fontsize = 30)
plt.ylabel('Predicted % Votes', fontsize = 30)
plt.xlabel('Actual % Votes', fontsize = 30)
plt.axis('equal')
#plt.yticks(np.arange(0.0, 1.1, 0.1))
#plt.xticks(np.arange(0.0, 1.1, 0.1))
plt.plot( [0,1],[0,1] )
#plt.legend((ultimatedataframe[ultimatedataframe['Wiki']==0]['Wiki'], ultimatedataframe[ultimatedataframe['Wiki']==1]['Wiki']), ('label1', 'label2'))
plt.legend(fontsize=30)
plt.show()
# -
print('Thank You!')
| 47,357 |
/analysis/条形图.ipynb
|
cbc15e795024d4d19f93cc2c00343ecc06618a4a
|
[] |
no_license
|
Rockyzsu/convertible_bond
|
https://github.com/Rockyzsu/convertible_bond
| 174 | 70 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 9,916 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="jb1MXhgZwrL_"
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as tfl
from tensorflow.keras import models, datasets
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="RFBNJw271OvJ" outputId="4bc1d8b6-8ca7-4f9c-80be-7e4ecf858469"
data= datasets.cifar10
(X_train,y_train),(X_test,y_test)= data.load_data()
# + id="ABRl7ohXXzw8"
def talk_data(X,y,index):
print(X.shape , y.shape)
plt.figure(figsize=(18,3))
plt.imshow(X[index])  # show from the array passed in rather than the global X_train
#print(X[:4] , y[:4])
# + id="TxitorcJa6LG"
y_train = y_train.reshape(-1,)
#y_train[:4]
X_train=X_train/255
X_test=X_test/255
# + id="koSNcEjaMnm6"
model = tf.keras.Sequential()
model.add(tfl.Conv2D(filters=45,kernel_size=(3,3),activation='relu',input_shape=(32,32,3)))
model.add(tfl.MaxPool2D(2,2))
model.add(tfl.Conv2D(filters=60,kernel_size=(3,3),activation='relu'))
model.add(tfl.MaxPool2D(2,2))
model.add(tfl.Flatten())
model.add(tfl.Dense(70,activation='relu'))
model.add(tfl.Dense(10,activation='softmax'))
# + id="bCXZ1s93hIid"
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="FNqYvk_xiIS7" outputId="f78cf436-9a3a-49d6-cc79-3403daa537b9"
model.fit(X_train,y_train,epochs=12)
# + colab={"base_uri": "https://localhost:8080/"} id="7bPaBEZuCrlE" outputId="4cbcf7f6-d6be-42dd-c0ef-c9a22005f81d"
model.evaluate(X_test,y_test)
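# +
# A short sketch (an addition, not part of the original notebook) of turning the trained
# model's softmax outputs into readable labels; the class-name list below is the
# standard CIFAR-10 ordering.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
probs = model.predict(X_test[:5])        # shape (5, 10) class probabilities
pred_labels = np.argmax(probs, axis=1)
for i, p in enumerate(pred_labels):
    print('predicted:', class_names[p], '| actual:', class_names[int(y_test[i])])
# -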
| 1,717 |
/nycpulseproject/.ipynb_checkpoints/Testing Tweepy_Streaming-checkpoint.ipynb
|
145adc3ab5addebbaa04346ad17e2cacbc0ed874
|
[] |
no_license
|
jianweili0/jianweili0.github.io
|
https://github.com/jianweili0/jianweili0.github.io
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 42,871 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: PySpark
# language: python
# name: pyspark
# ---
# + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 17, "hidden": false, "row": 0, "width": 6}, "report_default": {"hidden": false}}}}
from pyspark.sql import SQLContext
import json
from IPython import display
from IPython.core.display import HTML
import pandas
import time
import numpy as np
import matplotlib.pyplot as plt
sql = SQLContext(sc)
#get tweets on 7rd march 2016 : first 10 seconds
tweetsDf = sql.read.load(format = "au.com.d2dcrc.carbon.spark.tweets", startTime = "2016-03-08T09:00:00Z", endTime = "2016-03-08T09:10:10Z")
tweetsDf.printSchema()
tweetsDf.count()
tweetsDf.take(5)
#displaytweets = tweetsDf.select( "author", "civilEvents", "phraseList", "locations", "bodyText").orderBy("civilEvents")
displaytweets = tweetsDf.select( "author", "publicationTime", "bodyText").orderBy("author")
pandas.set_option('display.max_colwidth', 500)
display.display(displaytweets.toPandas())
# +
def contains_hashtag(bodyText):
if bodyText is None:
return False # This can sometimes be null, due to data inconsistncy.
return '#ReclaimAustralia' in bodyText
# Now we register the above python function with spark as a UDF. A UDF transforms a column or columns into a new column
from pyspark.sql import functions
from pyspark.sql.types import BooleanType
contains_hashtag_udf = functions.udf(contains_hashtag, BooleanType())
# We use the udf by passing a dataframe column to the udf. We can refer to dataframe columns by df.<column_name> or
# df['<column_name>']. The result of the udf is another column, specifically of type boolean.
df2 = df1.filter(contains_hashtag_udf(df1.bodyText))  # df1 is assigned from displaytweets further below; any DataFrame with a bodyText column works
# Again, we have not started any processing yet or reading of any data yet. Only specified how to transform
# our first dataframe df1, into second df2.
# this will trigger processing
df2.show()
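# +
# An alternative sketch (an addition for illustration) that avoids a Python UDF by using
# Spark's built-in column functions, which keeps the filter inside the JVM. As above,
# df1 is assumed to be a tweets DataFrame with a bodyText column.
from pyspark.sql import functions as F
df2_builtin = df1.filter(F.col('bodyText').contains('#ReclaimAustralia'))
df2_builtin.show()
# -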
# +
#I Started working on this scipts
displaytweets.select('bodyText').dropDuplicates().show()
pandas.set_option('display.max_colwidth', 500)
display.display(displaytweets.toPandas())
# -
# + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}}
#trying hands on scripts
displaytweets.describe()
displaytweets.head(20)
displaytweets.show(2, truncate= True)
displaytweets.count()
displaytweets.columns #displays the coloms
displaytweets.describe().show()
displaytweets.describe('bodyText').show()
seedistinct = displaytweets.select('author' , 'publicationTime').distinct().count() # this can be used to output the number of different users in our datasets
print(seedistinct)  # count() returns an int, so print it rather than calling show()
# +
# trying to plot something
import numpy as np
import matplotlib.pyplot as plt
seedistinct = displaytweets.select('author' , 'publicationTime').distinct() #this can be use to output the number of different users in our datasets
#seedistinct.show()
#seedistinct.plt(x=)
#seedusergroups = seedistinct.groupby('author')
#seedistinct.plt(x= 'publicationTime', y= 'author').show()
#myarray = np.array(seedistinct)
#plt.plot(myarray)
#plt.show()
# +
displaytweets.select('publicationTime').distinct().count() #this can be use to output the number of different users in our datasets
#This idea can use to construct cascade.. so for out tweets we use time to check the distinct time clusters
displaytweets.crosstab('publicationTime', 'bodyText').show() #still trying to figure out what it does( calculation of pairwise frequency)
# -
#drop duplicates
displaytweets.select('bodyText').dropDuplicates().show()
displaytweets.dropna().show(200) #dropping not available rows
# +
import numpy as np
import matplotlib.pyplot as plt
myshows = displaytweets.select('bodyText').dropDuplicates()
# plt.hist cannot consume a Spark DataFrame directly; collect something numeric first,
# for example the length of each distinct tweet body.
lengths = [len(row.bodyText) for row in myshows.collect() if row.bodyText is not None]
plt.hist(lengths)
plt.show()
plt.clf()
# + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {"hidden": true}}}}
displaytweets['bodyText']
df1 = displaytweets
df1
pandas.set_option('display.max_colwidth', 500)
display.display(df1.toPandas())
df3 = df1.select("bodyText").show()
#displaytweets = tweetsDf.select( "author", "civilEvents", "phraseList", "locations", "bodyText").orderBy("times")
# -
out_vert[h+pad, w+pad] = max(np.sum(vert_kernel * (tmp[h:h+size, w:w+size])), 0)
out_horiz[h+pad, w+pad] = max(np.sum(horiz_kernel * (tmp[h:h+size, w:w+size])), 0)
out_vert = out_vert[pad:pad+height, pad:pad+width].astype(np.uint8)
out_horiz = out_horiz[pad:pad+height, pad:pad+width].astype(np.uint8)
return out_vert, out_horiz
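# The lines above are only the tail of the original prewitt_filter definition (the per-pixel loop body and the return). A reconstruction sketch under the usual Prewitt formulation is given below so the next cell can run; the kernels and padding are assumptions, not the author's original code.
# +
def prewitt_filter(img, size=3):
    # Prewitt edge filter: returns (vertical, horizontal) edge-response maps.
    # The kernels below are the standard 3x3 Prewitt kernels.
    height, width = img.shape
    pad = size // 2
    out_vert = np.zeros((height + pad * 2, width + pad * 2), dtype=np.float64)
    out_horiz = out_vert.copy()
    tmp = np.pad(img, (pad, pad), 'edge').astype(np.float64)
    vert_kernel = np.array([[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]])
    horiz_kernel = np.array([[-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.]])
    for h in range(height):
        for w in range(width):
            out_vert[h + pad, w + pad] = max(np.sum(vert_kernel * tmp[h:h + size, w:w + size]), 0)
            out_horiz[h + pad, w + pad] = max(np.sum(horiz_kernel * tmp[h:h + size, w:w + size]), 0)
    out_vert = out_vert[pad:pad + height, pad:pad + width].astype(np.uint8)
    out_horiz = out_horiz[pad:pad + height, pad:pad + width].astype(np.uint8)
    return out_vert, out_horiz
# -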
# +
img = cv2.imread('imori.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
fig = plt.figure(figsize=(8, 8))
ax = fig.subplots(1,3)
ax[0].imshow(gray, cmap="gray")
ax[1].imshow(prewitt_filter(gray, 3)[0], cmap="gray")
ax[2].imshow(prewitt_filter(gray, 3)[1], cmap="gray")
# -
# # Task 20: Histogram
# +
img = cv2.imread('imori_dark.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.hist(img.ravel(),bins=255, rwidth=0.8, range=(0, 255))
# -
manages to find the three groups. Unfortunately, that is not always the case. Where the cluster centres are placed in the first round has a big influence on the result.
start = np.array([[5, -8], [7, 2], [11, 2]])
k_means(X, start, 2)
# **Exercise:** What happens here, and why? Would you rather have chosen this result than the one in the previous example?
# Your answer here:
# **How do we solve this problem?** When the machine runs K-means, the initial centres are placed randomly. To try to avoid situations like the one above, the algorithm is run several times, and we keep the result where the sum of all distances from the points to their cluster centre is smallest.
# ### Choosing $K$
# Another drawback of K-means is that we have to choose $K$ before running the algorithm, and different choices of $K$ give completely different results. In the example we have looked at so far it is fairly easy: we can see that there are three groups. If the data had more dimensions, however, we could neither plot them nor inspect them. There are methods for finding good values of $K$, but we will not cover them in this course.
#
# Below is an example with four cluster centres
start = np.array([[5, -8], [7, 2], [11, 2], [8, -3]])
k_means(X, start, 2)
# ### K-means in scikit
#
# K-means is used in much the same way as all the other methods we have seen in scikit so far
# +
# import KMeans
from sklearn.cluster import KMeans
# Create the KMeans model and choose the number of clusters
model = KMeans(n_clusters=3)
# Train the model
model.fit(X)
# Find which cluster each point belongs to, and the cluster centres
clusters = model.predict(X)
centers = model.cluster_centers_
# Plot the result
plt.scatter(X[:,0], X[:,1], c=clusters)
plt.scatter(centers[:,0], centers[:,1], c=[0, 1, 2], s=200)
plt.show()
# -
# Since it is common not to split into training and test sets when doing clustering, we predict on the same data we trained on. As above, we therefore write
#
# ```python
# model.fit(X)
# clusters = model.predict(X)
# ```
#
# For exactly this purpose, KMeans defines a function called `fit_predict` that does both steps with a single call. The code above then becomes
#
# ```python
# clusters = model.fit_predict(X)
# ```
#
# You can of course choose whichever approach you prefer. The first one is exactly like what you are used to from before, so it is perfectly fine to stick with it.
# ## Applications
#
# Clustering can, for example, be used to recommend films/series on Netflix. If you like a film, clustering can be used to find films that resemble it, and there is then a good chance you will like the films in the same cluster as the one you liked.
#
# [Exercise on film recommendations with data from IMDb](../RecommendMovies.ipynb)
| 7,977 |
/.ipynb_checkpoints/dz 3-2-checkpoint.ipynb
|
b1491cc50e57b658a3f54ad1c14d1853a562b3e6
|
[] |
no_license
|
DmitryShv/pyda-13-hw-3
|
https://github.com/DmitryShv/pyda-13-hw-3
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,947 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Task 3-2 (from the lecture "Introduction to data types and loops. Part 1")
# Write a program that repeatedly asks the user for numbers (one at a time) and, after the first zero, prints the sum of all the numbers entered so far.
#
# Example runs of the program:
#
# 1. Enter a number:
# 1
#
# Enter a number:
# 4
#
# Enter a number:
# 6
#
# Enter a number:
# 0
#
# Result:
# 11
#
#
# 2. Enter a number:
#
# 0
#
# Result:
# 0
itog = 0
while True:
x = int(input('Enter a number '))
itog += x
if x == 0:
break
print('Result: ', itog)
| 890 |
/larch_workflow/larch_batch_01.ipynb
|
7414eef67e696698c2d966032855958b92a635c6
|
[] |
no_license
|
scman1/XAS-Workflow-Demo
|
https://github.com/scman1/XAS-Workflow-Demo
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 9,016 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# # TASK 1 - Predicting percentage of a student based on number of study hours.
#
# # Astha Gupta
# Importing libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
lm=LinearRegression()
# #
# Importing data into dataframe
df=pd.read_csv("https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv")
df.head()
#
#
# Splitting data into training and testing sets.
x=df[["Hours"]]
y=df["Scores"]
x_train, x_test, y_train, y_test=train_test_split(x,y,test_size=0.2,random_state=0)
# #
# Building and training the model
lm.fit(x_train,y_train)
#
# A linear relationship between the predictor variable (x) and the target variable (y) is depicted by: y = a*x + b, where a is the slope and b is the intercept.
#
lm.intercept_ # Value of b (the intercept)
lm.coef_ # Value of a (the slope)
# #
# Plotting the regression line
line=lm.coef_*x+lm.intercept_
plt.scatter(x,y)
plt.plot(x,line);
plt.show()
# #
# There is a positive relationship between no. of study hours and percentage.
# #
# Using testing set to asses the performance
yhat=lm.predict(x_test)
assessment=pd.DataFrame({'Actual Value ':y_test,'Predicted Value ':yhat})
assessment
# #
# Predicting percentage for 9.25 study hours
x_value=[[9.25]]
y_predicted=lm.predict(x_value)
print("The predicted score at ",x_value," hour is ",y_predicted)
lm.score(x,y)
# #
# This is the coefficient of determination (R²). Since it is close to 1, we can conclude that the line is a good fit for the data.
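# #
# A cross-check (a sketch, not part of the original task): the same score evaluated on the held-out test set rather than on the full data used above.
from sklearn.metrics import r2_score
print("R^2 on the test set:", r2_score(y_test, yhat))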
# Grouping data by year and month
growth = data.copy()
growth['Date'] = pd.to_datetime(growth.Date,format='%d-%m-%Y')
growth['Year'] = growth['Date'].dt.year
growth['Month'] = growth['Date'].dt.month
growth
# Group data with year = 2012
growth_rate = growth.groupby('Year')
growth_rate_2012 = growth_rate.get_group(2012)
growth_rate_2012.head()
# +
# Getting data for 4 quaters for year 2012
growth_rate_2012_Quaters = growth_rate_2012.groupby('Month')
growth_rate_2012_Q1_1 = growth_rate_2012_Quaters.get_group(1)
growth_rate_2012_Q1_2 = growth_rate_2012_Quaters.get_group(2)
growth_rate_2012_Q1_3 = growth_rate_2012_Quaters.get_group(3)
Quater_1 = growth_rate_2012_Q1_1.append(growth_rate_2012_Q1_2)
Quater_1 = Quater_1.append(growth_rate_2012_Q1_3) #Q1 data of 2012
display(Quater_1.head())
growth_rate_2012_Q2_4 = growth_rate_2012_Quaters.get_group(4)
growth_rate_2012_Q2_5 = growth_rate_2012_Quaters.get_group(5)
growth_rate_2012_Q2_6 = growth_rate_2012_Quaters.get_group(6)
Quater_2 = growth_rate_2012_Q2_4.append(growth_rate_2012_Q2_5)
Quater_2 = Quater_2.append(growth_rate_2012_Q2_6) #Q2 data of 2012
display(Quater_2.head())
growth_rate_2012_Q3_7 = growth_rate_2012_Quaters.get_group(7)
growth_rate_2012_Q3_8 = growth_rate_2012_Quaters.get_group(8)
growth_rate_2012_Q3_9 = growth_rate_2012_Quaters.get_group(9)
Quater_3 = growth_rate_2012_Q3_7.append(growth_rate_2012_Q3_8)
Quater_3 = Quater_3.append(growth_rate_2012_Q3_9) #Q3 data of 2012
display(Quater_3.head())
# Q4 data of 2012
growth_rate_2012_Q4_10 = growth_rate_2012_Quaters.get_group(10)
Quater_4 = growth_rate_2012_Q4_10
display(Quater_4.head())
# +
# Grouping the data "Store" wise each Quarter
df2 = pd.DataFrame(Quater_1.groupby('Store')['Weekly_Sales'].sum())
df2["Quater1_Sales"] = pd.DataFrame(Quater_1.groupby('Store')['Weekly_Sales'].sum())
df2["Quater2_Sales"] = pd.DataFrame(Quater_2.groupby('Store')['Weekly_Sales'].sum())
df2["Quater3_Sales"] = pd.DataFrame(Quater_3.groupby('Store')['Weekly_Sales'].sum())
df2["Quater4_Sales"] = pd.DataFrame(Quater_4.groupby('Store')['Weekly_Sales'].sum())
df2.drop('Weekly_Sales', axis = 1, inplace = True)
df2
# +
# Growth rate formula: ((Present value - Past value) / Past value) * 100
df2['Q3 - Q2'] = df2['Quater3_Sales'] - df2['Quater2_Sales']
df2['Overall Growth Rate in 2012 Q3 %'] = (df2['Q3 - Q2']/df2['Quater2_Sales'])*100
df2['Overall Growth Rate in 2012 Q3 %'].idxmax() # Store which has good growth in Q3-2012
# -
# Plotting the data in Bar chart
plt.figure(figsize=(15,5))
sns.barplot(x=df2.index, y = 'Overall Growth Rate in 2012 Q3 %', data = df2)
# Store 7 has good growth in Q3-2012
# ### 4. Some holidays have a negative impact on sales. Find out holidays which have higher sales than the mean sales in non-holiday season for all stores together.
#finding the mean sales of non holiday and holiday
data.groupby('Holiday_Flag')['Weekly_Sales'].mean()
# +
# Marking the holiday dates
data['Date'] = pd.to_datetime(data['Date'])
Christmas1 = pd.Timestamp(2010,12,31)
Christmas2 = pd.Timestamp(2011,12,30)
Christmas3 = pd.Timestamp(2012,12,28)
Christmas4 = pd.Timestamp(2013,12,27)
Thanksgiving1=pd.Timestamp(2010,11,26)
Thanksgiving2=pd.Timestamp(2011,11,25)
Thanksgiving3=pd.Timestamp(2012,11,23)
Thanksgiving4=pd.Timestamp(2013,11,29)
LabourDay1=pd.Timestamp(2010,9,10)
LabourDay2=pd.Timestamp(2011,9,9)
LabourDay3=pd.Timestamp(2012,9,7)
LabourDay4=pd.Timestamp(2013,9,6)
SuperBowl1=pd.Timestamp(2010,2,12)
SuperBowl2=pd.Timestamp(2011,2,11)
SuperBowl3=pd.Timestamp(2012,2,10)
SuperBowl4=pd.Timestamp(2013,2,8)
#Calculating the mean sales during the holidays
Christmas_mean_sales=data[(data['Date'] == Christmas1) | (data['Date'] == Christmas2) | (data['Date'] == Christmas3) | (data['Date'] == Christmas4)]
Thanksgiving_mean_sales=data[(data['Date'] == Thanksgiving1) | (data['Date'] == Thanksgiving2) | (data['Date'] == Thanksgiving3) | (data['Date'] == Thanksgiving4)]
LabourDay_mean_sales=data[(data['Date'] == LabourDay1) | (data['Date'] == LabourDay2) | (data['Date'] == LabourDay3) | (data['Date'] == LabourDay4)]
SuperBowl_mean_sales=data[(data['Date'] == SuperBowl1) | (data['Date'] == SuperBowl2) | (data['Date'] == SuperBowl3) | (data['Date'] == SuperBowl4)]
Christmas_mean_sales
list_of_mean_sales = {'Christmas_mean_sales' : round(Christmas_mean_sales['Weekly_Sales'].mean(),2),
'Thanksgiving_mean_sales': round(Thanksgiving_mean_sales['Weekly_Sales'].mean(),2),
'LabourDay_mean_sales' : round(LabourDay_mean_sales['Weekly_Sales'].mean(),2),
'SuperBowl_mean_sales':round(SuperBowl_mean_sales['Weekly_Sales'].mean(),2),
'Non holiday weekly sales' : round(data[data['Holiday_Flag'] == 0 ]['Weekly_Sales'].mean(),2)}
list_of_mean_sales
# -
# "Thanksgiving Day" has much high sale than mean sales in Non-Holiday season.
# ### 5. Provide a monthly and semester view of sales in units and give insights
# +
#Monthly sales
monthly = data.groupby(pd.Grouper(key='Date', freq='1M')).sum() # groupby each 1 month
monthly=monthly.reset_index()
fig, ax = plt.subplots(figsize=(10,5))
X = monthly['Date']
Y = monthly['Weekly_Sales']
plt.plot(X,Y)
plt.title('Month Wise Sales')
plt.xlabel('Monthly')
plt.ylabel('Weekly_Sales')
# Analysis: the highest sum of sales is recorded between Jan 2011 and Mar 2011.
# +
#Semester Sales
Semester = data.groupby(pd.Grouper(key='Date', freq='6M')).sum()
Semester = Semester.reset_index()
fig, ax = plt.subplots(figsize=(10,5))
X = Semester['Date']
Y = Semester['Weekly_Sales']
plt.plot(X,Y)
plt.title('Semester Wise Sales')
plt.xlabel('Semester')
plt.ylabel('Weekly_Sales')
# Analysis: sales are lowest at the beginning of the 1st semester of 2010 and the 1st semester of 2013
# -
# ### For Store 1 – Build prediction models to forecast demand
#
# Linear Regression – Utilize variables like date and restructure dates as 1 for 5 Feb 2010 (starting from the earliest date in order). Hypothesize if CPI, unemployment, and fuel price have any impact on sales.
# +
hypothesis = growth.groupby('Store')[['Fuel_Price','Unemployment', 'CPI','Weekly_Sales', 'Holiday_Flag']]
factors = hypothesis.get_group(1) #Filter by Store 1
day_arr = [1]
for i in range (1,len(factors)):
day_arr.append(i*7)
factors['Day'] = day_arr.copy()
factors
# -
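# As a rough sketch of the regression described above (not part of the original analysis; it assumes the `factors` frame built in the previous cell, and the names `X_store1`, `y_store1` and `store1_model` are purely illustrative), we can regress Weekly_Sales on the restructured day index and the candidate drivers for Store 1:
# +
from sklearn.linear_model import LinearRegression

# Hedged sketch: Weekly_Sales ~ Day + CPI + Unemployment + Fuel_Price for Store 1
X_store1 = factors[['Day', 'CPI', 'Unemployment', 'Fuel_Price']]
y_store1 = factors['Weekly_Sales']
store1_model = LinearRegression().fit(X_store1, y_store1)
print("R^2 on Store 1 data:", store1_model.score(X_store1, y_store1))
print("Coefficients:", dict(zip(X_store1.columns, store1_model.coef_)))
# -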
sns.heatmap(factors.corr(), annot = True)
# The variables with positive coefficients are positively correlated with Weekly_Sales. CPI and Holiday_Flag in particular are fairly strongly correlated with Weekly_Sales; Holiday_Flag = 1 marks a holiday week, and those weeks show higher sales than non-holiday weeks.
sns.lmplot(x='Fuel_Price', y = 'Unemployment', data = factors)
#plt.figure()
sns.lmplot(x='CPI', y = 'Unemployment', data = factors)
# As Fuel_Price and CPI increase, the unemployment rate decreases fairly consistently (shown in the regression plots above).
# ### Hypothesis Testing - CPI
# +
from scipy import stats
ttest,pval = stats.ttest_rel(factors['Weekly_Sales'],factors['CPI'])
sns.distplot(factors.CPI)
plt.figure()
print(pval)
if pval<0.05:
print("reject null hypothesis")
else:
print("accept null hypothesis")
sns.scatterplot(x='CPI', y = 'Weekly_Sales', data = factors, hue = 'Holiday_Flag')
#plt.figure()
sns.lmplot(x='CPI', y = 'Weekly_Sales', data = factors, hue = 'Holiday_Flag')
#plt.figure()
sns.lineplot(x='CPI', y = 'Weekly_Sales', data = factors)
# -
# 1) Earlier, we rejected the null hypothesis that there is no relationship between Weekly_Sales and CPI, and the graphs above indeed show a positive correlation between CPI and Weekly_Sales.
#
# 2) CPI is not normally distributed, and the regression plot shows how Weekly_Sales varies with CPI during holiday weeks versus non-holiday weeks.
# ### Hypothesis Testing - Fuel_Price
# +
from scipy import stats
ttest,pval = stats.ttest_rel(factors['Weekly_Sales'],factors['Fuel_Price'])
sns.distplot(factors.Fuel_Price)
plt.figure()
print(pval)
if pval<0.05:
print("reject null hypothesis")
else:
print("accept null hypothesis")
sns.scatterplot(x='Fuel_Price', y = 'Weekly_Sales', data = factors, hue = 'Holiday_Flag')
#plt.figure()
sns.lmplot(x='Fuel_Price', y = 'Weekly_Sales', data = factors, hue = 'Holiday_Flag')
#plt.figure()
sns.lineplot(x='Fuel_Price', y = 'Weekly_Sales', data = factors)
# -
# Sales are higher when Fuel_Price is high, yet we also see more sales during holiday weeks when fuel prices were fairly low, so it is not clear whether Fuel_Price has a direct effect on sales.
# ### Hypothesis Testing - Unemployment
# +
from scipy import stats
ttest,pval = stats.ttest_rel(factors['Weekly_Sales'],factors['Unemployment'])
sns.distplot(factors.Unemployment)
plt.figure()
print(pval)
if pval<0.05:
print("reject null hypothesis")
else:
print("accept null hypothesis")
sns.scatterplot(x='Unemployment', y = 'Weekly_Sales', data = factors, hue = 'Holiday_Flag')
#plt.figure()
sns.lmplot(x='Unemployment', y = 'Weekly_Sales', data = factors, hue = 'Holiday_Flag')
#plt.figure()
sns.lineplot(x='Unemployment', y = 'Weekly_Sales', data = factors)
# -
# As the unemployment rate increases, weekly sales stay low except for a few outliers that fall on holiday weeks; people appear to buy only necessary products and try to save more. Hence rejecting the null hypothesis was appropriate.
# ### Plotting the Weekly_sales for store 1 (Day wise)
plt.figure(figsize=(10,5))
sns.barplot(x='Day', y = 'Weekly_Sales', data = factors.head(50), hue = 'Holiday_Flag')
# We can infer that sales for Store 1 are comparatively higher on holiday days.
# ## Thank you!!
| 11,570 |
/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
72658bd9d4a3c0dfb3cda886c3992d41987971b9
|
[] |
no_license
|
kn27/PGM
|
https://github.com/kn27/PGM
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 55,972 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: venv
# language: python
# name: venv
# ---
from scipy import stats as stats
import scipy
import numpy as np
from numpy import random
from matplotlib import pyplot as plt
# %matplotlib inline
# +
#play with Gamma, Poisson, digamma
# -
fig, ax = plt.subplots(1, 1)
a = 2
scale = 3
priors = random.gamma(shape = a, scale = scale, size = 10000)
ax.hist(priors, bins = 50)
plt.show()
#x = np.linspace(stats.gamma.ppf(0.01, a),stats.gamma.ppf(0.99, a), 100)
#ax.hist(random.gamma(shape = a, scale = scale),'r-', lw=5, alpha=0.6, label='gamma pdf'))
fig, ax = plt.subplots(1, 1)
a = 0.3
scale = 1
priors = random.gamma(shape = a, scale = scale, size = 10000)
ax.hist(priors, bins = 50)
plt.show()
fig, ax = plt.subplots(1, 1)
a = 0.3
scale = 1
priors = random.dgamma(shape = a, scale = scale, size = 10000)
ax.hist(priors, bins = 50)
plt.show()
fig, ax = plt.subplots(1, 1)
X = np.arange(0, 100000000000, 100000000)
ax.plot(X,[scipy.special.digamma(x) for x in X])
plt.show()
fig, ax = plt.subplots(1, 1)
a = 2
scale = 1/1
priors = stats.gamma.rvs(a = a, scale = scale, size = 10000)
ax.hist(priors, bins = 50)
plt.show()
posteriors = [random.gamma(shape = a, scale = prior, size = 10000) for prior in priors]
posteriors = [y for x in posteriors for y in x]
plt.hist(posteriors, bins = 100)
plt.show()
import numpy as np
from scipy.sparse import csr_matrix
A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
v = np.array([1, 0, -1])
A.dot(v)
A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
v = np.array([1, 0, -1])
A.dot(v)
from scipy.sparse import coo_matrix
row = np.array([0, 0, 1, 2, 2, 2])
col = np.array([0, 2, 2, 0, 1, 2])
data = np.array([1, 2, 3, 4, 5, 6])
test = csr_matrix((data, (row, col)), shape=(3, 3))
test.toarray()
test.indices
test.indptr
# +
# csr_matrix??
# -
test.data
posteriors = [*posteriors]  # unpack back into a plain list
random.gamma(shape=a, scale=scale)
a = [[1,2],[3,4]]
[y for x in a for y in x]  # flatten the nested list
fig, ax = plt.subplots(1, 1)
a = 1
scale = 3
prior = random.gamma(shape = a, scale = scale, size = 10000)
ax.hist(prior, bins = 100)
plt.show()
#x = np.linspace(stats.gamma.ppf(0.01, a),stats.gamma.ppf(0.99, a), 100)
#ax.hist(random.gamma(shape = a, scale = scale),'r-', lw=5, alpha=0.6, label='gamma pdf'))
fig, ax = plt.subplots(1, 1)
a = 2  # shape parameter (assumed value)
scale = 3
x = np.linspace(stats.gamma.ppf(0.01, a),stats.gamma.ppf(0.99, a), 100)
ax.plot(x, stats.gamma.pdf(x, a = a, scale = scale),'r-', lw=5, alpha=0.6, label='gamma pdf')
fig, ax = plt.subplots(1, 1)
a = 10
scale = 0.7
x = np.linspace(stats.gamma.ppf(0.01, a),stats.gamma.ppf(0.99, a), 100)
ax.plot(x, stats.gamma.pdf(x, a = a, scale = scale),'r-', lw=5, alpha=0.6, label='gamma pdf')
import tensorflow as tf
import tensorflow_probability as tfp
dir(tfp)
dir(tfp.edward2)
# +
from tensorflow_probability import edward2 as ed
normal_rv = ed.Normal(loc=0., scale=1.)
## <ed.RandomVariable 'Normal/' shape=() dtype=float32>
normal_rv.distribution.log_prob(1.231)
## <tf.Tensor 'Normal/log_prob/sub:0' shape=() dtype=float32>
dirichlet_rv = ed.Dirichlet(concentration=tf.ones([2, 10]))
## <ed.RandomVariable 'Dirichlet/' shape=(2, 10) dtype=float32>
# -
x = ed.Normal(loc=tf.zeros(10), scale=tf.ones(10))
y = 5.
x + y, x / y
## (<tf.Tensor 'add:0' shape=(10,) dtype=float32>,
## <tf.Tensor 'div:0' shape=(10,) dtype=float32>)
tf.tanh(x * y)
## <tf.Tensor 'Tanh:0' shape=(10,) dtype=float32>
x[2] # 3rd normal rv
## <tf.Tensor 'strided_slice:0' shape=() dtype=float32>
# +
def logistic_regression(features):
"""Bayesian logistic regression p(y | x) = int p(y | x, w, b) p(w, b) dwdb."""
coeffs = ed.Normal(loc=tf.zeros(features.shape[1]), scale=1., name="coeffs")
intercept = ed.Normal(loc=0., scale=1., name="intercept")
outcomes = ed.Bernoulli(
logits=tf.tensordot(features, coeffs, [[1], [0]]) + intercept,
name="outcomes")
return outcomes
num_features = 10
features = tf.random.normal([100, num_features])
outcomes = logistic_regression(features)
# -
plt.hist(ed.Gamma(concentration = [4]*100000, rate= 1).value, bins = 100)
plt.hist(ed.Gamma(concentration = [1]*1000, rate= 2))
plt.plot(ed.Gamma(concentration = [1]*1000, rate= 2))
# +
import tensorflow_probability as tfp
def logistic_regression_posterior(num_features):
"""Posterior of Bayesian logistic regression p(w, b | {x, y})."""
posterior_coeffs = ed.MultivariateNormalTriL(
loc=tf.get_variable("coeffs_loc", [num_features]),
scale_tril=tfp.trainable_distributions.tril_with_diag_softplus_and_shift(
tf.get_variable("coeffs_scale", [num_features*(num_features+1) / 2])),
name="coeffs_posterior")
posterior_intercept = ed.Normal(
loc=tf.get_variable("intercept_loc", []),
scale=tfp.trainable_distributions.softplus_and_shift(
tf.get_variable("intercept_scale", [])),
name="intercept_posterior")
return posterior_coeffs, posterior_intercept
num_features = 5
posterior_coeffs, posterior_intercept = logistic_regression_posterior(num_features)
# Execute the program, returning a sample
# (np.ndarray of shape (55,), np.ndarray of shape ()).
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
posterior_coeffs_, posterior_intercept_ = sess.run(
[posterior_coeffs, posterior_intercept])
| 5,425 |
/Python Programs/Change Row and Column to 0 if any 0 is encountered.ipynb
|
17e8ec714c95aaaf4c635059d372fa278b8d5101
|
[] |
no_license
|
SohanBiswasSB/Random
|
https://github.com/SohanBiswasSB/Random
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,493 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
Author: Bryan Bo Cao
Email: [email protected] or [email protected]
Github Repo: https://github.com/BryanBo-Cao/neuralnets-deeplearning
Reference:
https://github.com/tylersco/deep-neural-networks-art-images
http://www.scipy-lectures.org/advanced/image_processing/
"""
import numpy as np
from scipy import misc
print ("libraries imported")
class ArtData:
    def __init__(self, path):
self.path = path
self.image_folder = '16x16-small'
self.metadata_filename = 'metadata.csv'
self.train_images = {}
self.alpha_pairs = {
'same': [],
'diff': [],
}
self.beta_pairs = {
'same': [],
'diff': []
}
def load_images(self):
pass
def load_metadata(self):
pass
# Set every row and column that contains a 0 entirely to 0
def convert(mat):
    rows, cols = len(mat), len(mat[0])
    zero_rows, zero_cols = set(), set()
    # first pass: remember which rows/columns contain a 0
    for i in range(rows):
        for j in range(cols):
            if mat[i][j] == 0:
                zero_rows.add(i)
                zero_cols.add(j)
    # second pass: zero out the marked rows and columns
    for i in range(rows):
        for j in range(cols):
            if i in zero_rows or j in zero_cols:
                mat[i][j] = 0
if __name__ == '__main__':
mat = [
[-1, 1, 0, 1, 1],
[-11, 1, 1, 1, 1],
[1, 1, 5, 1, 1],
[1, 1, 1, 1, -1],
[70, 1, 1, 1, 1]
]
# convert the matrix
convert(mat)
# print matrix
for r in mat:
print(r)
# -
| 1,392 |
/Decision Tree/Homework.ipynb
|
c55aa01517e0e9235040e51dcfaa921ac4f8d839
|
[] |
no_license
|
rgridnev/python_course
|
https://github.com/rgridnev/python_course
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 67,709 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# language: python
# name: python37464bitbaseconda5eae52ef982343258f102ad8e7e22d57
# ---
# # Homework: "decision trees"
#
# ### Train a decision tree on the titanic dataset
import pandas as pd
train = pd.read_csv('titanic/train.csv')
train.head()
test = pd.read_csv('titanic/test.csv')
test.head()
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
train['Sex'] = train['Sex'].apply(lambda x: 1 if x == 'male' else 0)
train = pd.get_dummies(train, columns=['Embarked'])
train.head()
train.drop(['Ticket', 'Cabin', 'Name'], axis = 1, inplace = True)
train.head()
train.dropna(inplace = True)
x = train.drop('Survived', axis=1)
y = train.Survived
dt.fit(x, y)
gs = pd.read_csv('titanic/gender_submission.csv')
gs.head()
test = test.merge(gs, on='PassengerId', how='left')
test['Sex'] = test['Sex'].apply(lambda x: 1 if x == 'male' else 0)
test = pd.get_dummies(test, columns=['Embarked'])
test.drop(['Ticket', 'Cabin', 'Name'], axis = 1, inplace = True)
test.dropna(inplace = True)
test.head()
xt = test.drop('Survived', axis=1)
yt = test.Survived
yp = dt.predict(xt)
from sklearn import metrics
metrics.roc_auc_score(yt, yp)
# ### Find the optimal tree depth, just as we did in class
# +
from sklearn.model_selection import GridSearchCV
import numpy as np
depths = np.arange(1,20)
grid = {'max_depth': depths}
gridsearch = GridSearchCV(DecisionTreeClassifier(), grid, scoring='neg_log_loss', cv=5)
# -
gridsearch.fit(x, y)
gridsearch.cv_results_
from matplotlib import pyplot as plt
scores = [-x for x in gridsearch.cv_results_['mean_test_score']]
plt.plot(depths, scores)
plt.scatter(depths, scores)
best_point = np.argmin(scores)
plt.scatter(depths[best_point], scores[best_point], c='g', s=100)
plt.show()
dt = DecisionTreeClassifier(max_depth=2)
dt.fit(x,y)
yp = dt.predict(xt)
metrics.roc_auc_score(yt, yp)
# ### Identify the most important features
dt.feature_importances_
# ### Visualize the resulting tree
from six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(dt, out_file=dot_data,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
x.columns
from sklearn import tree
tree.plot_tree(dt, feature_names = x.columns)
| 2,691 |
/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
3910de284ed199287123149ac9f9134473b6a51b
|
[] |
no_license
|
wiemMH/Brief-LSTM
|
https://github.com/wiemMH/Brief-LSTM
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 7,123 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Import file to python
filename='/Users/komerle/Desktop/S09'
def files():
n = 0
while True:
yield open('/Users/komerle/Desktop/output/Part%d.txt' % n, 'w')
n += 1
#splitting file into number of boxes
pat = 'Start Date:'
fs = files()
outfile = next(fs)
with open(filename) as infile:
for line in infile:
if pat not in line:
outfile.write(line)
else:
items = line.split(pat)
outfile.write(items[0])
for item in items[1:]:
outfile = next(fs)
outfile.write(pat + item)
# +
import pandas as pd
import numpy as np
import fnmatch
import os
# start with an empty results table holding the expected columns
data = pd.DataFrame(columns=["Box","Rat","+Trials","-Trials","+Press","-Press",
                             "+Infusion","-Infusion","+Latency","-Latency"])
for file in os.listdir('/Users/komerle/Desktop/output'):
if fnmatch.fnmatch(file, '*.txt') and file != "Part0.txt":
filePath = '/Users/komerle/Desktop/output/' + file
#reading rat number
Rat=pd.read_table(filePath, header=None, names=None,
usecols=[1], skiprows=2, nrows=1, delim_whitespace=True)
#reading box number
Box=pd.read_table(filePath, header=None, names=None, index_col=None,
usecols=[1], skiprows=5, nrows=1, delim_whitespace=True)
#reading C array
cArray=pd.read_table(filePath, header=None, names=None, index_col=None,
usecols=[1,2,3,4,5], skiprows=33, nrows=5,
delim_whitespace=True)
#position of data to import into new array
rat = Rat.iloc[0][1]
box = Box.iloc[0][1]
infnplus = cArray.iloc[0][4]
infnminus = cArray.iloc[4][4]
pressplus = cArray.iloc[0][2]
pressminus = cArray.iloc[0][3]
trialsplus = cArray.iloc[2][4]
trialsminus = cArray.iloc[3][5]
latencyplus = cArray.iloc[1][4]
latencyminus = cArray.iloc[1][5]
#new row of data in order
row = {"Box": [box],"Rat": [rat],"+Trials":[trialsplus],"-Trials": [trialsminus],"+Press":
[pressplus],"-Press": [pressminus],"+Infusion": [infnplus], "-Infusion":
[infnminus],"+Latency": [latencyplus],"-Latency": [latencyminus]}
datatemp=pd.DataFrame(row, columns=["Box","Rat","+Trials","-Trials","+Press","-Press",
"+Infusion","-Infusion","+Latency","-Latency"])
data=pd.concat([data,datatemp], axis=0, join='outer', join_axes=None, ignore_index=True, copy=True)
#data transcribed to excel worksheet
data=data.sort_values(by='Box')
print(data)
writer = pd.ExcelWriter('/Users/komerle/Desktop/practice/DSdata3.xlsx', engine='xlsxwriter')
data.to_excel(writer, sheet_name='DS_data')
writer.save()
#data.to_excel("/Users/komerle/Desktop/practice/DSdata2.xlsx",sheet_name="DS_data")
# -
data=data.sort_values(by='Box')
print(data)
writer = pd.ExcelWriter('/Users/komerle/Desktop/practice/DSdata3.xlsx', engine='xlsxwriter')
data.to_excel(writer, sheet_name='DS_data')
writer.save()
| 3,326 |
/day_of_case_practice.ipynb
|
312776b307fc6e13cf7740ff5afc862becbaf0cd
|
[] |
no_license
|
chenboy3/mittrading2017
|
https://github.com/chenboy3/mittrading2017
| 2 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 12,953 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **README**: Information about notebook
#
# This is a starter notebook, provided to you on competition day, with boilerplate code to load all of the data (both the sample data and the data for each case as it goes live). Since this notebook is mainly for you to get acquainted with loading/manipulating data, you will be able to access the sample data and data from any case. However, on competition day you will only be able to access data from the case that is currently live. If you want to use a different statistical analysis framework, you can also download the zip files containing all the data that can be accessed from this notebook from the website: http://18.216.4.171:8080/ and write similar boilerplate code yourself. On the day of the competition, you will be able to access the sample data and data from each case as it goes live as a downloadable zip file.
# +
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
# Make sure you have the following libaries installed.
# Can be installed with `pip install` or `conda install`
# on terminal if you have pip or conda on your machine.
import pandas as pd
import requests
# +
#This cell contains logic for building the URLs used to import case data and submit predictions
base_url = 'http://18.216.4.171:8080'
import_url = '/data/'
sample_import_url = '/public/sample_data/'
submit_url = '/submissions/new'
def get_import_url(case_number, signal_name):
return base_url + import_url + str(case_number) + '/' + signal_name
def get_sample_import_url(signal_name):
return base_url + sample_import_url + signal_name + '.csv'
def get_submit_url():
return base_url + submit_url
def get_signal_list():
return ['A', 'B', 'C', 'D', 'E']
# -
#This class helps load data (sample data and case data)
#and also allows you to submit prediction intervals programmatically
class DayOf(object):
def __init__(self, credentials):
'''
Initializes object with your team credentials
Will not be able to submit data to our server if a valid
set of credentials is not passed to this constructor
'''
self.team_credentials = credentials
def load_data(self, case_number):
'''
Use this function to import data for case_number
Note: will only be able to import data for a case while a case is active
Alternatively, you can visit the URL given by `get_import_url(case_number)`
and directly download the data for that case as a csv file
Returns a dict with keys corresponding to signal names and values
as a pandas DataFrame
The fields of each pandas DataFrame are
time_step (contains time step index, integer)
bid_size (total number of bids, float )
ask_size (total number of asks, float )
bid_exec (number of executions at bid price, float )
ask_exec (number of executions at ask price, float )
spread (spread between bid/ask prices, float )
price (contains stock price, float )
'''
result_data = {}
for signal_name in get_signal_list():
request_data = requests.get(get_import_url(case_number, signal_name))
if not request_data.status_code == 200:
print('Error: cannot load data for ' +
str(case_number) + signal_name + ' at this time')
return result_data
loaded_request_data = StringIO(request_data.text)
result_data[signal_name] = pd.read_csv(loaded_request_data)
return result_data
def submit_data(self, case_number, signal_name, lower_bound, upper_bound):
'''
Parameters should be self-explanatory
Use this method to submit your guesses to our server
You can also do this directly on our website at `base_url`
Will raise an error if not your data wasn't submitted
'''
submission_data = {
'team_credentials': self.team_credentials,
'submission_for': case_number,
'signal': signal_name,
'lower_bound': lower_bound,
'upper_bound': upper_bound
}
request_data = requests.post(get_submit_url(), submission_data)
request_datadict = request_data.json()
if not request_datadict['status'] == 'success':
raise RuntimeError(request_datadict['reason'])
# Initalizes a DayOf instance as D
D = DayOf('Insert your team credentials here')
#on the day of competition, you will need to insert valid credentials here, which will be emailed out to you,
#to submit your intervals (if you want to submit it programmatically)
#However to retrieve data only, you do not need valid credentials
# Running the cell below will populate sample_data with the sample data on our server so you can build your models. For format information, see DayOf.load_data above. Note that for practice purposes, prices are not correlated with the features at all (so don't be surprised if all of your modeling attempts fail)! The point of this notebook is to help you get familiarized with getting/manipulating the data and submitting predictions.
sample_data = {}
for signal_name in get_signal_list():
request_data = requests.get(get_sample_import_url(signal_name))
if not request_data.status_code == 200:
print('Error: cannot load data for ' +
signal_name + ' at this time')
continue
loaded_request_data = StringIO(request_data.text)
sample_data[signal_name] = pd.read_csv(loaded_request_data)
# You can convert this pandas dataframe into a numpy array
# with sample_data[signal_name].values
# To load data for a **case number** 1 and store in `case1_data`, do
# ```python
# case1_data = D.load_data(1)
# ```
#
# To submit **lower bound** = 5.1 and **upper bound** = 5.9 for **case number** 1 and **stock name** A, do
# ```python
# D.submit_data(1, 'A', 5.1, 5.9)
# ```
# Multiple submissions are acceptable. We will only consider your latest submission within the time limit of the case.
# You can also submit your answers on a gui interface at `http://18.216.4.171:8080/`.
# **PRACTICE ONLY**
#
# Currently, you will be able to access all of the data from all of the cases (each distinct 5 minute period is referred to as a case). On competition day, you will only be able to access the sample data and data from the currently active case. Allowing you to view data from all the different cases will allow you to familiarize yourself with the format for the case data and submission format.
case1_data = D.load_data(1)
#there are 50 ticks of data for each stock, of which only 49 contain information
#notice that the last tick contains no values for the features and for the price of the stock,
#as the price is what you're trying to predict
print("Head for stock A")
print (case1_data['A'].head()) #show the top of the dataframe containing data for stock A in case 1
print("Tail for stock A")
print (case1_data['A'].tail()) #show the bottom of the dataframe containing data for stock A in case 1
# An example of something one might do with the data is given below:
#
# Say we're trying to train a model for stock A: we might want to regress the log return from time t-1 to time t with the features at time t. Obviously with this data, since the prices are randomly generated, so the log returns are normally distributed, there will be no correlation between the log returns and the signals. But in general, you might expect that relationships such as these will hold.
# +
import numpy as np
A_prices = sample_data['A']['price'].values
A_log_returns = np.log(A_prices[1:]) - np.log(A_prices[:-1])
feature_cols = sample_data['A'].columns[1:6] #only columns 1-5 inclusive have relevant features
A_features = sample_data['A'][feature_cols].values[:-1] #don't want to include last row in features
from sklearn.linear_model import LinearRegression #may need to install this
# See http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
model = LinearRegression()
X_train = A_features[:-10] #features
y_train = A_log_returns[:-10] #what we want to predict
X_test = A_features[-10:]
y_test = A_log_returns[-10:]
model.fit(X_train, y_train)
print(model.score(X_test, y_test)) #print r^2 of predicting on X_test with true values
print('Wow our r^2 sucks because we\'re trying to predict random noise!')
| 8,865 |
/mass_spring/dual_mass_spring_test.ipynb
|
e25b14428adcff0cb7315737ba61d2e17f488969
|
[] |
no_license
|
song-ranlab/DLL
|
https://github.com/song-ranlab/DLL
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,203,609 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.constant([[2, 5, 3, -5],
[0, 3,-2, 5],
[4, 3, 5, 3],
[6, 1, 4, 0]])
y = tf.constant([[4, -7, 4, -3, 4],
[6, 4,-7, 4, 7],
[2, 3, 2, 1, 4],
[1, 5, 5, 5, 2]])
floatx = tf.constant([[2., 5., 3., -5.],
[0., 3.,-2., 5.],
[4., 3., 5., 3.],
[6., 1., 4., 0.]])
# -
tf.transpose(x).eval() # Transpose matrix
tf.matmul(x, y).eval()
tf.matrix_determinant(floatx).eval() # Matrix determinant
tf.matrix_inverse(floatx).eval() # Matrix inverse
tf.matrix_solve(floatx, [[1],[1],[1],[1]]).eval()
# +
x = tf.constant([[1, 2, 3],
[3, 2, 1],
[-1,-2,-3]])
boolean_tensor = tf.constant([[True, False, True],
[False, False, True],
[True, False, False]])
# -
tf.reduce_prod(x, reduction_indices=1).eval() # reduce prod
# +
seg_ids = tf.constant([0,1,1,2,2]); # Group indexes : 0|1,2|3,4
tens1 = tf.constant([[2, 5, 3, -5],
[0, 3,-2, 5],
[4, 3, 5, 3],
[6, 1, 4, 0],
[6, 1, 4, 0]]) # A sample constant matrix
# -
tf.segment_sum(tens1, seg_ids).eval() # Sum segmentation
x = tf.constant([[2, 5, 3, -5],
[0, 3,-2, 5],
[4, 3, 5, 3],
[6, 1, 4, 0]])
listx = tf.constant([1,2,3,4,5,6,7,8])
listy = tf.constant([4,5,8,9])
boolx = tf.constant([[True,False], [False,True]])
tf.argmax(x, 1).eval() # Position of the maximum value of columns
tf.setdiff1d(listx, listy)[0].eval() # List differences
tf.where(boolx).eval() # Show true values
tf.unique(listx)[0].eval()
x = tf.constant([[2, 5, 3, -5],
[0, 3,-2, 5],
[4, 3, 5, 3],
[6, 1, 4, 0]])
tf.shape(x).eval() # Shape of the tensor
tf.size(x).eval() # size of the tensor
tf.rank(x).eval() # rank of the tensor
tf.reshape(x, [8, 2]).eval() # converting to a 10x2 matrix
tf.squeeze(x).eval() # squeezing
x.eval()
tf.expand_dims(x,1).eval() #Expanding dims
tf.train.SummaryWriter.__init__(logdir='./')
# +
import tensorflow as tf
sess = tf.Session()
filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once("./*.csv"),shuffle=True)
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename_queue)
record_defaults = [[0.], [0.], [0.], [0.], [""]]
col1, col2, col3, col4, col5 = tf.decode_csv(value,record_defaults=record_defaults)
# Convert CSV records to tensors. Eachcolumn maps to one tensor.
features = tf.pack([col1, col2, col3, col4])
tf.global_variables_initializer().run(session=sess)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
for iteration in range(0, 5):
example = sess.run([features])
print(example)
coord.request_stop()
coord.join(threads)
# +
import tensorflow as tf
sess = tf.Session()
filename_queue =tf.train.string_input_producer(tf.train.match_filenames_once("./51.jpg"))
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image=tf.image.decode_jpeg(value)
flipImageUpDown=tf.image.encode_jpeg(tf.image.flip_up_down(image))
flipImageLeftRight=tf.image.encode_jpeg(tf.image.flip_left_right(image))
tf.initialize_all_variables().run(session=sess)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
example = sess.run(flipImageLeftRight)
print(example)
file=open ("flippedUpDown.jpg", "wb+")
file.write (flipImageUpDown.eval(session=sess))
file.close()
file=open ("flippedLeftRight.jpg", "wb+")
file.write (flipImageLeftRight.eval(session=sess))
file.close()
# -
# ### -Compare actual relative dynamics to learned relative dynamics
# +
model.fit(delta, t=dt)
model.print()
print('Model score: %f' % model.score(delta, t=dt))
delta_0 = [-2, 0]
delta_sim = model.simulate(delta_0, t)
fig, axs = plt.subplots(delta.shape[1], 1, sharex=True, figsize=(7, 9))
for i in range(delta.shape[1]):
axs[i].plot(t, delta[:, i], 'k', label='true delta')
axs[i].plot(t, delta_sim[:, i], 'r--', label='model delta')
axs[i].legend()
axs[i].set(xlabel='t', ylabel='$x_{}$'.format(i))
fig.show()
# -
# ### -Learn model using SINDy on noisy data
# ### -Compare actual relative dynamics to learned relative dynamics
# +
model.fit(delta, t=dt)
model.print()
print('Model score: %f' % model.score(delta_noise, t=dt))
delta_0 = [-2, 0]
delta_sim = model.simulate(delta_0, t)
fig, axs = plt.subplots(delta_noise.shape[1], 1, sharex=True, figsize=(7, 9))
for i in range(delta.shape[1]):
axs[i].plot(t, delta_noise[:, i], 'k', label='true delta')
axs[i].plot(t, delta_sim[:, i], 'r--', label='model delta')
axs[i].legend()
axs[i].set(xlabel='t', ylabel='$x_{}$'.format(i))
fig.show()
# -
# ## 4.Observer State Estimation
# ### Dead Reckoning with noisy IMU
plt.plot(t,acc_noise) #acceleration of observer over time
# +
vel_noise = np.zeros(acc_noise.size-1)
#Integrate Acceleration
for x in range(1,vel_noise.size):
vel_noise[x] = acc_noise[x-1]*dt+vel_noise[x-1]
plt.plot(t[0:vel_noise.size],vel_noise) # velocity of observer over time
#vel_noise
# +
pos_noise = np.zeros(vel_noise.size-1)
pos_noise[0] = x_0[0]
#Integrate Acceleration
for x in range(1,pos_noise.size):
pos_noise[x] = vel_noise[x-1]*dt+pos_noise[x-1]
plt.plot(t[0:pos_noise.size],pos_noise) # velocity of observer over time
# -
# ### Feature in the global frame using the dead reckoning est
# +
delta_state = delta_sim[t.size-1]
x_proc = [pos_noise[pos_noise.size-1],vel_noise[vel_noise.size-1]]
y_state_est = delta_state + x_proc
y_state_est
# -
y[t.size-1]
# +
y1_est = delta_sim[2:,0]+ pos_noise
y2_est = delta_sim[1:,1]+ vel_noise
plt.plot(t[0:pos_noise.size],y1_est)
plt.plot(t[0:vel_noise.size],y2_est)
# -
# ### Estimated relative state vs measure relative state
delta_sim[len(delta_sim)-1]
delta_noise[len(delta_noise)-1]
delta_noise
delta_est = delta_sim[len(delta_sim)-1]-delta_noise[len(delta_noise)-1]
delta_est
# +
plt.plot(t,(delta_sim+delta_noise)/2)
#x_cor=((delta_sim-delta_noise)+x_proc)/2
#plt.plot(t,x_cor)
# +
F = np.array([[0,-1.025],[0.9518,0]])
# = np.array([[0,1],[-1.025,0]])
x_cor = delta_noise.dot(F)
#x_cor=np.linalg.lstsq(F, np.transpose(delta_noise))
plt.plot(t,x_cor)
| 6,805 |
/experiments/ow-on-generator/7/ow_template.ipynb
|
3922d57f270da976ae24d98f1bccfedd6995e01d
|
[
"MIT"
] |
permissive
|
luisoala/gen-seq-noise
|
https://github.com/luisoala/gen-seq-noise
| 2 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 13,805 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md:myst
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <p><font size="6"><b>Visualisation: Seaborn </b></font></p>
#
#
# > *© 2021, Joris Van den Bossche and Stijn Van Hoey (<mailto:[email protected]>, <mailto:[email protected]>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
#
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# # Seaborn
# [Seaborn](https://seaborn.pydata.org/) is a Python data visualization library:
#
# * Built on top of Matplotlib, but providing
# 1. High level functions.
# 2. Support for _tidy data_, which became famous due to the `ggplot2` R package.
# 3. Attractive and informative statistical graphics out of the box.
# * Interacts well with Pandas
import seaborn as sns
# ## Introduction
# We will use the Titanic example data set:
titanic = pd.read_csv('data/titanic.csv')
# + jupyter={"outputs_hidden": false}
titanic.head()
# -
# Let's consider the following question:
# >*For each class at the Titanic and each gender, what was the average age?*
# Hence, we should compute the *mean* age for the male and female groups (the `Sex` column) in combination with the groups of the `Pclass` column. In Pandas terminology:
# + jupyter={"outputs_hidden": false}
age_stat = titanic.groupby(["Pclass", "Sex"])["Age"].mean().reset_index()
age_stat
# -
# Plotting this data as a bar chart with pure Pandas is only partly supported:
# + jupyter={"outputs_hidden": false}
age_stat.plot(kind='bar')
## A possible other way of plotting this could be using groupby again:
#age_stat.groupby('Pclass').plot(x='Sex', y='Age', kind='bar') # (try yourself by uncommenting)
# -
# but with mixed results.
# __Seaborn__ provides another level of abstraction to visualize such *grouped* plots with different categories:
# + jupyter={"outputs_hidden": false}
sns.catplot(data=age_stat,
x="Sex", y="Age",
col="Pclass", kind="bar")
# -
# Check <a href="#this_is_tidy">here</a> for a short recap about `tidy` data.
# <div class="alert alert-info">
#
# **Remember**
#
# - Seaborn is especially suitable for these so-called <a href="http://vita.had.co.nz/papers/tidy-data.pdf">tidy</a> dataframe representations.
# - The [Seaborn tutorial](https://seaborn.pydata.org/tutorial/data_structure.html#long-form-vs-wide-form-data) provides a very good introduction to tidy (also called _long-form_) data.
# - You can use __Pandas column names__ as input for the visualisation functions of Seaborn.
#
# </div>
# ## Interaction with Matplotlib
# Seaborn builds on top of Matplotlib/Pandas, adding an additional layer of convenience.
#
# Topic-wise, Seaborn provides three main modules, i.e. type of plots:
#
# - __relational__: understanding how variables in a dataset relate to each other
# - __distribution__: specialize in representing the distribution of datapoints
# - __categorical__: visualize a relationship involving categorical data (i.e. plot something _for each category_)
#
# The organization looks like this:
# 
# We first check out the top commands of each of the types of plots: `relplot`, `displot`, `catplot`, each returning a Matplotlib `Figure`:
# ### Figure level functions
# Let's start from: _What is the relation between Age and Fare?_
# + jupyter={"outputs_hidden": false}
# A relation between variables in a Pandas DataFrame -> `relplot`
sns.relplot(data=titanic, x="Age", y="Fare")
# -
# Extend to: _Is the relation between Age and Fare different for people who survived?_
# + jupyter={"outputs_hidden": false}
sns.relplot(data=titanic, x="Age", y="Fare",
hue="Survived")
# -
# Extend to: _Is the relation between Age and Fare different for people who survived and/or for the gender of the passengers?_
# + jupyter={"outputs_hidden": false}
age_fare = sns.relplot(data=titanic, x="Age", y="Fare",
hue="Survived",
col="Sex")
# -
# The function returns a Seaborn `FacetGrid`, which is related to a Matplotlib `Figure`:
# + jupyter={"outputs_hidden": false}
type(age_fare), type(age_fare.fig)
# -
# As we are dealing here with 2 subplots, the `FacetGrid` consists of two Matplotlib `Axes`:
# + jupyter={"outputs_hidden": false}
age_fare.axes, type(age_fare.axes.flatten()[0])
# -
# Hence, we can still apply all the power of Matplotlib, but start from the convenience of Seaborn.
# <div class="alert alert-info">
#
# **Remember**
#
# The `Figure` level Seaborn functions:
#
# - Support __faceting__ by data variables (split up in subplots using a categorical variable)
# - Return a Matplotlib `Figure`, hence the output can NOT be part of a larger Matplotlib Figure
#
# </div>
# ### Axes level functions
# In 'technical' terms, when working with Seaborn functions, it is important to understand at which level they operate: `Axes`-level or `Figure`-level:
#
# - __axes-level__ functions plot data onto a single `matplotlib.pyplot.Axes` object and return the `Axes`
# - __figure-level__ functions return a Seaborn object, `FacetGrid`, which is a `matplotlib.pyplot.Figure`
#
# Remember the Matplotlib `Figure`, `axes` and `axis` anatomy explained in [visualization_01_matplotlib](visualization_01_matplotlib.ipynb)?
#
# Each plot module has a single `Figure`-level function (the top command in the scheme), which offers a unitary interface to its various `Axes`-level functions.
# We can ask the same question: _Is the relation between Age and Fare different for people who survived?_
# + jupyter={"outputs_hidden": false}
scatter_out = sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived")
# + jupyter={"outputs_hidden": false}
type(scatter_out)
# -
# But we can't use the `col`/`row` options for facetting:
# +
# sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived", col="Sex") # uncomment to check the output
# -
# We can use these functions to create custom combinations of plots:
# + jupyter={"outputs_hidden": false}
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 6))
sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived", ax=ax0)
sns.violinplot(data=titanic, x="Survived", y="Fare", ax=ax1) # boxplot, stripplot,.. as alternative to represent distribution per category
# -
# __Note!__ Check the similarity with the _best of both worlds_ approach (a small sketch follows after this list):
#
# 1. Prepare with Matplotlib
# 2. Plot using Seaborn
# 3. Further adjust specific elements with Matplotlib if needed
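# A minimal sketch of the three steps together, reusing the `titanic` data loaded earlier:
# +
# 1. Prepare the canvas with Matplotlib
fig, ax = plt.subplots(figsize=(6, 4))
# 2. Plot with Seaborn onto that Axes
sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived", ax=ax)
# 3. Further adjust specific elements with Matplotlib
ax.set_title("Fare versus Age")
ax.set_ylim(0, 300)
# -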
# <div class="alert alert-info">
#
# **Remember**
#
# The `Axes` level Seaborn functions:
#
# - Do NOT support faceting by data variables
# - Return a Matplotlib `Axes`, hence the output can be used in combination with other Matplotlib `Axes` in the same `Figure`
#
# </div>
# ### Summary statistics
# Aggregations such as `count`, `mean` are embedded in Seaborn (similar to other 'Grammar of Graphics' packages such as ggplot in R and plotnine/altair in Python). We can do these operations directly on the original `titanic` data set in a single coding step:
# + jupyter={"outputs_hidden": false}
sns.catplot(data=titanic, x="Survived", col="Pclass",
kind="count")
# -
# To use another statistical function to apply on each of the groups, use the `estimator`:
sns.catplot(data=titanic, x="Sex", y="Age", col="Pclass", kind="bar",
estimator=np.mean)
# ## Exercises
# <div class="alert alert-success">
#
# **EXERCISE 1**
#
# - Make a histogram of the age, split up in two subplots by the `Sex` of the passengers.
# - Put both subplots underneath each other.
# - Use the `height` and `aspect` arguments of the plot function to adjust the size of the figure.
#
# <details><summary>Hints</summary>
#
# - When interested in a histogram, i.e. the distribution of data, use the `displot` module
# - A split into subplots is requested using a variable of the DataFrame (facetting), so use the `Figure`-level function instead of the `Axes` level functions.
# - Link a column name to the `row` argument for splitting into subplots row-wise.
#
# </details>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn1.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 2**
#
# Make a violin plot showing the `Age` distribution in each of the `Pclass` categories comparing for `Sex`:
#
# - Use the `Pclass` column to create a violin plot for each of the classes. To do so, link the `Pclass` column to the `x-axis`.
# - Use a different color for the `Sex`.
# - Check the behavior of the `split` argument and apply it to compare male/female.
# - Use the `sns.despine` function to remove the boundaries around the plot.
#
# <details><summary>Hints</summary>
#
# - Have a look at https://seaborn.pydata.org/examples/grouped_violinplots.html for inspiration.
#
# </details>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn2.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn3.py
# -
# ## Some more Seaborn functionalities to remember
# Whereas the `relplot`, `catplot` and `displot` represent the main components of the Seaborn library, more useful functions are available. You can check the [gallery](https://seaborn.pydata.org/examples/index.html) yourself, but let's introduce a few of them:
# __jointplot()__ and __pairplot()__
#
# `jointplot()` and `pairplot()` are Figure-level functions and create figures with specific subplots by default:
# + jupyter={"outputs_hidden": false}
# joined distribution plot
sns.jointplot(data=titanic, x="Fare", y="Age",
hue="Sex", kind="scatter") # kde
# + jupyter={"outputs_hidden": false}
sns.pairplot(data=titanic[["Age", "Fare", "Sex"]], hue="Sex") # Also called scattermatrix plot
# -
# __heatmap()__
# Plot rectangular data as a color-encoded matrix.
# + jupyter={"outputs_hidden": false}
titanic_age_summary = titanic.pivot_table(columns="Pclass", index="Sex",
values="Age", aggfunc="mean")
titanic_age_summary
# + jupyter={"outputs_hidden": false}
sns.heatmap(data=titanic_age_summary, cmap="Reds")
# -
# __lmplot() regressions__
# `Figure` level function to generate a regression model fit across a FacetGrid:
# + jupyter={"outputs_hidden": false}
g = sns.lmplot(
data=titanic, x="Age", y="Fare",
hue="Survived", col="Survived", # hue="Pclass"
)
# -
# # Exercises data set road casualties
# The [Belgian road casualties data set](https://statbel.fgov.be/en/themes/mobility/traffic/road-accidents) contains data about the number of victims involved in road accidents.
#
# The script `load_casualties.py` in the `data` folder contains the routine to download the individual years of data, clean up the data and concatenate the individual years.
#
# The `%run` is an ['IPython magic' ](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-run) function to run a Python file as if you would run it from the command line. Run `%run ./data/load_casualties.py --help` to check the input arguments required to run the script. As data is available since 2005, we download 2005 till 2020.
#
# __Note__ As the scripts downloads the individual files, it can take a while to run the script the first time.
# RUN THIS CELL TO PREPARE THE ROAD CASUALTIES DATA SET
# %run ./data/load_casualties.py 2005 2020
# When succesfull, the `casualties.csv` data is available in the `data` folder:
casualties = pd.read_csv("./data/casualties.csv", parse_dates=["datetime"])
# The data contains the following columns (in bold the main columns used in the exercises):
#
# - **datetime**: Date and time of the casualty.
# - **week_day**: Weekday of the datetime.
# - **n_victims**: Number of victims
# - n_victims_ok: Number of victims without injuries
# - n_slightly_injured: Number of slightly injured victims
# - n_seriously_injured: Number of severely injured victims
# - **n_dead_30days**: Number of victims that died within 30 days
# - **road_user_type**: Road user type (passenger car, motorbike, bicycle, pedestrian, ...)
# - victim_type: Type of victim (driver, passenger, ...)
# - **gender**
# - age
# - **road_type**: Regional road, Motorway or Municipal road
# - build_up_area: Outside or inside built-up area
# - **light_conditions**: Day or night (with or without road lights), or dawn
# - refnis_municipality: Postal reference ID number of municipality
# - municipality: Municipality name
# - refnis_region: Postal reference ID number of region
# - region: Flemish Region, Walloon Region or Brussels-Capital Region
#
# Each row of the dataset does not represent a single accident, but a number of victims for a set of characteristics (for example, how many victims for accidents that happened between 8-9am on a certain day, on a certain road type, in a certain municipality, with the given age class and gender, ...). Thus, in practice, the victims of one accident might be split over multiple rows (and one row might in theory also come from multiple accidents).
# Therefore, to get meaningful numbers in the exercises, we will each time _sum_ the number of victims for a certain aggregation level (a subset of those characteristics).
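# For example (a small illustration of that aggregation idea, separate from the exercise solutions), summing the victims per road type collapses these characteristic rows into one total per category:
casualties.groupby("road_type")["n_victims"].sum()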
# <div class="alert alert-success">
#
# **EXERCISE 3**
#
# Create a barplot with the number of victims ("n_victims") for each hour of the day. Before plotting, calculate the total number of victims for each hour of the day with pandas and assign it to the variable `victims_hour_of_day`. Update the column names to respectively "Hour of the day" and "Number of victims".
#
# Use the `height` and `aspect` to adjust the figure width/height.
#
# <details><summary>Hints</summary>
#
# - The sum of victims _for each_ hour of the day requires `groupby`. One can create a new column with the hour of the day or pass the hour directly to `groupby`.
# - The `.dt` accessor provides access to all kinds of datetime information.
# - `rename` requires a dictionary with a mapping of the old vs new names.
# - A bar plot is in seaborn one of the `catplot` options.
#
# </details>
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn4.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn5.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 4**
#
# Create a barplot with the number of victims ("n_victims") for each hour of the day for each category in the gender column. Before plotting, calculate the total number of victims for each hour of the day and each gender with Pandas and assign it to the variable `victims_gender_hour_of_day`.
#
# Create a separate subplot for each gender category in a separate row and apply the `rocket` color palette.
#
# Make sure to include the `NaN` values of the "gender" column as a separate subplot, called _"unknown"_ without changing the `casualties` DataFrame data.
#
# <details><summary>Hints</summary>
#
# - The sum of victims _for each_ hour of the day requires `groupby`. Groupby accepts multiple inputs to group on multiple categories together.
# - `groupby` also accepts a parameter `dropna=False` and/or using `fillna` is a useful function to replace the values in the gender column with the value "unknown".
# - The `.dt` accessor provides access to all kinds of datetime information.
# - Link the "gender" column with the `row` parameter to create a facet of rows.
# - Use the `height` and `aspect` to adjust the figure width/height.
#
# </details>
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn6.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn7.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 5**
#
# Compare the number of victims for each day of the week for casualties that happened on a "Motorway" (`road_type` column) for trucks ("Truck" and "Light truck" in the `road_user_type` column).
#
# Use a bar plot to compare the victims for each day of the week with Seaborn directly (do not use the `groupby`).
#
# __Note__ The `week_day` is converted to an __ordered__ categorical variable. This ensures the days are sorted correctly in Seaborn.
#
# <details><summary>Hints</summary>
#
# - The first part of the exercise is filtering the data. Combine the statements with `&` and do not forget to provide the necessary brackets. The `.isin()`to create a boolean condition might be useful for the road user type selection.
# - Whereas using `groupby` to get to the counts is perfectly correct, using the `estimator` in Seaborn gives the same result.
#
# __Note__ The `estimator=np.sum` is less performant than using pandas `groupby`. After filtering the data set, the summation with Seaborn is a feasible option.
#
# </details>
# Convert weekday to Pandas categorical data type
casualties["week_day"] = pd.Categorical(
casualties["week_day"],
categories=["Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday", "Sunday"],
ordered=True
)
casualties_motorway_trucks = casualties[
(casualties["road_type"] == "Motorway")
& casualties["road_user_type"].isin(["Light truck", "Truck"])
]
sns.catplot(data=casualties_motorway_trucks,
x="week_day",
y="n_victims",
estimator=np.sum,
ci=None,
kind="bar",
color="#900C3F",
height=3,
aspect=4)
# <div class="alert alert-success">
#
# **EXERCISE 6**
#
# Compare the relative number of deaths within 30 days (in relation to the total number of victims) between the following "road_user_type"s: "Bicycle", "Passenger car", "Pedestrian", "Motorbike" for the years 2019 and 2020:
#
# - Filter the data for the years 2019 and 2020.
# - Filter the data on the road user types "Bicycle", "Passenger car", "Pedestrian" and "Motorbike". Call the new variable `compare_dead_30`.
# - Count for each combination of year and road_user_type the total victims and the total deaths within 30 days victims.
# - Calculate the percentage deaths within 30 days (add a new column "dead_prop").
# - Use a horizontal bar chart to plot the results with the "road_user_type" on the y-axis and a separate color for each year.
#
# <details><summary>Hints</summary>
#
# - By setting `datetime` as the index, slicing time series can be done using strings to filter data on the years 2019 and 2020.
# - Use `isin()` to filter "road_user_type" categories used in the exercise.
# - Count _for each_... Indeed, use `groupby` with 2 inputs, "road_user_type" and the year of `datetime`.
# - Deriving the year from the datetime: When having an index, use `compare_dead_30.index.year`, otherwise `compare_dead_30["datetime"].dt.year`.
# - Dividing columns works element-wise in Pandas.
# - A horizontal bar chart in seaborn is a matter of defining `x` and `y` inputs correctly.
#
# </details>
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn8.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn9.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 7**
#
# Create a line plot of the __monthly__ number of victims for each of the categories of victims ('n_victims_ok', 'n_dead_30days', 'n_slightly_injured' and 'n_seriously_injured') as a function of time:
#
# - Create a new variable `monthly_victim_counts` that contains the monthly sum of 'n_victims_ok', 'n_dead_30days', 'n_slightly_injured' and 'n_seriously_injured'.
# - Create a line plot of the `monthly_victim_counts` using Seaborn. Choose any [color palette](https://seaborn.pydata.org/tutorial/color_palettes.html).
# - Create an `area` plot (line plot with the individual categories stacked on each other) using Pandas.
#
# What happens with the data registration since 2012?
#
# <details><summary>Hints</summary>
#
# - Monthly statistics from a time series requires `resample` (with - in this case - `sum`), which also takes the `on` parameter to specify the datetime column (instead of using the index of the DataFrame).
# - Apply the resampling on the `["n_victims_ok", "n_slightly_injured", "n_seriously_injured", "n_dead_30days"]` columns only.
# - Seaborn line plots works without tidy data when NOT providing `x` and `y` argument. It also works using tidy data. To 'tidy' the data set, `.melt()` can be used, see [pandas_08_reshaping.ipynb](pandas_08_reshaping.ipynb).
# - Pandas plot method works on the non-tidy data set with `plot.area()` .
#
# __Note__ Seaborn does not have an area plot.
#
# </details>
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn10.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn11.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn12.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn13.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 8**
#
# Make a line plot of the daily victims (column "n_victims") in 2020. Can you explain the counts from March till May?
#
# <details><summary>Hints</summary>
#
# - To get the line plot of 2020 with daily counts, the data preparation steps are:
# - Filter data on 2020. By defining `datetime` as the index, slicing time series can be done using strings.
# - Resample to daily counts. Use `resample` with the sum on column "n_victims".
# - Create a line plot. Do you prefer Pandas or Seaborn?
#
# </details>
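#
# A minimal sketch (not the loaded solution), assuming the `casualties` DataFrame; the dip from March till May 2020 presumably reflects the first COVID-19 lockdown period with far less traffic:
# +
daily_counts = casualties.resample("D", on="datetime")["n_victims"].sum()
daily_counts.loc["2020"].plot()
# -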
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn14.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn15.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 9**
#
# Combine the following two plots in a single Matplotlib figure:
#
# - (left) The empirical cumulative distribution of the _weekly_ proportion of victims that died (`n_dead_30days` / `n_victims`) with a separate color for each "light_conditions".
# - (right) The empirical cumulative distribution of the _weekly_ proportion of victims that died (`n_dead_30days` / `n_victims`) with a separate color for each "road_type".
#
# Prepare the data for both plots separately with Pandas and use the variable `weekly_victim_dead_lc` and `weekly_victim_dead_rt`.
#
# <details><summary>Hints</summary>
#
# - The plot cannot be made with a single Seaborn figure-level plot. Create a Matplotlib figure first and use the __axes__-based functions of Seaborn to plot the left and right Axes.
# - The data for both subplots need to be prepared separately, by `groupby` once on "light_conditions" and once on "road_type".
# - Weekly sums (`resample`) _for each_ (`groupby`) "light_conditions" or "road_type"? Yes, you need to combine both here.
# - [`sns.ecdfplot`](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html#seaborn.ecdfplot) creates empirical cumulative distribution plots.
#
# </details>
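#
# A minimal sketch (not the loaded solution), assuming the `casualties` DataFrame, seaborn as `sns` and `matplotlib.pyplot` as `plt`:
# +
def weekly_dead_prop(df, group_col):
    weekly = (df.set_index("datetime")
                .groupby(group_col)[["n_dead_30days", "n_victims"]]
                .resample("W").sum()
                .reset_index())
    weekly["dead_prop"] = weekly["n_dead_30days"] / weekly["n_victims"]
    return weekly
weekly_victim_dead_lc = weekly_dead_prop(casualties, "light_conditions")
weekly_victim_dead_rt = weekly_dead_prop(casualties, "road_type")
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4), sharey=True)
sns.ecdfplot(data=weekly_victim_dead_lc, x="dead_prop", hue="light_conditions", ax=ax0)
sns.ecdfplot(data=weekly_victim_dead_rt, x="dead_prop", hue="road_type", ax=ax1)
# -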
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn16.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn17.py
# -
# <div class="alert alert-success">
#
# **EXERCISE 10**
#
# You wonder if there is a relation between the number of victims per day and the minimal daily temperature. A data set with minimal daily temperatures for the year 2020 is available in the `./data` subfolder: `daily_min_temperature_2020.csv`.
#
# - Read the file `daily_min_temperature_2020.csv` and assign output to the variable `daily_min_temp_2020`.
# - Combine the daily (minimal) temperatures with the `daily_total_counts_2020` variable (provided in the cell below).
# - Create a regression plot with Seaborn.
#
# Does it make sense to present the data as a regression plot?
#
# <details><summary>Hints</summary>
#
# - `pd.read_csv` has a `parse_dates` parameter to load the `datetime` column as a Timestamp data type.
# - `pd.merge` needs a (common) key to link the data.
# - `sns.lmplot` or `sns.jointplot` are both seaborn functions to create scatter plots with a regression. Joint plot adds the marginal distributions.
#
# </details>
# daily counts for 2020, made available here (see previous exercises)
daily_total_counts_2020 = casualties.set_index("datetime")["2020": "2021"].resample("D")["n_victims"].sum()
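# +
# A minimal sketch (not the loaded solution). Assumptions: pandas is available as `pd`,
# the CSV has a "datetime" column, and the temperature column name used below is a guess.
daily_min_temp_2020 = pd.read_csv("data/daily_min_temperature_2020.csv",
                                  parse_dates=["datetime"])
daily_2020_combined = pd.merge(daily_total_counts_2020.reset_index(),
                               daily_min_temp_2020, on="datetime")
sns.jointplot(data=daily_2020_combined, x="daily_min_temperature",  # assumed column name
              y="n_victims", kind="reg")
# -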
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn18.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn19.py
# + tags=["nbtutor-solution"]
# # %load _solutions/visualization_02_seaborn20.py
# -
# # Need more Seaborn inspiration?
# <div class="alert alert-info" style="font-size:18px">
#
# __Remember__
#
# [Seaborn gallery](https://seaborn.pydata.org/examples/index.html) and package [documentation](https://seaborn.pydata.org/index.html)
#
# </div>
# <a id='this_is_tidy'></a>
# # Recap: what is `tidy`?
# If you're wondering what *tidy* data representations are, you can read the scientific paper by Hadley Wickham, http://vita.had.co.nz/papers/tidy-data.pdf.
#
# Here, we just introduce the main principle very briefly:
# Compare:
#
# #### un-tidy
#
# | WWTP | Treatment A | Treatment B |
# |:------|-------------|-------------|
# | Destelbergen | 8.0 | 6.3 |
# | Landegem | 7.5 | 5.2 |
# | Dendermonde | 8.3 | 6.2 |
# | Eeklo | 6.5 | 7.2 |
#
# *versus*
#
# #### tidy
#
# | WWTP | Treatment | pH |
# |:------|:-------------:|:-------------:|
# | Destelbergen | A | 8.0 |
# | Landegem | A | 7.5 |
# | Dendermonde | A | 8.3 |
# | Eeklo | A | 6.5 |
# | Destelbergen | B | 6.3 |
# | Landegem | B | 5.2 |
# | Dendermonde | B | 6.2 |
# | Eeklo | B | 7.2 |
# This is sometimes also referred to as *wide* versus *long* format for a specific variable... Seaborn (and other grammar of graphics libraries) work better on `tidy` (long format) data, as it better supports `groupby`-like operations!
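#
# As a small illustration, the un-tidy table above can be reshaped into the tidy one with `melt` (a sketch, assuming `pandas` is imported as `pd`):
# +
wide = pd.DataFrame({"WWTP": ["Destelbergen", "Landegem", "Dendermonde", "Eeklo"],
                     "Treatment A": [8.0, 7.5, 8.3, 6.5],
                     "Treatment B": [6.3, 5.2, 6.2, 7.2]})
tidy = wide.melt(id_vars="WWTP", var_name="Treatment", value_name="pH")
tidy["Treatment"] = tidy["Treatment"].str.replace("Treatment ", "", regex=False)
tidy
# -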
# <div class="alert alert-info" style="font-size:16px">
#
# **Remember:**
#
# A tidy data set is setup as follows:
#
# - Each <code>variable</code> forms a <b>column</b> and contains <code>values</code>
# - Each <code>observation</code> forms a <b>row</b>
# - Each type of <code>observational unit</code> forms a <b>table</b>.
#
# </div>
#
# * `seed` for the random number generator (RNG) is an arbitrary number that defines the starting point in the RNG sequence. Using the same seed should yield reproducible simulation results if the same RNG implementation is used. When running multiple copies of the same simulation, each of them should use a different seed.
#
# **Exercise:**
#
# * Use [`espressomd.reaction_methods.ConstantpHEnsemble`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ConstantpHEnsemble) to create an instance of the reaction-ensemble constant $\mathrm{pH}$-method called `RE`
# + [markdown] solution2="hidden"
# ```python
# exclusion_range = PARTICLE_SIZE_REDUCED if USE_WCA else 0.0
# RE = espressomd.reaction_methods.ConstantpHEnsemble(
# kT=KT_REDUCED,
# exclusion_range=exclusion_range,
# seed=77,
# constant_pH=2 # temporary value
# )
# RE.set_non_interacting_type(type=len(TYPES)) # this parameter helps speed up the calculation in an interacting system
# ```
# -
# The next step is to define the chemical reaction. The order of species in the lists of reactants and products is very important for ESPResSo because it determines which particles are created or deleted in the reaction move. Specifically, identity of the first species in the list of reactants is changed to the first species in the list of products, the second reactant species is changed to the second product species, and so on. If the reactant list has more species than the product list, then excess reactant species are deleted from the system. If the product list has more species than the reactant list, then the excess product species are created and randomly placed inside the simulation box. This convention is especially important if some of the species belong to a chain-like molecule, so that they cannot be inserted at an arbitrary position.
# + [markdown] solution2="hidden" solution2_first=true
# **Exercise:**
#
# * Use [`espressomd.reaction_methods.ConstantpHEnsemble.add_reaction()`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ConstantpHEnsemble.add_reaction) to add the reaction; remember to use the variables that were set up above for the reaction constant and the particle types and charges
#
#
# ***Hint:*** Make sure to place `TYPES["HA"]` and `TYPES["A"]` as first elements in the `reactant_types` and `product_types` lists respectively
# + [markdown] solution2="hidden"
# ```python
# RE.add_reaction(
# gamma=10**(-pKa),
# reactant_types=[TYPES["HA"]],
# product_types=[TYPES["A"], TYPES["B"]],
# default_charges={TYPES["HA"]: CHARGES["HA"],
# TYPES["A"]: CHARGES["A"],
# TYPES["B"]: CHARGES["B"]}
# )
# ```
# -
# In the example above, the order of reactants and products ensures that identity of $\mathrm{HA}$ is changed to $\mathrm{A^{-}}$ and vice versa, while $\mathrm{B^{+}}$ is inserted/deleted in the reaction move.
#
# If we had reversed the order of products in our reaction (i.e. from `product_types=[TYPES["A"], TYPES["B"]]` to `product_types=[TYPES["B"], TYPES["A"]]`), then the identity of $\mathrm{HA}$ would be changed to $\mathrm{B^{+}}$, while $\mathrm{A^{-}}$ would be inserted/deleted at a random position in the box. Therefore $\mathrm{B^{+}}$ would become a part of the polymer chain whereas $\mathrm{A^{-}}$ would become a free ion.
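#
# For illustration only, a sketch of that swapped ordering (not used in this tutorial):
#
# ```python
# RE.add_reaction(
#     gamma=10**(-pKa),
#     reactant_types=[TYPES["HA"]],
#     product_types=[TYPES["B"], TYPES["A"]],  # swapped: B+ would join the chain
#     default_charges={TYPES["HA"]: CHARGES["HA"],
#                      TYPES["A"]: CHARGES["A"],
#                      TYPES["B"]: CHARGES["B"]}
# )
# ```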
# ## Run the simulations
#
# Finally, we can perform simulations at different $\mathrm{pH}$ values. To do so in a single loop, we need to equilibrate the simulated system each time when we change the $\mathrm{pH}$, before collecting the samples.
# + [markdown] solution2="hidden" solution2_first=true
# **Exercise:**
#
# * Write a function called `equilibrate_reaction()` that performs the equilibration of the $\mathrm{pH}$ value by performing reaction moves in the system by calling [`RE.reaction()`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ReactionAlgorithm.reaction).
#
# ***Hint:*** To ensure sufficient equilibration of the reaction, one should use a number of steps, `reaction_steps`, that is greater than the number of titratable acid groups in the system, `N_ACID`.
# + [markdown] solution2="hidden"
# ```python
# def equilibrate_reaction(reaction_steps=1):
# RE.reaction(steps=reaction_steps)
# ```
# -
# + [markdown] solution2="hidden" solution2_first=true
# After the system has been equilibrated, the integration/sampling loop follows.
#
# **Exercise:**
#
# * Write a function called `perform_sampling()` that implements the sampling loop
#
# * The function should take the following parameters:
# * an integer `type_A` that contains the particle type that should be followed when sampling the composition
# * an integer `num_samples` that determines the number of sampling steps to be performed
# * a numpy array `num_As`, such that `len(num_As) == num_samples` which will be used to store the time series of the instantaneous values of $N_{\mathrm{A^{-}}}$
# * a float `reaction_steps` that determines the number of reactions to be performed
#
#
# * The function should perform the following tasks:
# * sample the composition fluctuations using reaction moves of the constant $\mathrm{pH}$ algorithm by calling [`RE.reaction()`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ReactionAlgorithm.reaction)
# * sample the configuration space using the Langevin dynamics (LD) integration
# * perform the reaction moves at each sampling step
# * perform the LD integration in each sampling step with the probability `PROB_INTEGRATION`, and only if the interactions have been activated
# * after each sampling step, save the current number of particles of type $\mathrm{A^-}$ in the array `num_As` for later analysis
#
# ***Hint:***
# * the value of `reaction_steps` in each sampling step should be comparable to the number of titratable groups (`N_ACID`) in the system
# * the number of LD integration steps should be at least `1000`
# * the number of particles of a certain type can be obtained via the function [`espressomd.system.System.number_of_particles()`](https://espressomd.github.io/doc/espressomd.html#espressomd.system.System.number_of_particles)
# + [markdown] solution2="hidden"
# ```python
# def perform_sampling(type_A, num_samples, num_As:np.ndarray, reaction_steps,
# prob_integration=0.5, integration_steps=1000):
# for i in range(num_samples):
# if USE_WCA and np.random.random() < prob_integration:
# system.integrator.run(integration_steps)
# # we should do at least one reaction attempt per reactive particle
# RE.reaction(steps=reaction_steps)
# num_As[i] = system.number_of_particles(type=type_A)
# ```
# -
# Finally we have everything together to run our simulations. We set the $\mathrm{pH}$ value in [`RE.constant_pH`](https://espressomd.github.io/doc/espressomd.html#espressomd.reaction_methods.ConstantpHEnsemble.constant_pH) and use our `equilibrate_reaction` function to equilibrate the system. After that the samplings are performed with our `perform_sampling` function.
# +
# empty numpy array as a placeholder for collecting the data
num_As_at_each_pH = -np.ones((len(pHs), NUM_SAMPLES)) # number of A- species observed at each sample
# run a productive simulation and collect the data
print(f"Simulated pH values: {pHs}")
for ipH, pH in enumerate(pHs):
print(f"Run pH {pH:.2f} ...")
RE.constant_pH = pH # set new pH value
start_time = time.time()
equilibrate_reaction(reaction_steps=N_ACID + 1) # pre-equilibrate the reaction to the new pH value
perform_sampling(type_A=TYPES["A"],
num_samples=NUM_SAMPLES,
num_As=num_As_at_each_pH[ipH, :],
reaction_steps=N_ACID + 1,
prob_integration=PROB_INTEGRATION) # perform sampling / run production simulation
runtime = (time.time() - start_time) * ureg.s
print(f"runtime: {runtime:.2g};",
f"measured number of A-: {np.mean(num_As_at_each_pH[ipH]):.2f}",
f"(ideal: {N_ACID * ideal_alpha(pH, pKa):.2f})",
)
print("\nDONE")
# -
# ## Results
#
# Now we plot our results and compare them to the analytical results obtained from the Henderson-Hasselbalch equation. Since our simulation is a stochastic process, its outputs are random numbers that may change with each run. However, in the limit of infinite simulation time, they converge to one specific value, that is the ensemble average. For a meaningful comparison of simulation results with the theory, it is necessary to supply the estimates of the ensemble averages as well as the estimates of their statistical error. Differences between the simulation and theory can be considered significant only if they are greater than the estimated statistical error.
#
# ### Statistical analysis of the data
#
# The molecular simulation produces a time series of the observables, which
# constitutes a Markov chain. It is a sequence of realizations of a random process, where
# the next value in the sequence depends on the preceding one. Therefore,
# the subsequent values are correlated. To estimate the statistical error of the averages
# determined in the simulation, one needs to correct for the correlations.
#
# Here, we will use a rudimentary way of correcting for correlations, termed the binning method.
# We refer the reader to specialized literature for a more sophisticated discussion, for example <a href='#[Janke2002]'>[Janke2002]</a>. The general idea is to group a long sequence of correlated values into a rather small number of blocks, and compute an average per each block. If the blocks are big enough, they
# can be considered uncorrelated, and one can apply the formula for standard error of the mean of uncorrelated values. If the number of blocks is small, then they are uncorrelated but the obtained error estimate has a high uncertainty. If the number of blocks is high, then they are too short to be uncorrelated, and the obtained error estimates are systematically lower than the correct value. Therefore, the method works well only if the sample size is much greater than the autocorrelation time, so that it can be divided into a sufficient number of mutually uncorrelated blocks.
# statistical analysis of the results
def block_analyze(input_data, n_blocks=16):
data = np.asarray(input_data)
block = 0
# this number of blocks is recommended by Janke as a reasonable compromise
# between the conflicting requirements on block size and number of blocks
block_size = int(data.shape[1] / n_blocks)
print(f"block_size: {block_size}")
# initialize the array of per-block averages
block_average = np.zeros((n_blocks, data.shape[0]))
# calculate averages per each block
for block in range(n_blocks):
block_average[block] = np.average(data[:, block * block_size: (block + 1) * block_size], axis=1)
# calculate the average and average of the square
av_data = np.average(data, axis=1)
av2_data = np.average(data * data, axis=1)
# calculate the variance of the block averages
block_var = np.var(block_average, axis=0)
# calculate standard error of the mean
err_data = np.sqrt(block_var / (n_blocks - 1))
# estimate autocorrelation time using the formula given by Janke
# this assumes that the errors have been correctly estimated
tau_data = np.zeros(av_data.shape)
for val in range(av_data.shape[0]):
if av_data[val] == 0:
# unphysical value marks a failure to compute tau
tau_data[val] = -1.0
else:
tau_data[val] = 0.5 * block_size * n_blocks / (n_blocks - 1) * block_var[val] \
/ (av2_data[val] - av_data[val] * av_data[val])
return av_data, err_data, tau_data, block_size
# Now, we use the above function to calculate the average number of particles of type $\mathrm{A^-}$ and estimate its statistical error and autocorrelation time.
# Then we use these values to calculate the degree of ionization $\alpha$ by dividing the number of particles of type $\mathrm{A^-}$ by the number of titratable units `N_ACID`. Finally, we plot the degree of ionization $\alpha$ as a function of the $\mathrm{pH}$.
# +
# estimate the statistical error and the autocorrelation time using the formula given by Janke
av_num_As, err_num_As, tau, block_size = block_analyze(num_As_at_each_pH, N_BLOCKS)
print(f"av = {av_num_As}")
print(f"err = {err_num_As}")
print(f"tau = {tau}")
# calculate the average ionization degree
av_alpha = av_num_As / N_ACID
err_alpha = err_num_As / N_ACID
# plot the simulation results compared with the ideal titration curve
plt.figure(figsize=(10, 6), dpi=80)
plt.errorbar(pHs - pKa, av_alpha, err_alpha, marker='o', linestyle='none',
label=r"simulation")
pHs2 = np.linspace(pHmin, pHmax, num=50)
plt.plot(pHs2 - pKa, ideal_alpha(pHs2, pKa), label=r"Henderson-Hasselbalch")
plt.xlabel(r'$\mathrm{pH} - \mathrm{p}K_{\mathrm{A}}$', fontsize=16)
plt.ylabel(r'$\alpha$', fontsize=16)
plt.legend(fontsize=16)
plt.show()
# -
# The simulation results for the non-interacting case match very well with the analytical solution of Henderson-Hasselbalch equation. There are only minor deviations, and the estimated errors are small too. This situation will change when we introduce interactions.
#
# It is useful to check whether the estimated errors are consistent with the assumptions that were used to obtain them. To do this, we follow <a href='#[Janke2002]'>[Janke2002]</a> to estimate the number of uncorrelated samples per block, and check whether each block contains a sufficient number of uncorrelated samples (we choose 10 uncorrelated samples per block as the threshold value).
#
# Intentionally, we made our simulation slightly too short, so that it does not produce enough uncorrelated samples. We encourage the reader to vary the number of blocks or the number of samples to see how the estimated error changes with these parameters.
# check if the blocks contain enough data for reliable error estimates
print(f"uncorrelated samples per block:\nblock_size/tau = {block_size / tau}")
threshold = 10 # block size should be much greater than the correlation time
if np.any(block_size / tau < threshold):
    print(f"\nWarning: some blocks may contain less than {threshold} uncorrelated samples."
          "\nYour error estimates may be unreliable."
          "\nPlease, check them using a more sophisticated method or run a longer simulation.")
print(f"? block_size/tau > threshold ? : {block_size / tau > threshold}")
else:
    print(f"\nAll blocks seem to contain more than {threshold} uncorrelated samples."
          "\nError estimates should be OK.")
# To look in more detail at the statistical accuracy, it is useful to plot the deviations from the analytical result. This provides another way to check the consistency of error estimates. In the case of non-interacting system, the simulation should exactly reproduce the Henderson-Hasselbalch equation. In such case, about 68% of the results should be within one error bar from the analytical result, whereas about 95% of the results should be within two times the error bar. Indeed, if you plot the deviations by running the script below, you should observe that most of the results are within one error bar from the analytical solution, a smaller fraction of the results is slightly further than one error bar, and one or two might be about two error bars apart. Again, this situation changes when we activate interactions because the ionization of the interacting system deviates from the Henderson-Hasselbalch equation.
# plot the deviations from the ideal result
plt.figure(figsize=(10, 6), dpi=80)
ylim = np.amax(abs(av_alpha - ideal_alpha(pHs, pKa)))
plt.ylim((-1.5 * ylim, 1.5 * ylim))
plt.errorbar(pHs - pKa, av_alpha - ideal_alpha(pHs, pKa),
err_alpha, marker='o', linestyle='none', label=r"simulation")
plt.plot(pHs - pKa, 0.0 * ideal_alpha(pHs, pKa), label=r"Henderson-Hasselbalch")
plt.xlabel(r'$\mathrm{pH} - \mathrm{p}K_{\mathrm{A}}$', fontsize=16)
plt.ylabel(r'$\alpha - \alpha_{ideal}$', fontsize=16)
plt.legend(fontsize=16)
plt.show()
# ### The Neutralizing Ion $\mathrm{B^+}$
# Up to now, we did not discuss the chemical nature of the neutralizer $\mathrm{B^+}$.
# It is not obvious how the chemical nature of the $\mathrm{B^+}$ ion should be interpreted in a coarse-grained model, where water molecules and $\mathrm{H^+}$ ions are not represented explicitly.
# Now, we address this issue using some specific examples.
#
# In the simplest case, if we add an acidic polymer to pure water that has $\mathrm{pH} = 7$, some of the acid groups dissociate and release $\mathrm{H^+}$ ions into the solution. The $\mathrm{pH}$ decreases to a value that depends on $\mathrm{p}K_{\mathrm{A}}$ and on the concentration of ionizable groups. Now, three ionic species are present in the solution: $\mathrm{H^+}$, $\mathrm{A^-}$, and $\mathrm{OH^-}$.
# In this case the $\mathrm{B^+}$ ions generated in our implementation of the reaction correspond to the $\mathrm{H^+}$ ions. The $\mathrm{H^+}$ act as the counter-ions, neutralizing both the $\mathrm{A^-}$ and the $\mathrm{OH^-}$ ions. At acidic $\mathrm{pH}$ there are only very few $\mathrm{OH^-}$ ions and nearly all $\mathrm{H^+}$ ions act as neutralizer for the $\mathrm{A^-}$ ions. Therefore, the concentration of $\mathrm{B^+}$ is very close to the concentration of $\mathrm{H^+}$ in the real aqueous solution. Only very few $\mathrm{OH^-}$ ions, and the $\mathrm{H^+}$ ions needed to neutralize them, are missing in the simulation box, when compared to the real solution.
#
# To achieve a more acidic $\mathrm{pH}$ (with the same $\mathrm{p}K_{\mathrm{A}}$ and polymer concentration), we need to add an acid to the system. We can do that by adding a strong acid, such as $\mathrm{HCl}$ or $\mathrm{HNO}_3$. We will denote this acid by a generic name $\mathrm{HX}$ to emphasize that in general its anion can be different from the salt anion $\mathrm{Cl^{-}}$. Now, there are 4 ionic species in the solution: $\mathrm{H^+}$, $\mathrm{A^-}$, $\mathrm{OH^-}$, and $\mathrm{X^-}$ ions. By the same argument as before, the $\mathrm{B^+}$ ions again correspond to the $\mathrm{H^+}$ ions. The $\mathrm{H^+}$ ions neutralize the $\mathrm{A^-}$, $\mathrm{OH^-}$, and the $\mathrm{X^-}$ ions. Because the concentration of $\mathrm{X^-}$ is not negligible, the concentration of $\mathrm{B^+}$ in the simulation box differs from the $\mathrm{H^+}$ concentration in the real solution. Now, many more ions are missing in the simulation box, as compared to the real solution: Few $\mathrm{OH^-}$ ions, many $\mathrm{X^-}$ ions, and all the $\mathrm{H^+}$ ions that neutralize them.
#
# To achieve a neutral $\mathrm{pH}$ we need to add some base to the system to neutralize the polymer.
# In the simplest case we add an alkali metal hydroxide, such as $\mathrm{NaOH}$ or $\mathrm{KOH}$, that we will generically denote as $\mathrm{MOH}$. Now, there are 4 ionic species in the solution: $\mathrm{H^+}$, $\mathrm{A^-}$, $\mathrm{OH^-}$, and $\mathrm{M^+}$. In such situation, we can not clearly attribute a specific chemical identity to the $\mathrm{B^+}$ ions. However, only very few $\mathrm{H^+}$ and $\mathrm{OH^-}$ ions are present in the system at $\mathrm{pH} = 7$. Therefore, we can make the approximation that at this $\mathrm{pH}$, all $\mathrm{A^-}$ are neutralized by the $\mathrm{M^+}$ ions, and the $\mathrm{B^+}$ correspond to $\mathrm{M^+}$. Then, the concentration of $\mathrm{B^+}$ also corresponds to the concentration of $\mathrm{M^+}$ ions. Now, again only few ions are missing in the simulation box, as compared to the real solution: Few $\mathrm{OH^-}$ ions, and few $\mathrm{H^+}$ ions.
#
# To achieve a basic $\mathrm{pH}$ we need to add even more base to the system to neutralize the polymer.
# Again, there are 4 ionic species in the solution: $\mathrm{H^+}$, $\mathrm{A^-}$, $\mathrm{OH^-}$, and $\mathrm{M^+}$ and we can not clearly attribute a specific chemical identity to the $\mathrm{B^+}$ ions. Because only very few $\mathrm{H^+}$ ions should be present in the solution, we can make the approximation that at this $\mathrm{pH}$, all $\mathrm{A^-}$ ions are neutralized by the $\mathrm{M^+}$ ions, and therefore $\mathrm{B^+}$ ions in the simulation correspond to $\mathrm{M^+}$ ions in the real solution. Because additional $\mathrm{M^+}$ ions in the real solution neutralize the $\mathrm{OH^-}$ ions, the concentration of $\mathrm{B^+}$ does not correspond to the concentration of $\mathrm{M^+}$ ions. Now, again many ions are missing in the simulation box, as compared to the real solution: Few $\mathrm{H^+}$ ions, many $\mathrm{OH^-}$ ions, and a comparable amount of the $\mathrm{M^+}$ ions.
#
# To further illustrate this subject, we compare the concentration of the neutralizer ion $\mathrm{B^+}$ calculated in the simulation with the expected number of ions of each species. At a given $\mathrm{pH}$ and $\mathrm{p}K_{\mathrm{A}}$ we can calculate the expected degree of ionization from the Henderson-Hasselbalch equation. Then we apply the electroneutrality condition
# $$c_\mathrm{A^-} + c_\mathrm{OH^-} + c_\mathrm{X^-} = c_\mathrm{H^+} + c_\mathrm{M^+}$$
# where we use either $c_\mathrm{X^-}=0$ or $c_\mathrm{M^+}=0$ because we always only add extra acid or base, but never both. Adding both would be equivalent to adding extra salt $\mathrm{MX}$.
# We obtain the concentrations of $\mathrm{OH^-}$ and $\mathrm{H^+}$ from the input $\mathrm{pH}$ value, and substitute them to the electroneutrality equation to obtain
# $$\alpha c_\mathrm{acid} + 10^{-(\mathrm{p}K_{\mathrm{w}} - \mathrm{pH})} - 10^{-\mathrm{pH}} = c_\mathrm{M^+} - c_\mathrm{X^-}$$
# Depending on whether the left-hand side of this equation is positive or negative we know whether we should add $\mathrm{M^+}$ or $\mathrm{X^-}$ ions.
# +
# average concentration of B+ is the same as the concentration of A-
av_c_Bplus = av_alpha * C_ACID
err_c_Bplus = err_alpha * C_ACID # error in the average concentration
full_pH_range = np.linspace(2, 12, 100)
ideal_c_Aminus = ideal_alpha(full_pH_range, pKa) * C_ACID
ideal_c_OH = np.power(10.0, -(pKw - full_pH_range))*ureg.molar
ideal_c_H = np.power(10.0, -full_pH_range)*ureg.molar
# ideal_c_M is calculated from electroneutrality
ideal_c_M = (ideal_c_Aminus + ideal_c_OH - ideal_c_H)
# plot the simulation results compared with the ideal results of the cations
plt.figure(figsize=(10, 6), dpi=80)
plt.errorbar(pHs,
av_c_Bplus.magnitude,
err_c_Bplus.magnitude,
marker='o', c="tab:blue", linestyle='none',
label=r"measured $c_{\mathrm{B^+}}$", zorder=2)
plt.plot(full_pH_range,
ideal_c_H.magnitude,
c="tab:green",
label=r"ideal $c_{\mathrm{H^+}}$",
zorder=0)
plt.plot(full_pH_range[np.nonzero(ideal_c_M.magnitude > 0.)],
ideal_c_M.magnitude[np.nonzero(ideal_c_M.magnitude > 0.)],
c="tab:orange",
label=r"ideal $c_{\mathrm{M^+}}$",
zorder=0)
plt.plot(full_pH_range,
ideal_c_Aminus.magnitude,
c="tab:blue",
ls=(0, (5, 5)),
label=r"ideal $c_{\mathrm{A^-}}$",
zorder=1)
plt.yscale("log")
plt.ylim(5e-6,1e-2)
plt.xlabel('input pH', fontsize=16)
plt.ylabel(r'concentration $c$ $[\mathrm{mol/L}]$', fontsize=16)
plt.legend(fontsize=16)
plt.show()
# -
# The plot shows that at intermediate $\mathrm{pH}$ the concentration of $\mathrm{B^+}$ ions is approximately equal to the concentration of $\mathrm{M^+}$ ions. Only at one specific $\mathrm{pH}$ is the concentration of $\mathrm{B^+}$ ions equal to the concentration of $\mathrm{H^+}$ ions. This is the $\mathrm{pH}$ one obtains when dissolving the weak acid $\mathrm{A}$ in pure water.
#
# In an ideal system, the ions missing in the simulation have no effect on the ionization degree. In an interacting system, the presence of ions in the box affects the properties of other parts of the system. Therefore, in an interacting system this discrepancy is harmless only at intermediate $\mathrm{pH}$. The effect of the small ions on the rest of the system can be estimated from the overall ionic strength:
# $$ I = \frac{1}{2}\sum_i c_i z_i^2 $$
# +
ideal_c_X = -(ideal_c_Aminus + ideal_c_OH - ideal_c_H)
ideal_ionic_strength = 0.5 * \
(ideal_c_X + ideal_c_M + ideal_c_H + ideal_c_OH + 2 * C_SALT)
# in constant-pH simulation ideal_c_Aminus = ideal_c_Bplus
cpH_ionic_strength = 0.5 * (ideal_c_Aminus + 2 * C_SALT)
cpH_ionic_strength_measured = 0.5 * (av_c_Bplus + 2 * C_SALT)
cpH_error_ionic_strength_measured = 0.5 * err_c_Bplus
plt.figure(figsize=(10, 6), dpi=80)
plt.errorbar(pHs,
cpH_ionic_strength_measured.magnitude,
cpH_error_ionic_strength_measured.magnitude,
c="tab:blue",
linestyle='none', marker='o',
label=r"measured", zorder=3)
plt.plot(full_pH_range,
cpH_ionic_strength.magnitude,
c="tab:blue",
ls=(0, (5, 5)),
label=r"constant-pH", zorder=2)
plt.plot(full_pH_range,
ideal_ionic_strength.magnitude,
c="tab:orange",
linestyle='-',
label=r"Henderson-Hasselbalch", zorder=1)
plt.ylim(1.8e-3,3e-3)
plt.xlabel('input pH', fontsize=16)
plt.ylabel(r'Ionic Strength [$\mathrm{mol/L}$]', fontsize=16)
plt.legend(fontsize=16)
plt.show()
# -
# We see that the ionic strength in the simulation box significantly deviates from the ionic strength of the real solution only at high or low $\mathrm{pH}$ values. If the $\mathrm{p}K_{\mathrm{A}}$ value is sufficiently large, then the deviation at very low $\mathrm{pH}$ can also be neglected because then the polymer is uncharged in the region where the ionic strength is not correctly represented in the constant-$\mathrm{pH}$ simulation. At a high $\mathrm{pH}$ the ionic strength will have an effect on the weak acid, because then it is fully charged. The $\mathrm{pH}$ range in which the constant-$\mathrm{pH}$ method uses approximately the right ionic strength depends on the salt concentration, the weak acid concentration and the $\mathrm{p}K_{\mathrm{A}}$ value. See also <a href='#[Landsgesell2019]'>[Landsgesell2019]</a> for a more detailed discussion of this issue and its consequences.
#
# ## Suggested problems for further work
#
# * Try changing the concentration of ionizable species in the non-interacting system. You should observe that it does not affect the obtained titration curve.
#
# * Try changing the number of samples and the number of particles to see how the estimated error and the number of uncorrelated samples will change. Be aware that if the number of uncorrelated samples is low, the error estimation is too optimistic.
#
# * Try running the same simulations with steric repulsion and then again with electrostatic interactions. Observe how the ionization equilibrium is affected by various interactions. Warning: simulations with electrostatics are much slower. If you want to obtain your results more quickly, then decrease the number of $\mathrm{pH}$ values.
# ## References
#
# <span id='[Janke2002]'></span>[Janke2002] Janke W. Statistical Analysis of Simulations: Data Correlations and Error Estimation,
# In Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes,
# J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Jülich,
# NIC Series, Vol. 10, ISBN 3-00-009057-6, pp. 423-445, 2002, [link](https://www.physik.uni-leipzig.de/~janke/Paper/nic10_423_2002.pdf).<br>
# <span id='[Landsgesell2019]'></span>[Landsgesell2019] Landsgesell, J.; Nová, L.; Rud, O.; Uhlík, F.; Sean, D.; Hebbeker, P.; Holm, C.; Košovan, P. Simulations of Ionization Equilibria in Weak Polyelectrolyte Solutions and Gels. Soft Matter 2019, 15 (6), 1155–1185, doi:[10.1039/C8SM02085J](https://doi.org/10.1039/C8SM02085J).<br>
# <span id='[Reed1992]'></span>[Reed1992] Reed, C. E.; Reed, W. F. Monte Carlo Study of Titration of Linear Polyelectrolytes. The Journal of Chemical Physics 1992, 96 (2), 1609–1620, doi:[10.1063/1.462145](https://doi.org/10.1063/1.462145).<br>
# <span id='[Smith1994]'></span>[Smith1994] Smith, W. R.; Triska, B. The Reaction Ensemble Method for the Computer Simulation of Chemical and Phase Equilibria. I. Theory and Basic Examples. The Journal of Chemical Physics 1994, 100 (4), 3019–3027, doi:[10.1063/1.466443](https://doi.org/10.1063/1.466443).<br>
# <span id='[Israelachvili2011]'></span>[Israelachvili2011] Israelachvili, J. N. Intermolecular and Surface Forces, Third Edition, 2011, Academic Press, ISBN 978-0-12-391927-4, doi:[10.1016/C2011-0-05119-0](https://doi.org/10.1016/C2011-0-05119-0).
| 55,358 |
/Tp 2/Modelos/CrossValid/CV.ipynb
|
5330d2993ba8d773348a36ae0f6d81fc81d8a5ec
|
[] |
no_license
|
Fiuba-Big-Data-Analytics/Fiuba-Big-Data-Analytics-TPs
|
https://github.com/Fiuba-Big-Data-Analytics/Fiuba-Big-Data-Analytics-TPs
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 387,935 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.model_selection import TimeSeriesSplit
# +
test = pd.read_csv('/home/leandro/Documentos/Organizacion de datos/Datos Santi/train_pre (1).csv')
train = pd.read_csv('/home/leandro/Documentos/Organizacion de datos/Datos Santi/train_pre (1).csv')
# -
train = train.drop(['Opportunity_ID'], axis=1)
test = test.drop(['Opportunity_ID'], axis=1)
# +
X_train, X_test, y_train, y_test = train_test_split(train.drop(columns = 'Stage'),train['Stage'],test_size=0.2)
# -
# # Grid Search (tuned)
#
X = train.drop(['Stage'], axis=1)
y = train[['Stage']]
# +
param_test = {
'max_depth':range(3,9,1),
'gamma':[0,0.1,0.2,0.3,0.4,0.5],
'n_estimators':range(20,210,10),
'learning_rate':[0.05,0.1,0.15,0.2,0.25,0.3],
}
gsearch = GridSearchCV(estimator = xgb.XGBClassifier(),
param_grid = param_test, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch.fit(X,y)
gsearch.cv_results_, gsearch.best_params_, gsearch.best_score_
# -
# # CV (tuned)
xg = xgb.XGBClassifier(learning_rate =0.3,
n_estimators=200,
max_depth=8,
gamma=0.5)
xg.fit(X_train,y_train)
xg.score(X_test, y_test)
# +
X = train.drop(columns = 'Stage')
y = train['Stage']
CV=10
tscv = TimeSeriesSplit(n_splits=CV)
cv_df10 = pd.DataFrame(index=range(CV))
entries = []
model_name = xg.__class__.__name__
accuracies = cross_val_score(xg, X, y, cv=tscv)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df10 = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
cv_df10
# -
# # Grid Search
#
param_test1 = {
'max_depth':range(3,9,1)
}
gsearch1 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.05, n_estimators=20,
gamma=0),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(X,y)
gsearch1.cv_results_, gsearch1.best_params_, gsearch1.best_score_
param_test1 = {
'gamma':[0,0.1,0.2,0.3,0.4,0.5]
}
gsearch1 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.05, n_estimators=20,
max_depth = 4),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(X,y)
gsearch1.cv_results_, gsearch1.best_params_, gsearch1.best_score_
param_test1 = {
'n_estimators':range(20,210,10)
}
gsearch1 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.05,
                        max_depth = 4, gamma = 0.4),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(X,y)
gsearch1.cv_results_, gsearch1.best_params_, gsearch1.best_score_
param_test1 = {
'learning_rate':[0.05,0.1,0.15,0.2,0.25,0.3]
}
gsearch1 = GridSearchCV(estimator = xgb.XGBClassifier( learning_rate =0.05, n_estimators=40,
                        max_depth = 4, gamma = 0.4),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(X,y)
gsearch1.cv_results_, gsearch1.best_params_, gsearch1.best_score_
xg = xgb.XGBClassifier(learning_rate =0.3,
n_estimators=200,
max_depth=8,
gamma=0.5)
xg.fit(X_train,y_train)
xg.score(X_test, y_test)
# # CV
# +
X = train.drop(columns = 'Stage')
y = train['Stage']
CV=100
tscv = TimeSeriesSplit(n_splits=CV)
cv_df100 = pd.DataFrame(index=range(CV))
entries = []
model_name = xg.__class__.__name__
accuracies = cross_val_score(xg, X, y, cv=tscv)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df100 = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
# -
cv_df100
cv_df100['accuracy'].mean()
# +
plt.style.use('default')
plt.rcParams['figure.figsize'] = (20, 10)
fig = plt.figure()
sns.set(style="whitegrid")
sns.set(font_scale = 2)
sns.boxplot(x='model_name', y='accuracy', data=cv_df100)
sns.stripplot(x='model_name', y='accuracy', data=cv_df100,
size=8, jitter=True, edgecolor="gray", linewidth=2)
#fig.suptitle('Models', fontsize=28)
plt.xlabel('Model used', fontsize=24)
plt.ylabel('Scores obtained', fontsize=24)
plt.show()
# +
X = train.drop(columns = 'Stage')
y = train['Stage']
CV=300
tscv = TimeSeriesSplit(n_splits=CV)
cv_df300 = pd.DataFrame(index=range(CV))
entries = []
model_name = xg.__class__.__name__
accuracies = cross_val_score(xg, X, y, cv=tscv)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df300 = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
# -
cv_df300
cv_df300['accuracy'].mean()
# +
plt.style.use('default')
plt.rcParams['figure.figsize'] = (20, 10)
fig = plt.figure()
sns.set(style="whitegrid")
sns.set(font_scale = 2)
sns.boxplot(x='model_name', y='accuracy', data=cv_df300)
sns.stripplot(x='model_name', y='accuracy', data=cv_df300,
size=8, jitter=True, edgecolor="gray", linewidth=2)
#fig.suptitle('Models', fontsize=28)
plt.xlabel('Model used', fontsize=24)
plt.ylabel('Scores obtained', fontsize=24)
plt.show()
# -
# # CV with varying number of splits
# +
X = train.drop(columns = 'Stage')
y = train['Stage']
cv_df = pd.DataFrame(columns=['CV', 'accuracy_mean'])
for CV in range(2,51,1):
model=xgb.XGBClassifier(learning_rate =0.3,
n_estimators=200,
max_depth=8,
gamma=0.5)
tscv = TimeSeriesSplit(n_splits=CV)
accuracies = cross_val_score(model, X, y, cv=tscv)
cv_df.loc[CV-2] = [CV,accuracies.mean()]
# -
cv_df
ax = cv_df.plot.line(x = 'CV', y= 'accuracy_mean', grid=True)
ax.set_xlabel("CV used")
ax.set_ylabel("Score")
| 6,522 |
/notebooks/multi-year-plot.ipynb
|
c68a4998406cbb2b2ace3f6df8f313ae8475f40c
|
[] |
no_license
|
bmjoshi/ECAL_FAIR
|
https://github.com/bmjoshi/ECAL_FAIR
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 109,863 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: pythonDSAI
# language: python
# name: pythondsai
# ---
# ## Assignment 2
#
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.datasets import make_classification, make_blobs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X, y = make_blobs(n_samples=500, centers=2, n_features=2, random_state=0)
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
intercept = np.ones((X_train.shape[0], 1))
X_train = np.concatenate((intercept, X_train), axis=1)
intercept = np.ones((X_test.shape[0], 1))
X_test = np.concatenate((intercept, X_test), axis=1)
class LogisticRegression:
def __init__(self, method='mini-batch',max_iter=1000, alpha=0.001):
self.method = method
self.max_iter = max_iter
self.alpha = alpha
def mini_batch_GD(self,X,y):
self.w = np.zeros(X.shape[1])
self.loss = []
self.iters = []
batch_size = int(0.1 * X.shape[0])
for i in range(self.max_iter):
shuffled_index = np.random.permutation(X.shape[0])
X_shuffled = X[shuffled_index]
y_shuffled = y[shuffled_index]
for index in range(0, X.shape[0], batch_size):
batch_X = X_shuffled[index : index + batch_size]
batch_y = y_shuffled[index : index + batch_size]
cost, grad = self.gradient(batch_X, batch_y, self.w)
                self.w = self.w - self.alpha * grad
if i % 500 ==0:
print(f"Cost at iteration {i}", cost)
self.loss.append(cost)
self.iters.append(i)
return self.w, i
def plot(self):
plt.plot(self.iters, self.loss, label='Training Losses')
plt.xlabel('num of iters')
plt.ylabel('training loss')
plt.title('Training loss vs number of iters')
plt.legend()
def gradient(self, X, y, w):
m = X.shape[0]
h = self.h_theta(X)
error = h - y
cost = -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
grad = np.dot(X.T, error)
return cost, grad
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def h_theta(self,X):
return self.sigmoid(X @ self.w)
def output(self, pred):
return np.round(pred)
# -
model = LogisticRegression(max_iter=20000)
w, i = model.mini_batch_GD(X_train, y_train)
model.plot()
yhat = model.h_theta(X_test)
y_pred = model.output(yhat)
y_pred
class classification_report:
def __init__(self, actual, predict):
self.actual = actual
self.predict = predict
self.TP = sum((self.actual == 1) & (self.predict == 1))
self.TN = sum((self.actual == 0) & (self.predict == 0))
self.FN = sum((self.actual == 1) & (self.predict == 0))
self.FP = sum((self.actual == 0) & (self.predict == 1))
def accuracy(self):
self.acc = 100 * (self.TP + self.TN) / float(self.TP + self.TN + self.FP + self.FN)
return self.acc
def precision(self):
self.pre = 100 * self.TP / float(self.TP + self.FP)
return self.pre
def recall(self):
self.rec = 100 * self.TP / float(self.TP + self.FN)
return self.rec
def f1(self):
self.F1 = 2 * (self.precision() * self.recall()) / (self.precision() + self.recall())
return self.F1
report = classification_report(y_test, y_pred)
accuracy = report.accuracy()
recall = report.recall()
precision = report.precision()
f1 = report.f1()
print(f"accuracy: {accuracy}")
print(f"recall: {recall}")
print(f"precision: {precision}")
print(f"f1: {f1}")
# checking wherethere it is correct or not
from sklearn.metrics import classification_report
print("Report: ", classification_report(y_test, y_pred))
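# NOTE: the plotting code below assumes that `fig`, `ax1` and the per-year DataFrames
# `df_slim16`, `df_slim17`, `df_slim18` (with `p2`, `calibration` and `int_deliv_inv_ub`
# columns) have been defined earlier.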
ax1.set_xlabel('FED firing time')
#ax1.set_ylim([0.87,0.94])
plt.setp(ax1.get_xticklabels(), rotation=30, horizontalalignment='right')
#calibration data
ax1.scatter(df_slim16.index, df_slim16.p2,
label='2016 p2', color='red', s=4, linestyle='solid')
ax1.scatter(df_slim17.index, df_slim17.p2,
label='2017 p2', color='red', s=4, linestyle='solid')
ax1.scatter(df_slim18.index, df_slim18.p2,
label='2018 p2', color='red', s=4, linestyle='solid')
ax1.scatter(df_slim16.index, df_slim16.calibration,
label='2016 raw calib', color='green', s=1, linestyle='solid')
ax1.scatter(df_slim17.index, df_slim17.calibration,
label='2017 raw calib', color='green', s=1, linestyle='solid')
ax1.scatter(df_slim18.index, df_slim18.calibration,
label='2018 raw calib', color='green', s=1, linestyle='solid')
#lumi data on alternate y axis
ax2 = ax1.twinx()
ax2.set_ylabel('Integrated luminosity (/ub)')
ax2.plot(df_slim16.index, df_slim16.int_deliv_inv_ub,
label='2016 lumi', color='blue', linewidth=2, linestyle='dashed')
ax2.plot(df_slim17.index, df_slim17.int_deliv_inv_ub,
label='2017 lumi', color='blue', linewidth=2, linestyle='dashed')
ax2.plot(df_slim18.index, df_slim18.int_deliv_inv_ub,
label='2018 lumi', color='blue', linewidth=2, linestyle='dashed')
#combined legend
fig.legend(ncol=3, loc = (0.47,0.78),framealpha=1)
fig.show()
# -
| 5,715 |
/notebooks/binet-cauchy/Binet-Cauchy kernel + Grassmann vector tinker.ipynb
|
f5363b68df2b1ac927a80cfe4ddc32fc97bbc215
|
[
"MIT"
] |
permissive
|
maxentile/msm-learn
|
https://github.com/maxentile/msm-learn
| 7 | 2 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,478,040 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
# %matplotlib inline
import mdtraj as md
import pyemma
plt.rc('font', family='serif')
from msmbuilder.example_datasets import AlanineDipeptide
trajs = AlanineDipeptide().get().trajectories
print(AlanineDipeptide().description())
# +
# for alanine dipeptide, we can explicitly compute the corresponding feature vector
# for larger proteins, we will not be able to compute the feature vector, since its
# size is (n_atoms choose 3)
# -
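# A quick check of that combinatorial growth (a sketch; 22 atoms matches the 1540 triplet features used for alanine dipeptide below, the 5000-atom count is just an illustrative large system):
# +
from math import factorial
def n_triplet_features(n_atoms, k=3):
    # "n_atoms choose k": number of distinct atom triplets
    return factorial(n_atoms) // (factorial(n_atoms - k) * factorial(k))
print(n_triplet_features(22))    # 1540 features, still tractable
print(n_triplet_features(5000))  # ~2e10 features, far too many to enumerate explicitly
# -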
traj = trajs[0]
traj.xyz.shape
X = traj.xyz[0]
X.shape
from itertools import product
def grassman_vector(X):
''' Explicitly featurize'''
return np.array([np.linalg.det(X[np.array(s)]) for s in product(*[(False,True)]*len(X)) if sum(s)==3])
# %%time
grassman_vector(X)
# +
# what if we precompute the indices
indices = [np.array(s) for s in product(*[(False,True)]*len(X)) if sum(s)==3]
def grassman_vector_(X):
''' Explicitly featurize'''
return np.array([np.linalg.det(X[s]) for s in indices])
# -
# %%time
grassman_vector_(X)
# much better!
def grassman_featurize(trajs):
indices = [np.array(s) for s in product(*[(False,True)]*trajs[0].n_atoms) if sum(s)==3]
return [np.array([grassman_vector_(x) for x in traj.xyz]) for traj in trajs]
# %%time
X_grassman = grassman_featurize(trajs)
np.save('X_grassman_alanine.npy',X_grassman)
X_grassman = np.load('X_grassman_alanine.npy')
X_grassman = [x for x in X_grassman]
X_grassman[0].shape
tica = pyemma.coordinates.tica(X_grassman)
plt.plot(tica.eigenvalues)
plt.xlabel('Index')
plt.ylabel('Eigenvalue')
plt.title('tICA eigenvalues from Grassmann features')
plt.savefig('ala_tica_eigenvalues_grassmann.jpg',dpi=300)
plt.plot(np.cumsum(tica.eigenvalues**2))
plt.xlabel(r'Number of tICs, $k$')
plt.ylabel(r'$\sum_{i=1}^k \lambda_i^2$',rotation=0)
plt.title('Cumulative kinetic variance explained using Grassmann features')
plt.savefig('ala_tica_kin_var_grassmann.jpg',dpi=300)
X_tica = tica.get_output()
np.sum(tica.eigenvalues[:20])
kmeans = pyemma.coordinates.cluster_mini_batch_kmeans(X_tica,k=100,max_iter=1000)
dtrajs = [dtraj.flatten() for dtraj in kmeans.get_output()]
nits=20
its = pyemma.msm.its(dtrajs,lags=range(1,101),nits=nits)
pyemma.plots.plot_implied_timescales(its,units='ps')
plt.title('100-state decomposition of Grassmann features')
plt.savefig('ala_grassmann_kmeans.jpg',dpi=300)
pyemma.plots.plot_implied_timescales(its,units='ps',ylog=False)
evecs = tica.eigenvectors[:,:20]
def random_feature_indices(total_features,n_select=1000):
all_indices = np.arange(total_features)
npr.shuffle(all_indices)
feature_ind = np.zeros(total_features,dtype=bool)
feature_ind[all_indices[:n_select]] = True
return feature_ind
curves = []
for i in range(10):
feature_mask = random_feature_indices(X_grassman[0].shape[1],n_select=100)
tica = pyemma.coordinates.tica([x[:,feature_mask] for x in X_grassman],var_cutoff=1.0)
curves.append(np.cumsum(tica.eigenvalues**2))
for curve in curves:
plt.plot(curve)
#curves = np.array(curves)
curves = []
for i in range(100):
feature_mask = random_feature_indices(X_grassman[0].shape[1],n_select=50)
tica = pyemma.coordinates.tica([x[:,feature_mask] for x in X_grassman],var_cutoff=1.0)
curves.append(np.cumsum(tica.eigenvalues**2))
for curve in curves:
plt.plot(curve)
# +
# how does the total explained kinetic variance depend on the number of features selected?
# -
tot_kin_var = np.array([curve[-1] for curve in curves])
tot_kin_var.mean(),tot_kin_var.std()
# +
n_features = [5,10,20,50,100,200,400,600,800,1000]
all_curves = dict()
n_replicates = 100
for n_select in n_features:
curves = []
for i in range(n_replicates):
feature_mask = random_feature_indices(X_grassman[0].shape[1],n_select=n_select)
tica = pyemma.coordinates.tica([x[:,feature_mask] for x in X_grassman],var_cutoff=1.0)
curves.append(np.cumsum(tica.eigenvalues**2))
all_curves[n_select] = curves
# -
n_features[:7]
# +
n_replicates = 10
for n_select in n_features[7:]:
print(n_select)
curves = []
for i in range(n_replicates):
print(i)
feature_mask = random_feature_indices(X_grassman[0].shape[1],n_select=n_select)
tica = pyemma.coordinates.tica([x[:,feature_mask] for x in X_grassman],var_cutoff=1.0)
curves.append(np.cumsum(tica.eigenvalues**2))
all_curves[n_select] = curves
# +
n_replicates = 10
for n_select in n_features[7:]:
print(n_select)
curves = []
for i in range(n_replicates):
print(i)
feature_mask = random_feature_indices(X_grassman[0].shape[1],n_select=n_select)
tica = pyemma.coordinates.tica([x[:,feature_mask] for x in X_grassman],var_cutoff=1.0)
curves.append(np.cumsum(tica.eigenvalues**2))
all_curves[n_select] = curves
# -
mean_kin_var = [np.array([curve[-1] for curve in all_curves[n]]).mean() for n in n_features]
std_kin_var = [np.array([curve[-1] for curve in all_curves[n]]).std() for n in n_features]
# add total n_features
lags.append(1540)
mean_kin_var.append(np.sum(tica.eigenvalues**2))
std_kin_var.append(0)
n_features.append(1540)
fractions = np.array(n_features) / 1540.0
fractions
tica = pyemma.coordinates.tica(X_grassman,var_cutoff=1.0)
tot_var = np.sum(tica.eigenvalues**2)
plt.errorbar(n_features,mean_kin_var,yerr=std_kin_var)
#plt.plot(n_features,mean_kin_var,'.')
plt.hlines(tot_var,0,1540,linestyles='--')
plt.hlines(3.59,0,1540,linestyles='--')
plt.xlim(0,1540)
plt.ylim(0,11)
plt.xlabel('# of random features')
plt.ylabel('Total kinetic variance')
plt.title('Kinetic variance contained in random Grassmann features')
plt.savefig('kinetic_var_explained_ala_g.jpg',dpi=300)
#plt.xticks(n_features,fractions)
# +
# so, about 20 randomly selected Grassmann features are better than exhaustive structural features
# and about 200 randomly selected Grassmann features are better than
# +
# how does the number of retained tICs depend on the number of input features?
# -
[len(c) for c in curves]
# +
# what does
subset_sizes = [10,100,500,1000]
for i in range(10):
    pass  # placeholder: this exploration cell was left unfinished
# -
feature_mask = random_feature_indices(X_grassman[0].shape[1],n_select=1540)
tica = pyemma.coordinates.tica([x[:,feature_mask] for x in X_grassman],var_cutoff=1.0)
np.sum(tica.eigenvalues**2)
X_grassman[0].shape
import triangle
Y_tica = np.vstack(X_tica)
n_tics=5
triangle.corner(Y_tica[:,:n_tics],
plot_contours=False,
labels=['tIC{0}'.format(i+1) for i in range(n_tics)]
)
lag_time=20
msm = pyemma.msm.estimate_markov_model(dtrajs,lag_time)
plt.imshow(msm.transition_matrix,interpolation='none',cmap='Blues')
# +
def plot_contiguous(T,mapping):
sorted_inds = np.array(sorted(range(len(T)),key=lambda i:mapping[i]))
plt.imshow(T[sorted_inds][:,sorted_inds],interpolation='none',cmap='Blues')
plt.colorbar()
from sklearn.cluster import SpectralBiclustering
cocluster = SpectralBiclustering(4,svd_method='arpack')
cocluster.fit(msm.transition_matrix)
plot_contiguous(msm.transition_matrix,cocluster.row_labels_)
# -
plt.plot(msm.timescales(),'.')
np.trace(msm.transition_matrix)
from msmbuilder.cluster import MiniBatchKMedoids
kmed = MiniBatchKMedoids(n_clusters=100,metric='rmsd')
dtrajs_rmsd = kmed.fit_transform(trajs)
lag_time=20
msm_rmsd = pyemma.msm.estimate_markov_model(dtrajs_rmsd,lag_time)
plt.imshow(msm_rmsd.transition_matrix,interpolation='none',cmap='Blues')
np.trace(msm_rmsd.transition_matrix)
from sklearn.cluster import SpectralBiclustering
T = msm_rmsd.transition_matrix
cocluster = SpectralBiclustering(4,svd_method='arpack')
cocluster.fit(T)
plot_contiguous(T,cocluster.row_labels_)
plt.plot(msm_rmsd.timescales(),'.')
nits=20
its = pyemma.msm.its(dtrajs_rmsd,lags=range(1,101),nits=nits)
pyemma.plots.plot_implied_timescales(its,units='ps')
plt.title('100-state decomposition w.r.t. minRMSD')
plt.savefig('ala_rmsd_kmeds.jpg',dpi=300)
# So this is promising! The Binet-Cauchy kernel corresponds to dot-products in a useful feature-space.
#
# Here, we explicitly computed those features, and weighted them optimally using tICA. The resulting discretization
#
# It would be useful if we knew how to weight the features for large systems.
# can I do this with the MetEnkephalin example data?
from msmbuilder.example_datasets import MetEnkephalin
print(MetEnkephalin().description())
met = MetEnkephalin().get().trajectories
n_atoms = met[0].n_atoms
print(n_atoms/3)
from math import factorial
bin_coeff = lambda m,k=3:factorial(m)/(factorial(m-k)*factorial(k))
bin_coeff(25)
# %%time
from msmbuilder.cluster import MiniBatchKMedoids
kmed = MiniBatchKMedoids(n_clusters=100,metric='rmsd')
dtrajs_rmsd_met = kmed.fit_transform(met)
nits=20
its = pyemma.msm.its(dtrajs_rmsd_met,lags=range(1,101),nits=nits)
pyemma.plots.plot_implied_timescales(its,units='ps',dt=5)
plt.title('100-state decomposition of Met-enkaphalin w.r.t. minRMSD')
plt.savefig('met_rmsd_kmeds.jpg',dpi=300)
n_atoms = met[0].n_atoms
stride = 3
trajs_reduced_atoms = [traj.atom_slice(range(n_atoms)[::stride]) for traj in met]
trajs_reduced_atoms[0]
indices = [np.array(s) for s in product(*[(False,True)]*trajs_reduced_atoms[0].n_atoms) if sum(s)==3]
len(indices)
# %%time
# compute explicit grassman features
X_grassman_met = [np.array([grassman_vector_(x) for x in traj.xyz]) for traj in trajs_reduced_atoms]
# +
np.save('X_grassman_met.npy',X_grassman_met)
tica = pyemma.coordinates.tica(X_grassman_met)
X_tica = tica.get_output()
plt.plot(np.cumsum(tica.eigenvalues))
# +
# distance decay plots
# unweighted BC
# rmsd
# optimally weighted BC
from scipy.spatial.distance import euclidean
def distance_decay(X,lags=range(1,101)):
max_lag = max(lags)
distances = np.zeros((len(X) - max_lag,len(lags)))
for i in range(len(distances)):
for j in range(len(lags)):
distances[i,j] = euclidean(X[i],X[i+lags[j]])
return distances
def distance_decay_multiseq(Xs,lags=range(1,101)):
distances = []
for X in Xs:
distances.append(distance_decay(X,lags))
return np.vstack(distances)
distances = distance_decay(X_grassman[0])
plt.plot(distances.mean(0))
# -
def distance_decay_plot(distances,lags,c='blue',label=''):
n = len(distances)
mean = distances.mean(0)
stdev = distances.std(0)
stderr = stdev / np.sqrt(n) # standard error of the mean
err = stderr
plt.plot(lags,mean,color=c,label=label)
plt.fill_between(lags,mean-err,mean+err,alpha=0.5,color=c)
lags = range(1,101)
distance_decay_plot(distances,lags)
# +
lags = range(1,501)[::10]
distances_g = distance_decay_multiseq(X_grassman,lags)
distances_t = distance_decay_multiseq(X_tica,lags)
# +
distance_decay_plot(distances_g,lags,'blue', label='Raw')
distance_decay_plot(distances_t,lags,'green', label='Kinetically weighted')
plt.xlim(min(lags),max(lags))
plt.ylim(0,5)
plt.xlabel(r'$\tau$ (picoseconds)')
plt.ylabel(r'Average distance $\pm$ standard error')
plt.title(r'Alanine dipeptide: $d(\mathbf{x}_t,\mathbf{x}_{t+\tau})$ for varying $\tau$')
plt.legend(loc='lower right')
plt.savefig('distance_decay_ala.jpg',dpi=300)
# -
from scipy.spatial.distance import pdist,squareform
pdist_g = pdist(X_grassman[0][::10])
plt.imshow(squareform(pdist_g),interpolation='none',cmap='Blues')
plt.colorbar()
pdist_t = pdist(X_tica[0][::10])
plt.imshow(squareform(pdist_t),interpolation='none',cmap='Blues')
plt.colorbar()
X_grassman[0][::10].shape
X_grassman_met[0].shape
n_tics=5
triangle.corner(np.vstack(X_tica)[:,:n_tics],
plot_contours=False,
labels=['tIC{0}'.format(i+1) for i in range(n_tics)]
)
plt.plot(np.abs(tica.feature_TIC_correlation[:,0]))
plt.xlabel('Feature #')
plt.ylabel('Absolute value of correlation with tIC1')
plt.plot(np.abs(tica.feature_TIC_correlation).sum(1))
# +
A = np.abs(tica.feature_TIC_correlation)
plt.plot(A.dot(np.arange(428)[::-1]+1))
# hmm, this isn't exactly what I was hoping for -- it seems like only a few features
# are not informative, most are informative
# on the other hand, this might actually be good news for a random approximation -- if I draw some
# features totally at random, they'll likely be informative
# +
kmeans_met = pyemma.coordinates.cluster_mini_batch_kmeans(X_tica,k=100,max_iter=200)
dtrajs_met = [dtraj.flatten() for dtraj in kmeans_met.get_output()]
its = pyemma.msm.its(dtrajs_met,lags=range(1,101),nits=nits)
pyemma.plots.plot_implied_timescales(its)
# -
lags = range(1,101)# + range(100,1001)[::10]
its = pyemma.msm.its(dtrajs_met,lags=lags)
pyemma.plots.plot_implied_timescales(its)
lags = range(1,101) + range(100,1001)[::10]
its = pyemma.msm.its(dtrajs_met,lags=lags,nits=nits)
pyemma.plots.plot_implied_timescales(its)
msm = pyemma.msm.estimate_markov_model(dtrajs_met,lag=20)
plt.scatter(msm.stationary_distribution,msm.count_matrix_active.sum(0))
# +
kmeans_met = pyemma.coordinates.cluster_mini_batch_kmeans(X_tica,k=500,max_iter=1000)
dtrajs_met = [dtraj.flatten() for dtraj in kmeans_met.get_output()]
its = pyemma.msm.its(dtrajs_met,lags=range(1,101),nits=nits)
pyemma.plots.plot_implied_timescales(its)
# -
msm = pyemma.msm.estimate_markov_model(dtrajs_met,20)
msm.active_count_fraction
plt.plot(msm.timescales(),'.')
plt.imshow(msm.transition_matrix,interpolation='none',cmap='Blues')
plt.scatter(msm.stationary_distribution,msm.count_matrix_active.sum(0))
def select_random_features(n_atoms,n_features=1000,seed=0):
    npr.seed(seed)
all_indices = np.arange(n_atoms)
feature_indices = []
for i in range(n_features):
npr.shuffle(all_indices)
feature_ind = np.zeros(n_atoms,dtype=bool)
feature_ind[all_indices[:3]] = True
feature_indices.append(feature_ind)
return feature_indices
[sum(f) for f in feature_indices]
factorial(75) / (factorial(75 - 3) * factorial(3))
feature_indices = select_random_features(75,n_features=500)
# +
def grassman_vector_with_indices(X,indices):
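    # determinant of the 3x3 coordinate block selected by the boolean index s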
return np.array([np.linalg.det(X[s]) for s in indices])
def grassman_featurize_(trajs,indices):
return [np.array([grassman_vector_with_indices(x,indices) for x in traj.xyz]) for traj in trajs]
# -
# %%time
X_grassman_random_met = grassman_featurize_(met,feature_indices)
# +
tica_r = pyemma.coordinates.tica(X_grassman_random_met)
X_tica_r = tica_r.get_output()
plt.plot(np.cumsum(tica_r.eigenvalues))
# +
kmeans_met_r = pyemma.coordinates.cluster_mini_batch_kmeans(X_tica_r,k=100,max_iter=100)
dtrajs_met_r = [dtraj.flatten() for dtraj in kmeans_met_r.get_output()]
its = pyemma.msm.its(dtrajs_met_r,lags=range(1,101),nits=nits)
pyemma.plots.plot_implied_timescales(its)
# -
| 14,991 |
/HWK 03/whale_analysis.ipynb
|
6b7205be7ddf1fbf19f8aa9dd477fc7e91e5451c
|
[] |
no_license
|
godz1919/python-homework
|
https://github.com/godz1919/python-homework
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,274,921 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from python_utils import *
import time
import numpy as np
from mxnet import nd, autograd, gluon
from mxnet.gluon import nn, rnn
import mxnet as mx
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.decomposition import PCA
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import xgboost as xgb
from sklearn.metrics import accuracy_score
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
context = mx.cpu(); model_ctx=mx.cpu()
mx.random.seed(1719)
def parser(x):
return datetime.datetime.strptime(x,'%d-%m-%Y')
dataset_ex_df = pd.read_csv('dataset.csv', header=0, parse_dates=[0], date_parser=parser)
dataset_ex_df[['Date', 'Close']].head(3)
print('There are {} number of days in the dataset.'.format(dataset_ex_df.shape[0]))
plt.figure(figsize=(14, 5), dpi=100)
plt.plot(dataset_ex_df['Date'], dataset_ex_df['Close'], label='Goldman Sachs stock')
plt.vlines(datetime.date(2016,4, 20), 0, 270, linestyles='--', colors='gray', label='Train/Test data cut-off')
plt.xlabel('Date')
plt.ylabel('USD')
plt.title('Figure 2: Goldman Sachs stock price')
plt.legend()
plt.show()
num_training_days = int(dataset_ex_df.shape[0]*.7)
print('Number of training days: {}. Number of test days: {}.'.format(num_training_days, \
dataset_ex_df.shape[0]-num_training_days))
def get_technical_indicators(data_s):
# Create 7 and 21 days Moving Average
data_s['ma7'] = data_s['Close'].rolling(window=7).mean()
data_s['ma21'] = data_s['Close'].rolling(window=21).mean()
# Create Exponential moving average
data_s['ema'] = data_s['Close'].ewm(com=0.5).mean()
# Create Momentum
data_s['momentum'] = data_s['Close']-1
return data_s
dataset_TI_df = get_technical_indicators(dataset_ex_df[['Close']])
dataset_TI_df.head(10)
def plot_technical_indicators(dataset, last_days):
plt.figure(figsize=(16, 10), dpi=100)
shape_0 = dataset.shape[0]
xmacd_ = shape_0-last_days
dataset = dataset.iloc[-last_days:, :]
x_ = range(3, dataset.shape[0])
x_ =list(dataset.index)
# Plot first subplot
plt.subplot(2, 1, 1)
plt.plot(dataset['ma7'],label='MA 7', color='g',linestyle='--')
plt.plot(dataset['Close'],label='Closing Price', color='b')
plt.plot(dataset['ma21'],label='MA 21', color='r',linestyle='--')
plt.title('Technical indicators for Goldman Sachs - last {} days.'.format(last_days))
plt.ylabel('USD')
plt.legend()
# Plot second subplot
plt.subplot(2, 1, 2)
plt.plot(dataset['momentum'],label='Momentum', color='b',linestyle='-')
plt.legend()
plt.show()
plot_technical_indicators(dataset_TI_df, 400)
import bert
data_FT = dataset_ex_df[['Date', 'Close']]
close_fft = np.fft.fft(np.asarray(data_FT['Close'].tolist()))
fft_df = pd.DataFrame({'fft':close_fft})
fft_df['absolute'] = fft_df['fft'].apply(lambda x: np.abs(x))
fft_df['angle'] = fft_df['fft'].apply(lambda x: np.angle(x))
plt.figure(figsize=(14, 7), dpi=100)
fft_list = np.asarray(fft_df['fft'].tolist())
for num_ in [3, 6, 9, 100]:
fft_list_m10= np.copy(fft_list); fft_list_m10[num_:-num_]=0
plt.plot(np.fft.ifft(fft_list_m10), label='Fourier transform with {} components'.format(num_))
plt.plot(data_FT['Close'], label='Real')
plt.xlabel('Days')
plt.ylabel('USD')
plt.title('Figure 3: Goldman Sachs (close) stock prices & Fourier transforms')
plt.legend()
plt.show()
from collections import deque
items = deque(np.asarray(fft_df['absolute'].tolist()))
items.rotate(int(np.floor(len(fft_df)/2)))
plt.figure(figsize=(10, 7), dpi=80)
plt.stem(items)
plt.title('Figure 4: Components of Fourier transforms')
plt.show()
# +
from statsmodels.tsa.arima_model import ARIMA
from pandas import DataFrame
from pandas import datetime
series = data_FT['Close']
model = ARIMA(series, order=(5, 1, 0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# -
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
plt.figure(figsize=(10, 7), dpi=80)
plt.show()
# +
from pandas import read_csv
from pandas import datetime
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
X = series.values
size = int(len(X) * 0.66)
train, test = X[0:size], X[size:len(X)]
history = [x for x in train]
predictions = list()
for t in range(len(test)):
model = ARIMA(history, order=(5,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test[t]
history.append(obs)
# -
error = mean_squared_error(test, predictions)
print('Test MSE: %.3f' % error)
plt.figure(figsize=(12, 6), dpi=100)
plt.plot(test, label='Real')
plt.plot(predictions, color='red', label='Predicted')
plt.xlabel('Days')
plt.ylabel('USD')
plt.title('Figure 5: ARIMA model on GS stock')
plt.legend()
plt.show()
print('Total dataset has {} samples, and {} features.'.format(dataset_TI_df.shape[0], \
dataset_TI_df.shape[1]))
def get_feature_importance_data(data_income):
data = data_income.copy()
y = data['Close']
X = data.iloc[:, 1:]
train_samples = int(X.shape[0] * 0.65)
X_train = X.iloc[:train_samples]
X_test = X.iloc[train_samples:]
y_train = y.iloc[:train_samples]
y_test = y.iloc[train_samples:]
return (X_train, y_train), (X_test, y_test)
(X_train_FI, y_train_FI), (X_test_FI, y_test_FI) = get_feature_importance_data(dataset_TI_df)
regressor = xgb.XGBRegressor(gamma=0.0,n_estimators=150,base_score=0.7,colsample_bytree=1,learning_rate=0.05)
xgbModel = regressor.fit(X_train_FI,y_train_FI, \
eval_set = [(X_train_FI, y_train_FI), (X_test_FI, y_test_FI)], \
verbose=False)
eval_result = regressor.evals_result()
training_rounds = range(len(eval_result['validation_0']['rmse']))
plt.scatter(x=training_rounds,y=eval_result['validation_0']['rmse'],label='Training Error')
plt.scatter(x=training_rounds,y=eval_result['validation_1']['rmse'],label='Validation Error')
plt.xlabel('Iterations')
plt.ylabel('RMSE')
plt.title('Training Vs Validation Error')
plt.legend()
plt.show()
fig = plt.figure(figsize=(8,8))
plt.xticks(rotation='vertical')
plt.bar([i for i in range(len(xgbModel.feature_importances_))], xgbModel.feature_importances_.tolist(), tick_label=X_test_FI.columns)
plt.title('Figure 6: Feature importance of the technical indicators.')
plt.show()
def gelu(x):
return 0.5 * x * (1 + math.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * math.pow(x, 3))))
def relu(x):
return max(x, 0)
def lrelu(x):
return max(0.01*x, x)
# +
plt.figure(figsize=(15, 5))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=None)
ranges_ = (-10, 3, .25)
plt.subplot(1, 2, 1)
plt.plot([i for i in np.arange(*ranges_)], [relu(i) for i in np.arange(*ranges_)], label='ReLU', marker='.')
plt.plot([i for i in np.arange(*ranges_)], [gelu(i) for i in np.arange(*ranges_)], label='GELU')
plt.hlines(0, -10, 3, colors='gray', linestyles='--', label='0')
plt.title('Figure 7: GELU as an activation function for autoencoders')
plt.ylabel('f(x) for GELU and ReLU')
plt.xlabel('x')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot([i for i in np.arange(*ranges_)], [lrelu(i) for i in np.arange(*ranges_)], label='Leaky ReLU')
plt.hlines(0, -10, 3, colors='gray', linestyles='--', label='0')
plt.ylabel('f(x) for Leaky ReLU')
plt.xlabel('x')
plt.title('Figure 8: LeakyReLU')
plt.legend()
plt.show()
# +
batch_size = 64
VAE_data = dataset_TI_df
n_batches = VAE_data.shape[0]/batch_size
VAE_data = VAE_data.values
train_iter = mx.io.NDArrayIter(data={'data': VAE_data[:num_training_days,:-1]}, \
label={'label': VAE_data[:num_training_days, -1]}, batch_size = batch_size)
test_iter = mx.io.NDArrayIter(data={'data': VAE_data[num_training_days:,:-1]}, \
label={'label': VAE_data[num_training_days:,-1]}, batch_size = batch_size)
# -
model_ctx = mx.cpu()
class VAE(gluon.HybridBlock):
def __init__(self, n_hidden=400, n_latent=2, n_layers=1, n_output=784, \
batch_size=100, act_type='relu', **kwargs):
self.soft_zero = 1e-10
self.n_latent = n_latent
self.batch_size = batch_size
self.output = None
self.mu = None
super(VAE, self).__init__(**kwargs)
with self.name_scope():
self.encoder = nn.HybridSequential(prefix='encoder')
for i in range(n_layers):
self.encoder.add(nn.Dense(n_hidden, activation=act_type))
self.encoder.add(nn.Dense(n_latent*2, activation=None))
self.decoder = nn.HybridSequential(prefix='decoder')
for i in range(n_layers):
self.decoder.add(nn.Dense(n_hidden, activation=act_type))
self.decoder.add(nn.Dense(n_output, activation='sigmoid'))
def hybrid_forward(self, F, x):
h = self.encoder(x)
#print(h)
mu_lv = F.split(h, axis=1, num_outputs=2)
mu = mu_lv[0]
lv = mu_lv[1]
self.mu = mu
eps = F.random_normal(loc=0, scale=1, shape=(self.batch_size, self.n_latent), ctx=model_ctx)
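        # reparameterization trick: z = mu + sigma * eps, with sigma = exp(0.5 * lv)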
z = mu + F.exp(0.5*lv)*eps
y = self.decoder(z)
self.output = y
KL = 0.5*F.sum(1+lv-mu*mu-F.exp(lv),axis=1)
logloss = F.sum(x*F.log(y+self.soft_zero)+ (1-x)*F.log(1-y+self.soft_zero), axis=1)
loss = -logloss-KL
return loss
# +
n_hidden=400 # neurons in each layer
n_latent=2
n_layers=3 # num of dense layers in encoder and decoder respectively
n_output=VAE_data.shape[1]-1
net = VAE(n_hidden=n_hidden, n_latent=n_latent, n_layers=n_layers, n_output=n_output, batch_size=batch_size, act_type='relu')
# -
net.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())
net.hybridize()
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': .01})
print(net)
# +
n_epoch = 150
print_period = n_epoch // 10
start = time.time()
training_loss = []
validation_loss = []
for epoch in range(n_epoch):
epoch_loss = 0
epoch_val_loss = 0
train_iter.reset()
test_iter.reset()
n_batch_train = 0
for batch in train_iter:
n_batch_train +=1
data = batch.data[0].as_in_context(mx.cpu())
with autograd.record():
loss = net(data)
loss.backward()
trainer.step(data.shape[0])
epoch_loss += nd.mean(loss).asscalar()
n_batch_val = 0
for batch in test_iter:
n_batch_val +=1
data = batch.data[0].as_in_context(mx.cpu())
loss = net(data)
epoch_val_loss += nd.mean(loss).asscalar()
epoch_loss /= n_batch_train
epoch_val_loss /= n_batch_val
training_loss.append(epoch_loss)
validation_loss.append(epoch_val_loss)
end = time.time()
print('Training completed in {} seconds.'.format(int(end-start)))
# -
dataset_TI_df['Date'] = dataset_ex_df['Date']
vae_added_df = mx.nd.array(dataset_TI_df.iloc[:, :-1].values)
print('The shape of the newly created (from the autoencoder) features is {}.'.format(vae_added_df.shape))
# +
gan_num_features = dataset_TI_df.shape[1]
sequence_length = 17
class RNNModel(gluon.Block):
def __init__(self, num_embed, num_hidden, num_layers, bidirectional=False, \
sequence_length=sequence_length, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.num_hidden = num_hidden
with self.name_scope():
self.rnn = rnn.LSTM(num_hidden, num_layers, input_size=num_embed, \
bidirectional=bidirectional, layout='TNC')
self.decoder = nn.Dense(1, in_units=num_hidden)
def forward(self, inputs, hidden):
output, hidden = self.rnn(inputs, hidden)
decoded = self.decoder(output.reshape((-1, self.num_hidden)))
return decoded, hidden
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
lstm_model = RNNModel(num_embed=gan_num_features, num_hidden=500, num_layers=1)
lstm_model.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())
trainer = gluon.Trainer(lstm_model.collect_params(), 'adam', {'learning_rate': .01})
loss = gluon.loss.L1Loss()
# -
print(lstm_model)
# +
class TriangularSchedule():
def __init__(self, min_lr, max_lr, cycle_length, inc_fraction=0.5):
self.min_lr = min_lr
self.max_lr = max_lr
self.cycle_length = cycle_length
self.inc_fraction = inc_fraction
def __call__(self, iteration):
if iteration <= self.cycle_length*self.inc_fraction:
unit_cycle = iteration * 1 / (self.cycle_length * self.inc_fraction)
elif iteration <= self.cycle_length:
unit_cycle = (self.cycle_length - iteration) * 1 / (self.cycle_length * (1 - self.inc_fraction))
else:
unit_cycle = 0
adjusted_cycle = (unit_cycle * (self.max_lr - self.min_lr)) + self.min_lr
return adjusted_cycle
class CyclicalSchedule():
def __init__(self, schedule_class, cycle_length, cycle_length_decay=1, cycle_magnitude_decay=1, **kwargs):
self.schedule_class = schedule_class
self.length = cycle_length
self.length_decay = cycle_length_decay
self.magnitude_decay = cycle_magnitude_decay
self.kwargs = kwargs
def __call__(self, iteration):
cycle_idx = 0
cycle_length = self.length
idx = self.length
while idx <= iteration:
cycle_length = math.ceil(cycle_length * self.length_decay)
cycle_idx += 1
idx += cycle_length
cycle_offset = iteration - idx + cycle_length
schedule = self.schedule_class(cycle_length=cycle_length, **self.kwargs)
return schedule(cycle_offset) * self.magnitude_decay**cycle_idx
schedule = CyclicalSchedule(TriangularSchedule, min_lr=0.5, max_lr=2, cycle_length=500)
iterations=1500
plt.plot([i+1 for i in range(iterations)],[schedule(i) for i in range(iterations)])
plt.title('Learning rate for each epoch')
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.show()
# -
| 14,718 |
/Chapter10 tuples.ipynb
|
d5273d0f16888296e69a62db95b86ca718da3937
|
[] |
no_license
|
yfc1/python-for-informatics-in-python3
|
https://github.com/yfc1/python-for-informatics-in-python3
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 14,839 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pds
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
# We start by importing our data:
datatxt=pds.read_csv('data_tp1_app.txt',sep=" ",header=None)
data_entry_raw=open("data_tp1_app.txt","r").read().splitlines()
data_entry=[i.split(" ") for i in data_entry_raw]
# We display our data:
#pd.set_option('display.max_row', None)
datatxt
# We give our dataset a classe, x, y structure:
df=pds.DataFrame(np.array(datatxt),columns=["classe","x","y"])
df_entry=pds.DataFrame(data_entry,columns=["classe","x","y"])
df
df_entry
# We use pandas' describe function to get summary statistics on our dataset, just before computing them ourselves:
df.describe()
df_entry.describe()
# ### Finding the class centres:
#
# To find a class centre, we need the mean of each class with respect to x and y.
# We start by grouping each point into its class; here a class is a numpy array:
# 1) get the highest class number:
def classesnbr2(data):
return int(max([ligne[0] for ligne in data]))
def classesnbr(data):
dfmaxclass=pds.DataFrame(data.sort_values(by=["classe"], ascending=False))
maxi=int(dfmaxclass["classe"][0:1])
return maxi
# 2) compute the mean of each class
def moyenneclass_op(data):
    """Count the points of each class and sum their x values."""
    n_classes = classesnbr2(data)
    nbr_element = [0] * n_classes
    tot = [0.0] * n_classes
    for ligne in data:
        classe = int(ligne[0])
        nbr_element[classe - 1] += 1
        tot[classe - 1] += float(ligne[1])
    print(nbr_element, tot)
    return nbr_element, tot
def moyenne(data):
dfclass=pds.DataFrame(data,columns=["classe"])
print(dfclass)
dfX=pds.DataFrame(data.sort_values(by=["classe"], ascending=True),columns=["x"])
print(dfX)
dfY=pds.DataFrame(data.sort_values(by=["classe"], ascending=True),columns=["y"])
print(dfY)
# +
print(df_entry["classe"][0:3])
print(df["x"][0:3])
moyenne(df)
# -
def classmean(data,classe,axis):
    array = [float(ligne[axis+1]) for ligne in data if int(ligne[0])==int(classe)]
#array = []
total = sum(array)
return (total/len(array)) if len(array) !=0 else 0
# 3) collect the mean of each class:
def mean_of_classes(data):
means=[] #a vector
    for classe in range(classesnbr2(data)):
means.append([])
for i in range(2):
means[classe].append(classmean(data,classe+1,i))
return means
means=mean_of_classes(data_entry)
print(means)
print("les moyennes de classes sont : ")
pds.DataFrame(means, index=range(1,len(means)+1),columns=["µx","µy"])
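# A quick cross-check (minimal sketch, assuming df_entry holds the string values loaded above): the same class centres can be obtained directly with a pandas groupby.
centres = df_entry.astype({"x": float, "y": float}).groupby("classe")[["x", "y"]].mean()
print(centres)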
# Now we can import the data on which we have to make decisions:
data_test_raw = open("data_tp1_dec.txt","r").read().splitlines()
data_test = [i.split(" ") for i in data_test_raw]
df2 = pds.DataFrame(data_test,columns=['classe',"x","y"])
df2
# ### Computing the Euclidean distance by interpolation:
def distance_eucl(x1,y1,x2,y2):
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
def interpolation_eucl(means,data):
liste_interp=copy.deepcopy(data)
for ligne in range(len(liste_interp)):
#voici nos 2 nouvelles colonnes du tableau
liste_interp[ligne].append(-1)
liste_interp[ligne].append(-1)
#
x1=float(liste_interp[ligne][0+1])#le x de chaque ligne
y1=float(liste_interp[ligne][1+1])#le y de chaque ligne
#
for classe in range(0,len(means)):
x2=float(means[classe][0])
y2=float(means[classe][1])
#
if distance_eucl(x1,y1,x2,y2)<liste_interp[ligne][3] or liste_interp[ligne][3]==-1:
liste_interp[ligne][3]=distance_eucl(x1,y1,x2,y2)
liste_interp[ligne][4]=classe+1
return liste_interp
liste_interpolee = interpolation_eucl(means,data_test)
newdf=pds.DataFrame(liste_interpolee,columns=["classe","x","y","distance","Interpolation"])
newdf
# #### Determining TOP1 & TOP2
topp1=pds.DataFrame(newdf[newdf.classe!=newdf.Interpolation])
topp1
TOP1=((500-4)/500)*100
TOP1
# To find TOP2, we must look at the second-closest distance, because the previous table assigns each point X, Y to its nearest class; now we can also test against another class.
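# A minimal sketch of that idea (assuming the means and data_test variables defined above): a point counts as a TOP2 success when its true class is among its two nearest class centres.
# +
def top2_accuracy(means, data):
    correct = 0
    for ligne in data:
        vraie = int(ligne[0])
        x1, y1 = float(ligne[1]), float(ligne[2])
        dists = [distance_eucl(x1, y1, m[0], m[1]) for m in means]
        deux_proches = sorted(range(len(dists)), key=lambda c: dists[c])[:2]
        if (vraie - 1) in deux_proches:
            correct += 1
    return 100 * correct / len(data)
print(top2_accuracy(means, data_test))
# -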
# ### Finding the confusion matrix
Df_confusionarray=pds.DataFrame(topp1,columns=["classe","1","2","3","4","5"])
#pds.set_option('display.max_row', None)
def moyenneclass_op(data):
    """Count how many rows belong to each class."""
    counts = data['classe'].astype(int).value_counts().sort_index()
    print(counts)
    return counts
moyenneclass_op(df_entry)
Df_confusionarray
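# The confusion matrix itself can be read off the newdf table built above (a sketch using pandas crosstab, assuming the classe and Interpolation columns as defined earlier).
confusion = pds.crosstab(newdf["classe"].astype(int), newdf["Interpolation"].astype(int), rownames=["true"], colnames=["predicted"])
print(confusion)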
ning rate and clipping) to ensure proper convergence.
#
# ## Exercises
#
# 1. Try to implement a two-layer RNN from scratch using the single layer implementation we discussed in :numref:`sec_rnn_scratch`.
# 2. Replace the LSTM by a GRU and compare the accuracy and training speed.
# 3. Increase the training data to include multiple books. How low can you go on the perplexity scale?
# 4. Would you want to combine sources of different authors when modeling text? Why is this a good idea? What could go wrong?
#
# + [markdown] origin_pos=13 tab=["pytorch"]
# [Discussions](https://discuss.d2l.ai/t/1058)
#
| 6,891 |
/Python for Data Sceince/Week-4-Pandas/Week-4-Exercises-2/.ipynb_checkpoints/ex04_1_pandas-checkpoint.ipynb
|
57599c23a6fff603d7400e45eb7ad6988b007495
|
[] |
no_license
|
Peter-Pater/Data-Science
|
https://github.com/Peter-Pater/Data-Science
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 36,958 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
import os
import json
import time
import pickle
import requests
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from table_reader import TableReader
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, RidgeCV, LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import PolynomialFeatures
# Load Data
tr = TableReader()
df = tr.properties_vector(include_amenitites=True)
tr.close()
# +
features = df[df.columns.drop(['price', 'listingID'])]
label = df['price']
df.describe()
# -
# Split the data into training and test sets
X_train, X_test, y_train, y_test = tts(features, label, test_size=0.2)
# Simple Regression with Ordinary Least Squares (OLS)
regr = LinearRegression()
regr.fit(X_train,y_train)
print(mean_squared_error(y_test, regr.predict(X_test)))
print(regr.score(X_test,y_test))
# Ridge Regression
clf = Ridge(alpha=0.5)
clf.fit(X_train, y_train)
print(mean_squared_error(y_test, clf.predict(X_test)))
print(clf.score(X_test, y_test))
# Choose alpha for Ridge Regression
# +
import numpy as np
# try 200 alphas, log-spaced between 1e-10 and 1e-2
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = RidgeCV(alphas=alphas)
clf.fit(X_train, y_train)
#which alpha did it pick?
print(clf.alpha_)
# -
clf.score(X_test, y_test)
# Lasso Regression
clf = Lasso(alpha=0.5)
clf.fit(X_train, y_train)
print(mean_squared_error(y_test, clf.predict(X_test)))
clf.score(X_test, y_test)
# Logistic Regression
model = LogisticRegression().fit(X_train, y_train)
print(model.coef_, model.intercept_)  # inspect the fitted parameters
print(mean_squared_error(y_test, model.predict(X_test)))
model.score(X_test, y_test)
| 2,201 |
/ch0.ipynb
|
30ca331500f9972763710c776cdec9c8836f4429
|
[] |
no_license
|
oilmcut2019/Python_teaching_material
|
https://github.com/oilmcut2019/Python_teaching_material
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,212 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %load_ext autoreload
# %autoreload 2
import nolearn.lasagne.visualize
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
# %matplotlib inline
# -
from stats import Stats
# +
cnn1 = '/home/d/nets/MergeNetDropout_cylinder1_border_overlap_dropout_usevars/net.p'
cnn2 = '/home/d/nets/MergeNetDropout_cylinder1_larger_border_overlap_dropout_usevars/net.p'
cnn3 = '/home/d/nets/MergeNetThreeLegDropout_cylinder1_larger_border_overlap_dropout_usevars/net.p'
cnn4 = '/home/d/nets/MergeNetDropout_cylinder1_border_overlap_dropout_usevars_LargerDropout/net.p'
cnn5 = '/home/d/nets/MergeNetDropout_cylinder1_larger_border_overlap_dropout_usevars_LargerDropout/net.p'
cnn6 = '/home/d/nets/MergeNetThreeLeg_LargerDropout/net.p'
cnn = Stats.load_cnn(cnn4)
# -
nolearn.lasagne.visualize.draw_to_notebook(cnn)
Stats.run_dojo_xp(cnn)
fixes, vis = Stats.run_cylinder_xp(cnn)
s[i]
count += 1
# +
# alternative: PCA for dimensionality reduction
#sel = PCA()
#pca = sel.fit(X)
#print pca.explained_variance_ratio_
#X_sel = sel.fit_transform(X)
# -
y_array = np.load('user_class_array.npz')
y_np = y_array['matrix']
y = y_np.tolist()[0]
y = label_binarize(y, classes=[0, 1, 2, 3, 4])
# zero-r
from collections import Counter
data = Counter(y)
print data.most_common()
print 1195/float(len(y))
X_train, X_test, y_train, y_test = train_test_split(X_sel, y, test_size=0.1,
random_state=9)
# kernel rbf
cv_clf = OneVsRestClassifier(svm.SVC(C=10.0, gamma=0.001, random_state=9))
#cv_clf.set_params(C=0.01, kernel='linear')
#cv_clf.set_params(C=10.0, gamma=0.001, kernel='poly')
#cv_clf.set_params(C=10000.0, gamma=9.9999999999999995e-07, kernel='sigmoid')
kf = KFold(X_train.shape[0], 10)
cv_scores = cross_val_score(cv_clf, X_train, y_train, cv=kf, n_jobs=-1)
print cv_scores, 'CV accuracy: %0.2f (+/- %0.2f)' % (cv_scores.mean(), cv_scores.std()*2)
# kernel sigmoid
cv_clf_sig = OneVsRestClassifier(svm.SVC(C=10000.0, gamma=9.9999999999999995e-07, kernel='sigmoid', random_state=9))
kf_sig = KFold(X_train.shape[0], 10)
cv_scores_sig = cross_val_score(cv_clf_sig, X_train, y_train, cv=kf_sig, n_jobs=-1)
print cv_scores_sig, 'CV accuracy: %0.2f (+/- %0.2f)' % (cv_scores_sig.mean(), cv_scores_sig.std()*2)
# kernel linear
cv_clf_lin = OneVsRestClassifier(svm.SVC(C=0.01, gamma=1.0000000000000001e-09, kernel='linear', random_state=9))
kf_lin = KFold(X_train.shape[0], 10)
cv_scores_lin = cross_val_score(cv_clf_sig, X_train, y_train, cv=kf_lin, n_jobs=-1)
print cv_scores_lin, 'CV accuracy: %0.2f (+/- %0.2f)' % (cv_scores_lin.mean(), cv_scores_lin.std()*2)
# +
#cv_clf = svm.LinearSVC()
#cv_clf.set_params(C=10.0) #, gamma=0.001)
#kf = KFold(X_train.shape[0], 10)
#cv_scores = cross_val_score(cv_clf, X_train, y_train, cv=kf, n_jobs=-1)
#print cv_scores, 'CV accuracy: %0.2f (+/- %0.2f)' % (cv_scores.mean(), cv_scores.std()*2)
# -
cv_clf.fit(X_train, y_train)
feature_weights = cv_clf.coef_
feature_weights_class1 = feature_weights[0]
feature_weights_class2 = feature_weights[1]
feature_weights_class3 = feature_weights[2]
feature_weights_class4 = feature_weights[3]
feature_weights_class5 = feature_weights[4]
top_keys_class1 = sorted(range(len(feature_weights_class1)), key = lambda i: feature_weights_class1[i])[-3:]
top_keys_class2 = sorted(range(len(feature_weights_class2)), key = lambda i: feature_weights_class2[i])[-3:]
top_keys_class3 = sorted(range(len(feature_weights_class3)), key = lambda i: feature_weights_class3[i])[-3:]
top_keys_class4 = sorted(range(len(feature_weights_class4)), key = lambda i: feature_weights_class4[i])[-3:]
top_keys_class5 = sorted(range(len(feature_weights_class5)), key = lambda i: feature_weights_class5[i])[-3:]
with open('ngram_map_trim.txt', 'r') as rf:
ngram_map_trim = json.loads(rf.read())
ngram_map_trim_flip = dict((v,k) for k,v in ngram_map_trim.iteritems())
ngram_map_trim_flip[indices_dict[top_keys_class5[2]]]
y_pred = cv_clf.predict(X_test)
print(confusion_matrix(y_test, y_pred, labels=range(5)))
# +
# set final parameters
clf = OneVsRestClassifier(svm.SVC(C=10.0, gamma=0.001, random_state=9))
clf.fit(X_train, y_train)
#clf.set_params(C=10, gamma=0.001)
#clf.set_params(C=0.01, kernel='linear')
#clf.set_params(C=10, gamma=0.001, kernel='poly')
#clf.set_params(C=10000.0, gamma=9.9999999999999995e-07, kernel='sigmoid')
# accuracy of final model on the test set
acc = clf.score(X_test, y_test)
print('Accuracy: {:.4f}'.format(acc))
print ('Total: ' + str(len(y_test)) + ', Correctly classified: ' + str(len(y_test)*acc))
# +
# set final parameters
clf_sig = OneVsRestClassifier(svm.SVC(C=10000.0, gamma=9.9999999999999995e-07, kernel='sigmoid', random_state=9))
clf_sig.fit(X_train, y_train)
# accuracy of final model on the test set
acc_sig = clf_sig.score(X_test, y_test)
print('Accuracy: {:.4f}'.format(acc_sig)), len(y_test)
print ('Total: ' + str(len(y_test)) + ', Correctly classified: ' + str(len(y_test)*acc_sig))
# +
# set final parameters
clf_lin = OneVsRestClassifier(svm.SVC(C=0.01, gamma=1.0000000000000001e-09, kernel='linear', random_state=9))
clf_lin.fit(X_train, y_train)
# accuracy of final model on the test set
acc_lin = clf_lin.score(X_test, y_test)
print('Accuracy: {:.4f}'.format(acc_lin)), len(y_test)
print ('Total: ' + str(len(y_test)) + ', Correctly classified: ' + str(len(y_test)*acc_lin))
# +
#def vectorize(labels):
# vectorized_labels = np.zeros((len(labels), 5))
# for i in range(len(labels)):
# vectorized_labels[i][int(labels[i])] = 1
# return vectorized_labels
#y_test_vect = vectorize(y_test)
#y_pred_vect = vectorize(clf.predict(X_test))
#roc_scores = roc_auc_score(y_test_vect, y_pred_vect)
# -
y_test_vect = label_binarize(y_test, classes=[0, 1, 2, 3, 4])
y_pred = clf.predict(X_test)
y_pred_vect = label_binarize(y_pred, classes=[0, 1, 2, 3, 4])
roc_scores = roc_auc_score(y_test_vect, y_pred_vect, average='micro')
roc_scores
y_test_vect = label_binarize(y_test, classes=[0, 1, 2, 3, 4])
y_pred = clf_sig.predict(X_test)
y_pred_vect = label_binarize(y_pred, classes=[0, 1, 2, 3, 4])
roc_scores_sig = roc_auc_score(y_test_vect, y_pred_vect, average='micro')
roc_scores_sig
y_test_vect = label_binarize(y_test, classes=[0, 1, 2, 3, 4])
y_pred = clf_lin.predict(X_test)
y_pred_vect = label_binarize(y_pred, classes=[0, 1, 2, 3, 4])
roc_scores_lin = roc_auc_score(y_test_vect, y_pred_vect, average='micro')
roc_scores_lin
| 6,707 |
/MeJ - Jehan de Beauce - Pile ou Face.ipynb
|
1113bd40f21729885208c800d676f11da4a90c29
|
[] |
no_license
|
nsiJdB/MeJ_JdB_2021
|
https://github.com/nsiJdB/MeJ_JdB_2021
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 10,137 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MATHs.en.JEANS workshop - Lycée Jehan de Beauce
# # The "pile ou face" (heads or tails) problem
# To run the code cells below, place the mouse cursor in the cell containing the code to run, then click `Exécuter` (Run).
# ## Simulation for a 2-letter combination
# +
from random import randint
# program that builds a results table when the players choose two letters
def test(j1, j2, tirage):
    """Return whether one of the players has won, and which one."""
if j1[0] == tirage[-2] and j1[1] == tirage[-1]:
return True, 1
elif j2[0] == tirage[-2] and j2[1] == tirage[-1]:
return True, 2
else:
return False, 0
def jeu(j):
    """Move on to the next combination a player can choose."""
j[-1] += 1
if j[-1] == 2:
j[-1] = 0
j[-2] += 1
return j
def pf(conbinaison):
    """Convert the players' combinations into letters for display."""
final = ""
for i in conbinaison:
if i == 0:
final += "P"
else:
final += "F"
return final
def main():
joueur1 = [0,-1]
# le joueur 1 choisit sa configuration
joueur2 = [0,-1]
# le joueur 2 choisit sa configuration
gagne = 0
# variable qui compte combien de fois ou joueur 1 gagne
gagne2 = 0
# variable qui compte le nombre de fois ou joueur 2 gagne
print(" PP PF FP FF")
# On teste si les choix des joueurs sont tombe
# et combien de fois le joueur 1 ou le 2 gagnent
for a in range(4):
joueur1 = jeu(joueur1)
print(pf(joueur1), end = " ")
for b in range(4):
joueur2 = jeu(joueur2)
if joueur1 == joueur2:
print(" / ", end = "")
else:
for i in range(100000):
teste = False
# variable teste qui regarde si un joueur à gagner
L = []
L.append(randint(0,1))
# On commence une liste avec tous les tirs.
while teste != True:
L.append(randint(0,1))
teste, gagnant = test(joueur1,joueur2,L)
if gagnant == 1:
gagne += 1
if gagnant == 2:
gagne2 += 1
print(int(gagne/1000),"% /", int(gagne2/1000),"%", end = " ")
gagne = 0
gagne2 = 0
joueur2 = [0,-1]
print()
main()
# -
# ## Simulation for a 3-letter combination
# +
from random import randint
# program that builds a results table when the players choose three letters
def test(j1, j2, tirage):
    """Return whether one of the players has won, and which one."""
if j1[0] == tirage[-3] and j1[1] == tirage[-2] and j1[2] == tirage[-1]:
return True, 1
elif j2[0] == tirage[-3] and j2[1] == tirage[-2] and j2[2] == tirage[-1]:
return True, 2
else:
return False, 0
def jeu(j):
    """Move on to the next combination a player can choose."""
j[-1] += 1
if j[-1] == 2:
j[-1] = 0
j[-2] += 1
if j[-2] == 2:
j[-2] = 0
j[-3] += 1
return j
def pf(conbinaison):
    """Convert the players' combinations into letters for display."""
final = ""
for i in conbinaison:
if i == 0:
final += "P"
else:
final += "F"
return final
def main():
joueur1 = [0,0,-1]
# le joueur 1 choisit sa configuration
joueur2 = [0,0,-1]
# le joueur 2 choisit sa configuration
gagne = 0
# variable qui compte combien de fois ou joueur 1 gagne
gagne2 = 0
# variable qui compte le nombre de fois ou joueur 2 gagne
print(" PPP PPF PFP", end = "")
print(" PFF FPP FPF", end ="")
print(" FFP FFF")
# On teste si les choix des joueurs sont tombe
# et combien de fois le joueur 1 ou le 2 gagnent
for a in range(8):
joueur1 = jeu(joueur1)
print(pf(joueur1), end = " ")
for b in range(8):
joueur2 = jeu(joueur2)
if joueur1 == joueur2:
print(" / ", end = "")
else:
for i in range(10000):
teste = False
# variable teste qui regarde si un joueur à gagner
L = []
L.append(randint(0,1))
L.append(randint(0,1))
# On commence une liste avec tous les tirs.
while teste != True:
L.append(randint(0,1))
teste, gagnant = test(joueur1,joueur2,L)
if gagnant == 1:
gagne += 1
if gagnant == 2:
gagne2 += 1
print(round(gagne/100),"% /", round(gagne2/100),"%", end = " ")
gagne = 0
gagne2 = 0
joueur2 = [0,0,-1]
print()
main()
# -
# # Algorithm for finding the winning strategy
# ## For a combination of length 2
# +
def meilleur_strategie_2(combinaison):
    """Find the best heads-or-tails strategy for the second player, for 2 tosses."""
if combinaison == "PP":
return "FP"
elif combinaison == "PF":
return "PP ou FP"
    elif combinaison == "FP":
return "FF ou PF"
else:
return "PF"
reponse = input("La combinaison du premier joueur en majuscule, P pour pile et F pour face : ")
print("La (les) combinaison(s) que vous devez choisir est (sont) :", end = " ")
print(meilleur_strategie_2(reponse))
# -
# ## For a combination of length 3
# +
def meilleur_combinaison_3(combinaison):
    """Find the best heads-or-tails strategy for the second player, for 3 tosses."""
if combinaison[1] == "P":
return "F" + combinaison[0] + combinaison[1]
else:
return "P" + combinaison[0] + combinaison[1]
reponse = input("La combinaison du premier joueur en majuscule, P pour pile et F pour face : ")
print("La combinason que vous devez choisir est :", meilleur_combinaison_3(reponse))
# -
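# The length-3 rule above is the classical best response in this game (Penney's game): against a sequence ABC, the second player plays the opposite of B followed by A and B. Below is a minimal simulation sketch (the function and counts are illustrative only) that checks one matchup.
# +
from random import randint
def simule(j1, j2, n=20000):
    """Estimate how often combination j1 appears before j2 in a random P/F stream."""
    lettres = "PF"
    gagne1 = 0
    for _ in range(n):
        seq = lettres[randint(0, 1)] + lettres[randint(0, 1)]
        while True:
            seq += lettres[randint(0, 1)]
            if seq.endswith(j1):
                gagne1 += 1
                break
            if seq.endswith(j2):
                break
    return gagne1 / n
# against "PPP" the rule suggests "FPP", which should win well over half of the games
print(simule(meilleur_combinaison_3("PPP"), "PPP"))
# -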
| 6,594 |
/Cells_hela_cnn.ipynb
|
768b3238070e4fdba3222cd3bf2f02684e981490
|
[] |
no_license
|
AhmedEldib/Images_classification_using_CNN
|
https://github.com/AhmedEldib/Images_classification_using_CNN
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 151,529 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="D2utJIsR0NLl"
import numpy as np
import pandas as pd
import skimage.io as io
from skimage.transform import resize
import os
import matplotlib.pyplot as plt
import seaborn as sns
import glob
# + colab={"base_uri": "https://localhost:8080/"} id="B_zQGj8xEG3u" outputId="cc005091-85e3-4fc6-fcce-3fd4f18edd9e"
# !pip install -U albumentations
# + colab={"base_uri": "https://localhost:8080/"} id="3JiMTveGCHzs" outputId="7dbce6b2-1a6e-44fa-ff7a-82cb634a2469"
from google.colab import drive
drive.mount('/content/drive')
# + id="788ymOeyzdt2"
directory = glob.glob('/content/drive/MyDrive/pattern_project/hela/*')
# + colab={"base_uri": "https://localhost:8080/"} id="s6OOws3DBlC3" outputId="3ec597b7-8594-41bb-8e86-53530fe1719a"
directory
# + id="qX4PdrBjK5Pc"
directory = [i + "/*" for i in directory]
directory = [glob.glob(i) for i in directory]
# + id="R7K7VrWJLgj7"
img_dir = np.array([])
for i in directory:
img_dir = np.concatenate((img_dir, i), axis = 0)
# + colab={"base_uri": "https://localhost:8080/"} id="28C0Z2siNPRq" outputId="6e84c5d2-b500-4148-f30f-f5f32522ad6d"
img_dir[0].endswith(('.tif', '.TIF'))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="koZ10PmcUM8a" outputId="66f88d0c-bb01-42df-9f19-257fbe54a96a"
img_dir[0].split('/')[-1].split('_')[0]
# + id="Tbm09wZbCE8a"
labels = []
images = []
for i in img_dir:
if i.endswith(('.tif', '.TIF')):
#images.append(io.imread(i))
images.append(np.round(resize(io.imread(i), (332, 332), anti_aliasing=True, preserve_range=True)).astype(int))
labels.append(i.split('/')[-1].split('_')[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="UkqQQTJM0XHG" outputId="fe977908-4d57-4e9a-fbb5-d3d8bddd947a"
plt.imshow(images[400], cmap='gray')
# + colab={"base_uri": "https://localhost:8080/"} id="sVfySUvRCE8b" outputId="8b1c5a74-6404-4819-ee23-da02512e87ee"
len(np.unique(labels, axis=0))
# + id="d_N4JcshCE8b"
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelEncoder
# + id="f7kSIcX7CE8c"
lb = LabelEncoder()
y = lb.fit_transform(labels)
# + id="vmjAFrc1CE8c"
x = np.array(images)
# + colab={"base_uri": "https://localhost:8080/"} id="V0UH0U-UFiC_" outputId="49843df9-9a03-475c-ae7f-a3b7cdd01db8"
x.shape
# + id="GXaxzSCPAZqR"
import albumentations as A
# + id="q0Yl-QxyEa_X"
transform = A.Compose([
A.RandomCrop(width=x[0].shape[0]-50, height=x[0].shape[0]-50),
A.augmentations.transforms.Flip(),
A.RandomBrightnessContrast(p=0.2),
A.augmentations.transforms.Blur(blur_limit=8),
A.augmentations.transforms.GaussNoise()
])
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="RGakVga_Ep7e" outputId="dc94df9f-666d-4255-aa9a-37c3181a7de9"
plt.imshow(images[120], cmap='gray')
# + id="ibT50MQOEvgn"
transformed_image_1 = transform(image=x[120])['image']
transformed_image_2 = transform(image=x[120])['image']
transformed_image_3 = transform(image=x[120])['image']
# + colab={"base_uri": "https://localhost:8080/"} id="__m6rOvEFmVn" outputId="8cea43f2-11b2-425d-ed3e-b9ae02bc0654"
transformed_image_1.shape
# + id="Pu1UD2rDIVBv"
x_aug = []
y_aug = []
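# each image contributes its resized original plus three random augmentations (4x the data)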
for i in range(len(x)):
x_aug.append(np.round((resize(x[i], (x[0].shape[0]-50, x[0].shape[0]-50), anti_aliasing=True, preserve_range=True))).astype(int))
y_aug.append(y[i])
for j in range(3):
x_aug.append(transform(image=x[i])['image'])
y_aug.append(y[i])
# + colab={"base_uri": "https://localhost:8080/", "height": 125} id="razHt2wbFBvO" outputId="02350b76-1a1a-4a5b-84ec-d3eca01ed1c1"
plt.subplot(1, 4, 1)
plt.imshow(x_aug[480], cmap='gray')
plt.subplot(1, 4, 2)
plt.imshow(transformed_image_1, cmap='gray')
plt.subplot(1, 4, 3)
plt.imshow(transformed_image_2, cmap='gray')
plt.subplot(1, 4, 4)
plt.imshow(transformed_image_3, cmap='gray')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="xMhkg78iJLYf" outputId="1372f86d-f223-4383-e1c1-6b15d6693986"
np.unique(y_aug, return_counts=True)
# + colab={"base_uri": "https://localhost:8080/"} id="MfU0WHm7I6Vc" outputId="f839b666-c333-4217-941b-f7aa81065217"
np.unique(y, return_counts=True)
# + colab={"base_uri": "https://localhost:8080/"} id="NKXDw-btKrxe" outputId="2173726d-02a2-42df-b4dc-aa34379a8f69"
len(x_aug)
# + colab={"base_uri": "https://localhost:8080/"} id="kWXC1WNMwKr8" outputId="d2c16665-c0e8-4834-cc28-66799f278533"
len(y)
# + id="0l4iTSncKbvH"
x_aug = np.array(x_aug)/255
y_aug = np.array(y_aug)
# + id="4uDAhFkdKhZd"
x_new = x_aug
y_new = y_aug
# + colab={"base_uri": "https://localhost:8080/"} id="UbV9UqhqCE8d" outputId="244d37aa-2f84-4b90-b8df-f5b8b8c03d58"
x_new = x_new.reshape(len(x_new), x_new.shape[1], x_new.shape[2], 1)
x_new.shape
# + id="1ORpoBn-CE8d"
X_train, X_test, Y_train, Y_test = train_test_split(x_new, y_new, test_size = 0.15, shuffle=True, random_state=47)
# + id="M3QIlMxzMr6A"
del x_new
del y_new
del x_aug
del y_aug
# + id="CluUxBw3CE8e"
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.18, shuffle=True, random_state=47)
# + colab={"base_uri": "https://localhost:8080/"} id="XajLsKUAQxp2" outputId="cbec9ec3-93f9-4afb-9e9b-fe324ab53aa7"
np.unique(Y_train, return_counts=True)
# + colab={"base_uri": "https://localhost:8080/"} id="T66GD_iLM9sF" outputId="b0bc33dc-25f8-40d2-cd2b-66fc210d43cf"
np.unique(Y_test, return_counts=True)
# + colab={"base_uri": "https://localhost:8080/"} id="R98kOT-XCE8e" outputId="b26099eb-ffff-4937-ad68-226c2ca02370"
np.unique(Y_val, return_counts=True)
# + id="aSzhRQ4sCE8e"
from keras import layers
from keras import models
from keras import optimizers
# + id="udqfKGBAwqCZ"
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input,Lambda ,SeparableConv2D,BatchNormalization,MaxPooling2D,Flatten,Dropout
from tensorflow.keras.utils import to_categorical
from keras.optimizers import SGD,Adam
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/"} id="PjVZPHy8NcM2" outputId="b5399704-d858-4c29-a0aa-2cdc43db5a6f"
X_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="WUH9lzZSCE8f" outputId="6cfdae5a-ab71-4166-eab2-9d1406f078c9"
model = models.Sequential()
model.add(layers.Conv2D(16, (3, 3), activation='relu', input_shape=X_train[0].shape))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(256, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(512, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.1))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
# + id="QuQ4pDMjCE8f"
model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
# + id="rkPUWwI1CE8g"
callbacks = [tf.keras.callbacks.ModelCheckpoint('/content/drive/MyDrive/pattern_project/habd_model.h5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')]
callbacks += [tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',patience=5)]
# + colab={"base_uri": "https://localhost:8080/"} id="9RVU3plICE8h" outputId="db00d0e1-2f2a-4df4-d61e-d9c0a3e9bfb1"
model.fit(X_train, Y_train, validation_data= (X_val, Y_val), epochs = 50, batch_size= 128, shuffle = True, callbacks=callbacks)
# + id="HmlMD0ZDRJUk"
n_model = models.load_model('/content/drive/MyDrive/pattern_project/norm_model_val.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="ISxrDNBZdJH8" outputId="c3ff921b-2335-43d9-ceaf-e55209b98691"
n_model.evaluate(X_test, Y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="FVpoQTmDh-i5" outputId="81a862c0-6231-4eab-aa05-bef9965d8105"
n_model.evaluate(x.reshape(-1, 332, 332, 1)/255, y)
# + id="2AGcefszCE8h"
#model.save('/content/drive/MyDrive/pattern_project/norm_model_val.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="O3y3TcD9vJ5e" outputId="4d23cf33-53c8-40ba-864e-d772d1baef9c"
len(X_test)/len(x)
# + id="IAteAG4mvWvb"
Y_pred = np.argmax(n_model.predict(x.reshape(-1, 332, 332, 1)), axis = 1)
# + colab={"base_uri": "https://localhost:8080/"} id="03R0TJqcztlL" outputId="abddb668-9d2e-4fbf-cada-d54b88c1636c"
lb.inverse_transform([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# + id="KlkWzQoFyq_B"
from sklearn.metrics import classification_report, confusion_matrix
# + colab={"base_uri": "https://localhost:8080/"} id="72Xohn1Xy1Vl" outputId="fb5e75f2-9bcf-44c2-9f52-f6ad08dd87d2"
confusion_matrix(Y_pred, y)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="8QN7tR4zy6j_" outputId="26db4ccc-47a1-4b0d-a1ff-e4b90462982c"
img_dir[0]
# + id="1sTOX1Zm0azz"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="vzJyMcns1Tla" outputId="0af1457d-4440-4de7-c73a-c814efa091ce"
img_dir[0].split("/")[-1]
# + id="vf9y608N1bil"
image_names = np.array([i.split("/")[-1] for i in img_dir])
# + colab={"base_uri": "https://localhost:8080/"} id="OGwsw7OV1hwa" outputId="6267e1f9-cde3-4dd9-ee17-d9cffb9e3adf"
y[1]
# + colab={"base_uri": "https://localhost:8080/"} id="FG0SmaRF19_T" outputId="15d1ef4a-0f1d-4508-f233-0ef91ee64b5e"
image_names.shape
# + id="UQtUuGwB1MQm"
outputs = pd.DataFrame()
outputs["Image Name"] = image_names
outputs["Model Prediction"] = lb.inverse_transform(Y_pred)
outputs["Ground Truth"] = lb.inverse_transform(y)
outputs["Correct Classification"] = Y_pred == y
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="V7HHto0w131D" outputId="f5af6b8c-190a-4da3-af0e-e54c17ac765a"
outputs
# + id="fjXUS0KO2gDT"
#outputs.to_excel("outputs.xlsx", index=False)
# + colab={"base_uri": "https://localhost:8080/"} id="DVqzkJOD-ucm" outputId="542a5da6-870d-4542-bbb9-428be65d18bb"
outputs['Model Prediction'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 534} id="fsZVXsCV3BYk" outputId="e19ad6f9-3abf-45f1-d685-0a1c32533fb1"
sns.set(rc={'figure.figsize':(15,8.27)})
sns.countplot(x="Model Prediction", hue="Correct Classification", data=outputs, palette="mako")
# + id="ZTmScEA-_3OI"
= np.array(list(range(0, inc_deaths.shape[0])))
plt.figure()
plt.plot(date, data.loc[state, 'Recovered'].values, c='blue')
plt.plot(date, np.array(pred_recovered), c='green')
plt.show()
plot_cur += 1
data['Active'] = data['Confirmed'] - data['Deaths'] - data['Recovered']
# set entries where active < 0 to 0, and recmopute recovered
data.loc[data['Active']<0, 'Active'] = 0
data['Recovered'] = data['Confirmed'] - data['Deaths'] - data['Active']
print('Finish')
# +
def SuEIR(t, y, N, beta, sigma, mu, gamma):
# y[0] is S, y[1] is E, y[2] is I, y[3] is R
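    # beta: contact rate; sigma: rate of leaving the exposed compartment E;
    # mu: fraction of exposed that become confirmed infectious (the rest go unreported);
    # gamma: removal rate from I to R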
gradient = np.zeros_like(y)
gradient[0] = - beta * (y[2] + y[1]) * y[0] / N
gradient[1] = beta * (y[2] + y[1]) * y[0] / N - sigma * y[1]
gradient[2] = mu * sigma * y[1] - gamma * y[2]
gradient[3] = gamma * y[2]
return gradient
def predict_with_ODE(fun, t_span, y0, t_eval, args):
sol = solve_ivp(lambda t,y: fun(t,y,*args), t_span, y0, t_eval = t_eval)
return sol.y
def optimize_SuEIR_parameters(beta0, sigma0, mu0, gamma0, N0, I, R, t_span, y0, t_eval, deaths, ratios):
I_max = np.max(I)
R_max = np.max(R)
scale_I = I / I_max
scale_R = R / R_max
scale_D = deaths / R_max
#print(scale_I)
#print(scale_R)
def loss(x):
y = predict_with_ODE(SuEIR, t_span, y0, t_eval, args = (N0, x[0], x[1], x[2], x[3]))
#print(y)
I_pred = y[2,:] / I_max
R_pred = y[3,:] / R_max
#D_pred = (R_pred + 1) * ratios
Inc_R_pred = y[3, 1:] - y[3,:-1]
Inc_D_pred = (Inc_R_pred + 1) * ratios[1:]
D_pred = np.zeros_like(deaths)
D_pred[0] = deaths[0]
D_pred[1:] = Inc_D_pred
D_pred = np.cumsum(D_pred)
D_pred = D_pred / R_max
res = np.array(np.concatenate([I_pred-scale_I, R_pred - scale_R, D_pred - scale_D])).astype(np.float64)
return res
x0 = np.array([beta0, sigma0, mu0, gamma0])
return least_squares(loss, x0, bounds = (0, 1))
def optimize_SuEIR_parameters_2(beta0, sigma0, mu0, gamma0, N0, I, R, t_span, y0, t_eval, deaths, ratios):
I_max = np.max(I)
R_max = np.max(R)
scale_I = I / I_max
scale_R = R / R_max
scale_D = deaths / R_max
#print(scale_I)
#print(scale_R)
def loss(x):
y = predict_with_ODE(SuEIR, t_span, y0, t_eval, args = (N0, x[0], x[1], x[2], x[3]))
#print(y)
I_pred = y[2,:] / I_max
R_pred = y[3,:] / R_max
D_pred = (R_pred + 1) * ratios
#return np.mean((I_pred-scale_I)**2) + np.mean((R_pred-scale_R)**2) + np.mean((D_pred-scale_D)**2)
res = np.array(np.concatenate([I_pred-scale_I, R_pred - scale_R, D_pred - scale_D])).astype(np.float64)
#print(res)
#print(np.isfinite(res))
return res
x0 = np.array([beta0, sigma0, mu0, gamma0])
return least_squares(loss, x0, bounds = (0, 1))
# +
N0_ratio_list = [0.7, 0.8, 0.9, 1]
E0_ratio_list = [0.1, 0.2, 0.3]
best_ratio = {}
state_params = {}
best_mape_list = []
for state, _ in tqdm(true_state_partial):
best_mape = 10
for N0_ratio in N0_ratio_list:
for E0_ratio in E0_ratio_list:
# split training and validation set
values = data.loc[state].values
training_data = values[:-7,:]
valid_data = values[-7:,:]
# set y0
I0 = values[0,4] # Active
R0 = values[0,2] + values[0,3] # Recovered + Death
N0 = N0_ratio * state_population.loc[state].values[0]
E0 = E0_ratio * state_population.loc[state].values[0]
S0 = N0 - E0 - I0 - R0
y0 = np.array([S0, E0, I0, R0])
# set initial values for parameters
x0 = np.random.rand(4)
# set t_span, t_eval
t_span = (0, training_data.shape[0]-1)
t_eval = np.array(list(range(training_data.shape[0]))).astype(np.float64)
# retrieve I, R
I = training_data[:,4]
R = training_data[:,2] + training_data[:,3]
deaths = training_data[:,2]
date = np.array(range(values.shape[0])).astype(np.float64)
ratios = state_ratio_by_date[state](torch.FloatTensor(date).view(-1,1))
ratios = ratios.detach().numpy().squeeze()
res = optimize_SuEIR_parameters_2(x0[0], x0[1], x0[2], x0[3], N0, I, R, t_span, y0, t_eval, deaths, ratios[:-7])
# predict with learned parameters and compute MAPE on validation dataset
pred = predict_with_ODE(SuEIR, (0, values.shape[0]), y0, t_eval = np.array(list(range(values.shape[0]))).astype(np.float64),
args = (N0, res.x[0], res.x[1], res.x[2], res.x[3]))
pred_valid = pred[2,-7:] + pred[3,-7:]
death_valid = valid_data[:,2]
#pred_death = (pred[3,-7:] + 1) * ratios[-7:]
pred_remove = pred[3,:]
pred_death_ = (pred_remove + 1) * ratios
"""
pred_inc_remove = pred_remove[1:] - pred_remove[:-1]
pred_inc_death = (pred_inc_remove+1) * ratios[1:]
pred_death_ = np.zeros_like(pred_remove)
pred_death_[0] = deaths[0]
pred_death_[1:] = pred_inc_death
pred_death_ = np.cumsum(pred_death_)
#print(pred_remove)
#print(ratios)
#print(pred_inc_death)
"""
pred_death_ = pred_death_[-7:]
mape = np.mean(np.abs(valid_data[:,1]-pred_valid) / valid_data[:,1]) +\
np.mean(np.abs(pred_death_-death_valid)/death_valid)
#break
#mape = np.mean(mape_cfm)
if mape < best_mape:
best_mape = mape
best_ratio[state] = (N0_ratio, E0_ratio)
state_params[state] = copy.deepcopy(res)
best_mape_list.append(best_mape)
print(state, best_ratio[state], best_mape)
#break
#break
#N0_ratio, E0_ratio = best_ratio[0], best_ratio[1]
#print('Best ratio is N0 ratio: %.3f, E0 ratio: %.3f, validation mape: %.3f'%(N0_ratio, E0_ratio, np.mean(best_mape)) #0.117
print(np.mean(best_mape_list))
# -
# predict
pred_cfm = {}
pred_death = {}
for state, _ in true_state_partial:
# split training and validation set
values = data.loc[state].values
training_data = values[:,:]
# valid_data = values[-7:,:]
N0_ratio, E0_ratio = best_ratio[state]
# set y0
I0 = values[0,4] # Active
R0 = values[0,2] + values[0,3] # Recovered + Death
N0 = N0_ratio * state_population.loc[state].values[0]
E0 = E0_ratio * state_population.loc[state].values[0]
S0 = N0 - E0 - I0 - R0
y0 = np.array([S0, E0, I0, R0])
# set initial values for parameters
#x0 = np.random.rand(4)
# set t_span, t_eval
t_span = (0, values.shape[0]+1)
t_eval = np.array(range(values.shape[0])).astype(np.float64)
res = state_params[state]
pred = predict_with_ODE(SuEIR, t_span, y0, t_eval = t_eval,
args = (N0, res.x[0], res.x[1], res.x[2], res.x[3]))
new_S0, new_E0 = pred[0, values.shape[0]-1], pred[1, values.shape[0]-1]
t_span = (0, 31)
t_eval = np.array(range(31)).astype(np.float64)
# retrieve I, R
#I = training_data[:,4]
#R = training_data[:,2] + training_data[:,3]
#res = optimize_SuEIR_parameters(x0[0], x0[1], x0[2], x0[3], N0, I, R, t_span, y0, t_eval)
#state_params[state] = res.x
y0 = np.array([new_S0, new_E0, values[78,4], values[78,2]+values[78,3]])
pred = predict_with_ODE(SuEIR, t_span, y0, t_eval = t_eval,
args = (N0, res.x[0], res.x[1], res.x[2], res.x[3]))
pred_cfm[state] = pred[2,1:31] + pred[3,1:31]
date = np.array(range(values.shape[0]+30)).astype(np.float64)
ratio = state_ratio_by_date[state](torch.FloatTensor(date).view(-1,1))
ratio = ratio.detach().numpy().squeeze()
death = (pred[3,1:31] + 1) * ratio[-30:]
#print(death.shape)
#break
pred_death[state] = death
"""
pred_remove = pred[3,:]
pred_inc_remove = pred_remove[1:] - pred_remove[:-1]
pred_inc_death = (pred_inc_remove+1) * ratio[1:]
pred_death_ = np.zeros_like(pred_remove)
pred_death_[0] = deaths[0]
pred_death_[1:] = pred_inc_death
pred_death_ = np.cumsum(pred_death_)
pred_death[state] = pred_death_[-30:]
"""
print('Finish')
best_ratio_value = np.array(list(best_ratio.values()))
N0_E0_ratio = np.mean(best_ratio_value, axis=0)
#print(N0_E0_ratio)
N0_ratio, E0_ratio = N0_E0_ratio[0], N0_E0_ratio[1]
print(N0_ratio, E0_ratio)
# +
# Now process states without recovered number
a_list = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]
b_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
def predictRatio(t, a, b):
t = t/100
return a * t + b
train_date = 72
valid_date = 79
total_date = 109
for state in tqdm(state_none):
# Try to predict Recovered Data
best_params = None
best_mape = 100
death = data.loc[state, 'Deaths'].values
confirmed = data.loc[state, 'Confirmed'].values
for a in a_list:
for b in b_list:
t = np.array(range(0, total_date)).astype(np.float64)
ratio = predictRatio(t, a, b)
if np.sum(ratio <= 0) > 0:
continue
#print(ratio)
removed = death / ratio[:valid_date] - 1
active = confirmed - removed
I0 = active[0]
R0 = removed[0]
N0 = N0_ratio * state_population.loc[state].values[0]
E0 = E0_ratio * state_population.loc[state].values[0]
S0 = N0 - E0 - I0 - R0
y0 = np.array([S0, E0, I0, R0])
#print(ratio)
#print(y0)
#print(N0)
x0 = np.random.rand(4)
t_span = (0, train_date)
t_eval = np.array(range(0, train_date)).astype(np.float64)
I = active[:train_date]
R = removed[:train_date]
#print(I)
#print(R)
res = optimize_SuEIR_parameters_2(x0[0], x0[1], x0[2], x0[3], N0, I, R, t_span, y0, t_eval, death[:train_date], ratio[:train_date])
pred = predict_with_ODE(SuEIR, (0, valid_date), y0, t_eval = np.array(list(range(valid_date))).astype(np.float64),
args = (N0, res.x[0], res.x[1], res.x[2], res.x[3]))
pred_valid = pred[2,-7:] + pred[3,-7:]
valid_data = data.loc[state, 'Confirmed'].values[-7:]
pred_death_ = (pred[3,-7:] + 1) * ratio[-7:]
valid_death = death[-7:]
mape = np.mean(np.abs(valid_data-pred_valid) / valid_data) + np.mean(np.abs(valid_death-pred_death_)/valid_death)
if mape < best_mape:
best_mape = mape
best_params = (a, b, N0, res.x[0], res.x[1], res.x[2], res.x[3])
print(state, best_mape)
ratio = predictRatio(0, best_params[0], best_params[1])
R0 = death[0]/ratio - 1
I0 = confirmed[0] - R0
N0 = N0_ratio * state_population.loc[state].values[0]
E0 = E0_ratio * state_population.loc[state].values[0]
S0 = N0 - E0 - I0 - R0
y0 = np.array([S0, E0, I0, R0])
state_params[state] = best_params
# use best parameters to predict
pred = predict_with_ODE(SuEIR, (0, total_date), y0, t_eval = np.array(range(total_date)).astype(np.float64),
args = best_params[-5:])
new_S0, new_E0 = pred[0, valid_date-1], pred[1, valid_date-1]
t_span = (0, total_date - valid_date)
t_eval = np.array(range(31)).astype(np.float64)
ratio = predictRatio(valid_date-1, best_params[0], best_params[1])
new_R0 = death[valid_date-1]/ratio - 1
# retrieve I, R
#I = training_data[:,4]
#R = training_data[:,2] + training_data[:,3]
#res = optimize_SuEIR_parameters(x0[0], x0[1], x0[2], x0[3], N0, I, R, t_span, y0, t_eval)
#state_params[state] = res.x
y0 = np.array([new_S0, new_E0, confirmed[valid_date-1]-new_R0, new_R0])
    pred = predict_with_ODE(SuEIR, t_span, y0, t_eval = t_eval,
                            args = best_params[-5:])  # use the best grid-search parameters, consistent with the full-period prediction above
pred_cfm[state] = pred[2,1:31] + pred[3,1:31]
removed = pred[3,1:31]
date = np.array(range(values.shape[0],values.shape[0]+30)).astype(np.float64)
ratio = predictRatio(date, best_params[0], best_params[1])
death = (removed + 1) * ratio
pred_death[state] = death
# +
# Write submission.csv
test_data = pd.read_csv('test.csv')
test_data['Date'] = pd.to_datetime(test_data['Date'], format='%m-%d-%Y')
start_date = pd.to_datetime('04-01-2021', format='%m-%d-%Y')
for index in test_data.index:
idx = (test_data.loc[index, 'Date'] - start_date).days
state = test_data.loc[index, 'Province_State']
test_data.loc[index, 'Confirmed'] = pred_cfm[state][idx]
test_data.loc[index, 'Deaths'] = pred_death[state][idx]
test_data.loc[:,['Confirmed', 'Deaths']].to_csv('submission.csv')
# -
target_state = 'Vermont'
for state, _ in true_state_partial:
if state != target_state:
continue
# split training and validation set
values = data.loc[state].values
#training_data = values[:,:]
# valid_data = values[-7:,:]
N0_ratio, E0_ratio = best_ratio[state]
# set y0
I0 = values[0,4] # Active
R0 = values[0,2] + values[0,3] # Recovered + Death
N0 = N0_ratio * state_population.loc[state].values[0]
E0 = E0_ratio * state_population.loc[state].values[0]
S0 = N0 - E0 - I0 - R0
y0 = np.array([S0, E0, I0, R0])
# set initial values for parameters
#x0 = np.random.rand(4)
# set t_span, t_eval
t_span = (0, values.shape[0]+1)
t_eval = np.array(range(values.shape[0])).astype(np.float64)
res = state_params[state]
pred = predict_with_ODE(SuEIR, t_span, y0, t_eval = t_eval,
args = (N0, res.x[0], res.x[1], res.x[2], res.x[3]))
date = np.array(range(79))
ratio = state_ratio_by_date[state](torch.FloatTensor(date).view(-1,1))
ratio = ratio.detach().numpy().squeeze()
death = (pred[3,:] + 1) * ratio
print(ratio)
print(death)
plt.figure()
plt.plot(date, values[:,1], c='red')
plt.plot(date, pred[2,:]+pred[3,:], c='yellow')
plt.figure()
plt.plot(date, values[:,2], c='red')
plt.plot(date, death, c='yellow')
plt.figure()
plt.plot(date, values[:,4], c='red')
plt.plot(date, pred[2,:], c='yellow')
plt.figure()
plt.plot(date, values[:,2]+values[:,3], c='red')
plt.plot(date, pred[3,:], c='yellow')
print(data.loc[state, 'Active'])
print('Finish')
deaths = data.loc['Vermont', 'Deaths'].values[:25]
rcv = data.loc['Vermont', 'Recovered'].values[:25]
ratio = deaths / (deaths+rcv+1)
print(ratio)
print(state_ratio_by_date['Vermont'].linear.weight)
data.loc[data['Active']<0, 'Active'] = 0
print(data.loc['Vermont'])
# ize the post).
#
# <a href="https://github.com/sameersingh/uci-statnlp/blob/master/hw2/generator.py">generator.py</a> from Sameer Sing demonstrates a method to generate from a LM with a temperature parameter.
#
# Explain how the code in this method corresponds to the mathematical explanation provided in the blog above.
# <a name="nn-lm"></a>
# <h3>1.4 Character language model</h3>
#
# It is interesting to compare word-based and character-based language models.
# On the one hand, character-based models need to predict a much smaller range of options (one character out of ~100 possible characters vs. one word out of 200K possible words - or 10K in the example we reviewed above).
# On the other hand, we need to maintain a much longer history of characters to obtain a significant memory of the context which would make sense semantically.
#
#
# <a name="effectiveness"></a>
# <h4>1.4.1 Read and Summarize</h4>
#
# <a href="http://karpathy.github.io/2015/05/21/rnn-effectiveness/">The Unreasonable Effectiveness of Recurrent Neural Networks</a>, May 21, 2015, Andrej Karpathy (up to Section "Further Reading").
# Write a summary of this essay of about 200 words highlighting what is most surprising in the experimental results reported in the blog.
# Refer to what you know about formal languages and Chomsky's hierarchy.
#
# Read the follow-up article:
# <a href="http://nbviewer.jupyter.org/gist/yoavg/d76121dfde2618422139">The unreasonable effectiveness of Character-level Language Models (and why RNNs are still cool)</a>, Sept 2015, Yoav Goldberg.
# Write a summary of this essay of about 200 words.
# **Summary of 'The Unreasonable Effectiveness of Recurrent Neural Networks':**
# One of the main reasons Recurrent Neural Networks are so effective is that they let us operate over sequences of vectors: sequences in the input, in the output, or (in the most general case) in both. Vanilla Neural Networks and CNNs are more limited: they accept a fixed-size vector as input, produce a fixed-size vector as output, and perform this mapping in a fixed number of computational steps (e.g. the number of layers in the model).
# How an RNN works: it accepts an input vector x and produces an output vector y, but the contents of y are influenced not only by the input we just fed in, but also by the entire history of inputs fed in so far.
# Multiple layers: we can build a 2-layer (or deeper) recurrent network, i.e. two separate RNNs where the second RNN receives the output of the first as its input. Neither RNN "knows" or cares about this - it is all just vectors coming in and going out, with gradients flowing through each module during backpropagation.
# We saw how such a model can generate new text that looks as if it was written by a real person - for example in the style of Shakespeare - just by giving the RNN data to learn from.
#
#
# **Summary of 'The unreasonable effectiveness of Character-level Language Models (and why RNNs are still cool)':**
# The idea of the Unsmoothed Maximum Likelihood Character-Level Language Model is a model that guesses the next character based on the previous n letters.
# Instead of RNNs or LSTMs, the article learns a fixed-order language model directly: mathematically, learn P(character | history). A simple approach is to count the number of times c appears after h and divide by the total number of letters appearing after h; if a letter never followed h, its probability is 0.
# Training the model: feed a large text corpus and choose an order - the size of the history to consult.
# Generating a letter: given the history, sample a random letter from the distribution conditioned on the last `order` characters.
# The article shows that in the Shakespeare example, with order 4 and above, the algorithm produces quite reasonable text in the same "writing style" as the original training text. In the Linux-kernel example it works well from order 15 on, keeping good indentation and balanced brackets.
# The model learns something non-trivial: after training, it can create original text based only on the statistics it has learned.
#
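# To make the counting idea concrete, here is a minimal sketch of an unsmoothed MLE character model (an illustration only, separate from the nltk-based model trained below): it counts which characters follow each history of length `order` and normalizes the counts into probabilities.
# +
# Minimal sketch of an unsmoothed MLE character LM (illustration only)
from collections import Counter, defaultdict

def mle_char_lm(text, order=4):
    counts = defaultdict(Counter)
    padded = '*' * order + text
    for i in range(len(padded) - order):
        history, char = padded[i:i + order], padded[i + order]
        counts[history][char] += 1
    # normalize counts into probabilities P(char | history)
    return {h: {c: n / sum(ctr.values()) for c, n in ctr.items()}
            for h, ctr in counts.items()}

toy_lm = mle_char_lm("abracadabra abracadabra", order=2)
print(toy_lm["ab"])  # {'r': 1.0} - 'r' is the only character seen after 'ab' here
# -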
# <a name="recipes"></a>
# <h4>1.4.2 Recipes with a Character LM</h4>
#
# Strikingly realistic output can be generated when training a character language-model on a strongly-constrained genre of text like cooking recipes.
# Train your n-gram model on the dataset provided in
# <a href="https://gist.github.com/nylki/1efbaa36635956d35bcc">do androids dream of cooking?</a> which contains about 32K recipes gathered from the Internet.
# Gather the recipes dataset and prepare a dataset reader according to the structure of the files.
# +
def prepare_data(direc):
entries = os.listdir(direc)
return entries
direc = prepare_data("../data/recipes")
# -
print(f'number of recipes: {len(direc)}')
# Report basic statistics about the dataset (number of recipes, tokens, characters, vocabulary size, distribution of the size of recipes in words and in chars, distribution of length of words).
# +
files=direc
tokens_per_reciepe={}
chars_per_reciepe={}
chars_per_word_in_rec={}
vocab_per_rec={}
mean_char_per_word_per_rec={}
std_char_per_word_per_rec={}
statistics=[tokens_per_reciepe,chars_per_reciepe,vocab_per_rec\
,mean_char_per_word_per_rec,std_char_per_word_per_rec]
statistics_names=['number of tokens','number of chars','vocabulary size',
                  'mean_char_per_word','std_char_per_word']
concat_files = ""
for file in files:
file_name=file.split('.')[0]
read_file = open(f'../data/recipes/{file}', encoding="utf8", errors='ignore').read()
concat_files += (read_file + ' ')
print(make_bold(f'statistics for {file}'))
tok = get_tokens(read_file)
vocab = get_vocabulary(read_file)
chars_per_word = [len(w) for w in tok]
mean_char_per_word= np.mean(chars_per_word)
std_char_per_word=np.std(chars_per_word)
std_char_per_word_per_rec[file_name]=std_char_per_word
tokens_per_reciepe[file_name]=len(tok)
chars_per_reciepe[file_name]=len(read_file)
vocab_per_rec[file_name]=len(vocab)
mean_char_per_word_per_rec[file_name]=mean_char_per_word
std_char_per_word_per_rec[file_name]=std_char_per_word
print(make_underline('\tnumber of tokens is:'),len(tok))
print(make_underline('\tnumber of chars is:'),len(read_file))
print(make_underline('\tvocabulary size is:'),len(vocab))
print(make_underline('\tdistribution of length of words:'),f'mean - {mean_char_per_word}, deviation - {std_char_per_word}')
# +
# let plot some histograms
index=0
statistics_names=np.array(['number of tokens','number of chars','vocabulary size',
                           'mean_char_per_word','std_char_per_word'])
for i in range(len (statistics_names)):
print(make_bold(statistics_names[i]))
plt.bar(statistics[i].keys(),statistics[i].values())
plt.title(statistics_names[i])
plt.show()
token_statistics = np.fromiter(tokens_per_reciepe.values(), dtype=int)
tokens_per_reciepe_mean=np.mean(token_statistics)
tokens_per_reciepe_std=np.std(token_statistics)
print(make_bold('\ndistribution of the size of recipes in words:'),f'mean-{tokens_per_reciepe_mean} \
std-{tokens_per_reciepe_std}')
print('\n')
# -
# statistics for all files together
print(make_bold(f'statistics for all files:'))
tok = get_tokens(concat_files)
print(make_underline('\tnumber of tokens is:'),len(tok))
print(make_underline('\tnumber of chars is:'),len(concat_files))
vocab = get_vocabulary(concat_files)
print(make_underline('\tvocabulary size is:'),len(vocab))
chars_per_word = [len(w) for w in tok]
mean_char_per_word= np.mean(chars_per_word)
std_char_per_word=np.std(chars_per_word)
print(make_underline('\tdistribution of length of words:'),f'mean - {mean_char_per_word}, deviation - {std_char_per_word}')
# Split the dataset into training, dev and test as a 80%/10%/10% split. Provide a Python interface to access the split conveniently.
# +
def get_file_list_from_dir(datadir):
all_files = os.listdir(os.path.abspath(datadir))
data_files = list(filter(lambda file: file.endswith('.mmf'), all_files))
return data_files
def randomize_files(file_list):
np.random.shuffle(file_list)
def get_training_validating_testing_sets(file_list):
split_first_index = int(0.8 * len(file_list))
split_second_index = int(0.9 * len(file_list))
training = file_list[:split_first_index]
validating = file_list[split_first_index:split_second_index]
testing = file_list[split_second_index:]
return training, validating, testing
# +
def split_dataset(path):
all_files = get_file_list_from_dir(path)
randomize_files(all_files)
train, valid, test = get_training_validating_testing_sets(all_files)
return train, valid, test
train, valid, test = split_dataset("../data/recipes")
# -
# Choose the order of the char n-gram according to the indications given in Yoav Goldberg's article.
# Justify the choice (you should use the dev test for this).
# +
# Char n-gram model (different from the one above - this one is not adapted to words)
def create_lm(fname, order=4):
# with open(fname) as f:
# data = f.read()
data = fname
pad = '*' * order
data = pad + data
cfd = nltk.ConditionalFreqDist((data[i : i + order], data[i + order]) for i in range(len(data) - order))
cpd = nltk.ConditionalProbDist(cfd, nltk.MLEProbDist)
return cpd
def test_create_lm(dataset, order):
# order = 4
lm = create_lm(dataset, order)
return lm
def test_generate_from_lm(lm, order):
out = []
hist = '*' * order
for _ in range(1000):
letter = lm[hist].generate()
hist = hist[1:] + letter
out.append(letter)
return (''.join(out))
def train_char_lm(dataset, order):
lm_test = test_create_lm(dataset, order)
return test_generate_from_lm(lm_test, order)
# -
# We will try a few different orders for the char n-gram and check which one performs better.
# As in the article, we will now present a few different results and compare them.
# Train a char language model using your LM model adapted to work on characters instead of words.
def compare_order(order, data_type):
for file in data_type:
with open(f'../data/recipes/{file}', encoding="utf8", errors='ignore') as fname:
file_read = fname.read()
gen = train_char_lm(file_read, order)
print(make_bold(f'Datasetis {data_type} and order is {order}\n'))
print(gen)
compare_order(2, valid)
compare_order(4, valid)
compare_order(8, valid)
compare_order(12, valid)
# As we can see from the results above:
# * an n-gram order of 2 is not good; we cannot understand the output because it does not produce real words.
# * an n-gram order of 4 is a bit better: it does not make much sense, but it is made of real English words.
# * an n-gram order of 8 is better still: its form is closer to the recipes in the dataset and it is more understandable, even though it is still not good enough - we cannot really follow the meaning of the whole text, and we certainly cannot use it.
# * an n-gram order of 12 is much better. It looks almost like a real recipe; the improvement is clear.
#
# So we will choose an order of 12 for the char n-gram.
# From the examples above on the validation set, order 12 clearly gave the best result.
# Of course, the higher the order the more accurate the result, but the model also has to work harder and remember a longer character history.
# Order 12 gave nice results on the validation set and should also work well on the training set.
compare_order(12, train)
# This is the resulting recipe after training on the training set.
# As we can see, the result is pretty good - perhaps not what a person would write, but still impressive!
# Report on the perplexity of the trained language model. Comment on the value you observe compared to the perplexity of the word LM model obtained above.
# +
# pick an arbitrary file from train
train_sample = train[randint(0, len(train) - 1)]
# pick an arbitrary file from valid
valid_sample = valid[randint(0, len(valid) - 1)]
# -
# create_lm expects raw text, so read the sampled training file before building the LM
train_text = open(f'../data/recipes/{train_sample}', encoding="utf8", errors='ignore').read()
lm = create_lm(train_text, 12)
valid_path = "../data/recipes/" + valid_sample
print(f'\noverall perplexity is: {perplexity(lm, valid_path, 12)}')
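# For reference, a minimal sketch of how character-level perplexity could be computed for this kind of model (an assumed definition for illustration, not necessarily the perplexity helper used above; unseen histories get a small floor probability so the unsmoothed model does not blow up):
# +
# Hedged sketch of character-level perplexity (assumed definition, illustration only)
import math

def char_perplexity(lm, text, order, floor=1e-10):
    padded = '*' * order + text
    log_prob, n = 0.0, 0
    for i in range(len(padded) - order):
        hist, char = padded[i:i + order], padded[i + order]
        p = lm[hist].prob(char) if hist in lm else 0.0  # nltk ConditionalProbDist lookup
        log_prob += math.log2(max(p, floor))
        n += 1
    return 2 ** (-log_prob / n)
# -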
# Sample about 5 generated recipes from the trained language model.
for num in range(5):
print(f'recipe number {num+1}:')
compare_order(12, train)
print('\n')
# Write 3 to 5 observations about the generated samples.
# From the samples above we can see that:
#
# Each recipe we get is a bit different, and all of them are based on the recipes in the dataset. The more we train, the more accurate and sensible the result becomes. The last recipe is the most similar to a 'real' one, even though there is still work to do before the model generates human-like text. In each recipe there are parts that make sense and parts that do not - for example, the text ends in the middle of a sentence. To improve this we could use a different order, or train the model for longer.
# <hr>
# <hr>
# <a name="P2"></a>
# <h2>Part 2: Polynomial Curve Fitting</h2>
#
# We reproduce the polynomial curve fitting example used in Bishop's <a href="https://www.microsoft.com/en-us/research/people/cmbishop/#!prml-book">book</a> in Chapter 1.
#
# <a name="syntheticdata"></a>
# <h3>2.1 Synthetic Dataset Generation</h3>
#
# We generate a dataset of points in the form of 2 vectors $x$ and $t$ of size $N$ where:
#
# $t_i = y(x_i) + \mathcal{N}(\mu, \sigma)$
#
# where:
# * The $x_i$ values are equi-distant on the $[0 \dots 1]$ segment (that is, $x_1 = 0, x_2=\frac{1}{N-1}, x_3=\frac{2}{N-1}..., x_N = 1.0$)
# * $\mu = 0.0$
# * $\sigma = 0.03$
# * $y(x) = sin(2\pi \times x)$
def generateDataset(N, f, sigma):
    x = np.linspace(0, 1, N)
    y = f(x)
    # draw independent noise for every point (with no size argument, np.random.normal returns a single scalar)
    return x, y + np.random.normal(mu, sigma, size=N)
# Draw the plot (scatterplot) of (x,t) using matplotlib for N=100.
# * Look at the documentation of the <a href="https://numpy.org/doc/stable/reference/random/generated/numpy.random.Generator.normal.html#numpy.random.Generator.normal">numpy.random.Generator.normal</a> function in Numpy for an example of usage.
# * Look at the definition of the function <a href="https://numpy.org/doc/stable/reference/generated/numpy.linspace.html#numpy.linspace">numpy.linspace</a> to generate your dataset.
#
# +
mu = 0.0
N = 100
# define f(x)=sin(2*pi*x)
def f(x):
return np.sin(2*np.pi*x)
#plotting
x, t = generateDataset(N, f, 0.03)
plt.scatter(x, t)
# +
def s(x): return x**2
def f(x): return math.sin(2 * math.pi * x)
vf = np.vectorize(f) # Create a vectorized version of f
z = np.array([1,2,3,4])
print(z)
sz = s(z) # You can apply simple functions to an array
print(sz.shape) # Same dimension as z (4)
print(sz)
fz = vf(z) # For more complex ones, you must use the vectorized version of f
print(fz.shape)
print(fz)
# -
# <a name="curvefitting"></a>
# <h3>2.2 Polynomial Curve Fitting</h3>
#
# We will attempt to learn the function y given a synthetic dataset $(x, t)$.
#
# We assume that $y$ is a polynomial of degree $M$ - that is:
#
# $y(x) = w_0 + w_1 x + w_2 x^2 + ... + w_M x^M$
#
# Our objective is to estimate the vector $w = (w_0 \dots w_M)$ from the dataset $(x, t)$.
# +
import numpy as np
import scipy.linalg
t = np.array([1,2,3,4]) # This is a vector of dim 4
t.shape # (4,)
phi = np.array([[1,1],[2,4],[3,3],[2,4]]) # This is a 4x2 matrix
phi.shape # (4, 2)
prod = np.dot(phi.T, phi) # prod is a 2x2 matrix
prod.shape # (2, 2)
i = np.linalg.inv(prod) # i is a 2x2 matrix
i.shape # (2, 2)
m = np.dot(i, phi.T) # m is a 2x4 matrix
m.shape # (2, 4)
w = np.dot(m, t) # w is a vector of dim 2
w.shape # (2,)
# -
# We implement a method <b>optimizeLS(x, t, M)</b> which given the dataset $(x, t)$ returns the optimal polynomial of degree $M$ that approximates the dataset according to the least squares objective.
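#
# Concretely, with the design matrix $\Phi \in \mathbb{R}^{N \times (M+1)}$, $\Phi_{ij} = x_i^{\,j}$ ($j = 0, \dots, M$), the least-squares objective $E(w) = \frac{1}{2}\sum_{i=1}^{N}\big(y(x_i, w) - t_i\big)^2$ is minimized by the normal-equations solution
#
# $$ w_{LS} = (\Phi^{\top}\Phi)^{-1}\Phi^{\top} t, $$
#
# which is exactly the chain of matrix products shown in the NumPy snippet above and implemented in <b>optimizeLS</b> below.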
# +
def compute_matrix(x, M):
arr = np.zeros([len(x), M+1], dtype=float)
for i in range(len(x)):
for m in range(M+1):
arr[i, m] = x[i] ** m
return arr
def optimizeLS(x, t, M):
phi = compute_matrix(x, M)
prod = np.dot(phi.T, phi)
i = np.linalg.inv(prod)
m = np.dot(i, phi.T)
w = np.dot(m, t)
return w
def polynomial(w, x_i, M):
res = 0
for i in range(M+1):
res += w[i] * (x_i ** i)
return res
# -
# We now plot the learned polynomial $w_M(x_i)$ and the real function $sin(2πx)$ for a dataset of size $N=10$ and $M=1,3,5,10$.
#
# +
N = 10
M = [1, 3, 5, 10]
def f(x):return np.sin(2*np.pi*x)
x,t = generateDataset(N, f, 0.03)
for m in M:
w = optimizeLS(x,t, m)
y = polynomial(w, x, m)
line1 = plt.plot(x, y,label=('M= '+str(m)))
line2 = plt.plot(x,t)
plt.legend()
plt.show()
# -
# let's try to plot each graph seperately to get more information
# +
N = 10
M = [1, 3, 5, 10]
def f(x):return np.sin(2*np.pi*x)
x,t = generateDataset(N, f, 0.03)
for num in M:
w = optimizeLS(x,t, num)
y = polynomial(w, x, num)
line1 = plt.plot(x, y)
line2 = plt.plot(x,t)
plt.title('M= '+str(num))
plt.show()
# -
# We can tell that when M=5 we get overfitting!
# <a name="regularization"></a>
# <h3>2.3 Polynomial Curve Fitting with Regularization</h3>
#
# We observe in the plot above that the solution to the least-squares optimization has a tendency to over-fit the dataset.
#
# To avoid over-fitting, we will use <i>regularization</i>: the objective function we want to optimize will take into account the least-squares error as above, and in addition the complexity of the learned model $w$.
# We write a function <b>optimizePLS(x, t, M, lambda)</b> which returns the optimal parameters $w_{PLS}$ given $M$ and $\lambda$.
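#
# Concretely, the penalized least-squares objective is
#
# $$ E_{PLS}(w) = \frac{1}{2}\sum_{i=1}^{N}\big(y(x_i, w) - t_i\big)^2 + \frac{\lambda}{2}\,\|w\|^2, $$
#
# whose minimizer has the closed form implemented below:
#
# $$ w_{PLS} = \big(\lambda I + \Phi^{\top}\Phi\big)^{-1}\Phi^{\top} t. $$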
def optimizePLS(x, t, M, lamb):
phi = compute_matrix(x, M)
tmp = np.dot(phi.T, phi)
prod = tmp + (lamb * np.eye(int(M+1)))
i = np.linalg.inv(prod)
m = np.dot(i, phi.T)
w = np.dot(m, t)
return w
# We want to optimize the value of λ. The way to optimize is to use a development set in addition to our training set.
#
# To construct a development set, we extend our synthetic dataset construction function to return 3 samples:
# * one for training,
# * one for development
# * and one for testing
# returns xt, tt, xv, tv, xtst, ttst
def generateDataset3(N, f, sigma):
x, t1 = generateDataset(N, f, sigma)
x, t2 = generateDataset(N, f, sigma)
x, t3 = generateDataset(N, f, sigma)
list_tuple = {}
for index in range(len(x)):
list_tuple[x[index]] = [t1[index], t2[index], t3[index]]
np.random.shuffle(list_tuple[x[index]])
train = {}
valid = {}
test = {}
for index in range(len(list_tuple)):
train[x[index]] = list_tuple[x[index]][0]
valid[x[index]] = list_tuple[x[index]][1]
test[x[index]] = list_tuple[x[index]][2]
return np.fromiter(train.keys(),dtype=float), np.fromiter(train.values(),dtype=float)\
,np.fromiter(valid.keys(),dtype=float), np.fromiter(valid.values(),dtype=float)\
, np.fromiter( test.keys(),dtype=float), np.fromiter(test.values(),dtype=float)
# +
N = 10
M = 10
xt,tt,xv,tv,xtst,ttst = generateDataset3(N, f, 0.03)
print(make_bold ('xt:'),xt \
,make_bold ('tt:'),tt \
,make_bold ('xv:'),xv \
,make_bold ('tv:'),tv \
,make_bold ('xtst:'),xtst \
,make_bold ('ttst:'),ttst \
,sep='\n')
# -
# Given a synthetic dataset, we optimize for the value of λ by varying the value of log(λ) from -40 to -20 on the development set.
def normalized_error(x, t, N, M):
# the indexes in the array are by the log result + 40
# log(x) = -40 => errors_arr[0] = error_for_x
errors = []
for num in range(-40, -19):
# get pls with lambda=2^num
w = optimizePLS(x, t, M, 2**num)
# create the polynom
pol_res = polynomial(w, x, M)
#the error is L2
error = float((math.sqrt(sum(pow((t - pol_res), 2)))) / N)
errors.append(error)
min_arg = np.argmin(errors)-40
lamb = 2**float(min_arg)
lambdas= [2**i for i in range(-40,-19)]
plt.plot(lambdas,errors)
plt.title("labmbda Vs.error")
return lamb
def optimizePLS2(xt, tt, xv, tv, M):
return normalized_error(xv, tv, len(tv), M)
N = 10
M = 3
xt,tt,xv,tv,xtst,ttst = generateDataset3(N, f, 0.03)
lamb = optimizePLS2(xt, tt, xv, tv, M)
# +
N = 10
M = 3
xt,tt,xv,tv,xtst,ttst = generateDataset3(N, f, 0.03)
lamb=optimizePLS2(xt, tt, xv, tv, M)
def plot_polynom(x,t,M,lamb,title,N):
w= optimizePLS(x,t,M,lamb)
y = polynomial(w,x, M)
plt.title(f"{title},N={N}")
line1 = plt.plot(x, y)
line2 = plt.plot(x, t)
plt.legend(['f(x)','t'])
plt.show()
plot_polynom(xt,tt,M,lamb,'train',N)
plot_polynom(xv,tv,M,lamb,'valid',N)
plot_polynom(xtst,ttst,M,lamb,'test',N)
# -
# This is a pretty good job!
# <a name="prob-regr"></a>
# <h3>2.4 Probabilistic Regression Framework</h3>
#
# We now consider the same problem of regression (learning a function from a dataset) formulated in a probabilistic framework.
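#
# The estimator below implements the standard predictive distribution for Bayesian polynomial regression (Bishop, Section 1.2.6): with the feature vector $\phi(x) = (1, x, \dots, x^M)^{\top}$ and
#
# $$ S^{-1} = \alpha I + \frac{1}{\sigma^2}\sum_{n=1}^{N}\phi(x_n)\phi(x_n)^{\top}, $$
#
# the predictive distribution is $p(t \mid x) = \mathcal{N}\big(t \mid m(x), s^2(x)\big)$ with
#
# $$ m(x) = \frac{1}{\sigma^2}\,\phi(x)^{\top} S \sum_{n=1}^{N}\phi(x_n)\, t_n, \qquad s^2(x) = \sigma^2 + \phi(x)^{\top} S\, \phi(x). $$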
def bayesianEstimator(x_bold, t, M, alpha, sigma2):
phi_x = compute_matrix(x_bold, M)
phi_x = phi_x.T
prod = np.dot(phi_x, phi_x.T)
s_matrix_inv = alpha * np.eye(M + 1) + (1 / sigma2) * prod
s_matrix = np.linalg.inv(s_matrix_inv)
def var(x):
x=np.array([x])
tmp_matrix_x = compute_matrix(x, M)
return sigma2 + np.linalg.multi_dot([tmp_matrix_x, s_matrix, tmp_matrix_x.T])
def mean(x):
x=np.array([x])
tmp_matrix_x = compute_matrix(x, M)
return (1/sigma2) * np.linalg.multi_dot([tmp_matrix_x, s_matrix, phi_x, t])
return var,mean
# We draw the plot of the original function $y = sin(2πx)$ over the range $[0 \dots 1]$, the mean of the predictive distribution $m(x)$ and the confidence interval $m(x) - \sqrt{var(x)}$ and $m(x) + \sqrt{var(x)}$ (that is, one standard deviation around each predicted point) for the values:
# * $\alpha = 0.005$
# * $\sigma^2 = 1/11.1$
# * $M = 9$
#
# over a synthetic dataset of size $N=10$ and $N=100$.
# +
## Draw the plot
N = 10
M = 9
x, t = generateDataset(N, f, math.sqrt(1/11.1))
x=np.array(x)
var,mean = bayesianEstimator(x, t, M, 0.005, 1/11.1)
var=np.vectorize(var)
mean =np.vectorize(mean)
f1=mean(x)-np.sqrt(var(x))
f2=mean(x)+np.sqrt(var(x))
plt.title("N=10")
plt.plot(x, f(x),label='f(x)',color='orange')
plt.plot(x,mean(x),label='mean',color='g')
plt.scatter(x, t, facecolors='none',edgecolors='b',s=50,label='t')
plt.fill_between(x,f1,f2, color='r', alpha=.1)
plt.legend()
plt.show()
# +
## Draw the plot
N = 100
M = 9
x, t = generateDataset(N, f, math.sqrt(1/11.1))
x=np.array(x)
var,mean = bayesianEstimator(x, t, M, 0.005, 1/11.1)
var=np.vectorize(var)
mean =np.vectorize(mean)
f1=mean(x)-np.sqrt(var(x))
f2=mean(x)+np.sqrt(var(x))
plt.title("N=100")
plt.plot(x, f(x),label='f(x)',color='orange')
plt.plot(x,mean(x),label='mean',color='g')
plt.scatter(x, t, facecolors='none',edgecolors='b',s=50,label='t')
plt.fill_between(x,f1,f2, color='r', alpha=.1)
plt.legend()
plt.show()
# -
# Interpret the height of the band around the most likely function in terms of the distribution of the xs in your synthetic dataset.
# Can you think of ways to make this height very small in one segment of the function and large in another?
# <hr/>
# <hr/>
# <a name="P3"></a>
# <h2>Part 3: Neural Models for Classification</h2>
#
# In this section, we adapt the PyTorch tutorial on
# <a href="https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html">Character RNN for classification</a>
# to a different dataset.
#
# <a name="readtut"></a>
# <h3>3.1 Summarize the Tutorial</h3>
#
# We summarize the <a href="https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html">PyTorch tutorial</a>:
# The task discussed in the tutorial is to build and train a basic character-level RNN to classify words. The module is built from 2 linear layers that operate on the input and hidden state, with a LogSoftmax layer on the output.
# The network receives words as input (treated as a series of characters). At each step the previous hidden state is fed back in together with the next character, and the final output is a prediction (a probability) of which class a word belongs to. The network is trained on thousands of surnames from 18 different languages; it then predicts the language of origin of a name based on its spelling.
# To encode the data, text files with names are split into lines and converted from Unicode to ASCII. Then a dictionary is created mapping each category (language) to its list of lines (names).
# Next, the data is turned into tensors using "one-hot" vectors. Each vector has size number_of_letters. To represent a word, the letter vectors are stacked into a tensor of shape
# (length_of_line x 1 x number_of_letters).
# Loss function - nn.NLLLoss (the last layer in the RNN is nn.LogSoftmax):
#
# $\ell(x, y) = L = \{l_1, \dots, l_N\}^{\top}, \quad l_n = -w_{y_n}\, x_{n, y_n}, \quad w_c = \text{weight}[c] \cdot \mathbf{1}\{c \neq \text{ignore\_index}\}$
#
# Each iteration of the training loop:
# * Create the input and target tensors
# * Create a zeroed initial hidden state
# * Read each letter in and keep the hidden state for the next letter
# * Compare the final output to the target
# * Back-propagate
# * Return the output and the loss
#
# Results: a confusion matrix shows how the network performs on the different categories - for every actual language, which language the network guessed. The matrix is built like the training loop but without back-propagation. At the end we can see, for each language, how the guesses are distributed (each row normalized to values between 0 and 1).
#
#
# (Reading <a href="https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html">PyTorch Tensor Tutorial</a> helps)
# <a name="newdata"></a>
# <h3>3.2 Explore City Names Dataset</h3>
#
# We use a dataset on city names in different countries to train a classifier.
#
# The data from <a href="cities_val.zip">cities_val.zip</a> (validation) and <a href="cities_train.zip">cities_train.zip</a> (training)
# is under "../data/cities/val" and "../data/cities/train".
#
# +
import codecs
import math
import random
import string
import time
import numpy as np
from sklearn.metrics import accuracy_score
'''
Don't change these constants for the classification task.
You may use different copies for the sentence generation model.
'''
languages = ["af", "cn", "de", "fi", "fr", "in", "ir", "pk", "za"]
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
import unicodedata
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# print(unicodeToAscii('Ślusàrski'))
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
# Read a file and split into lines
def readLines(filename):
lines = codecs.open(filename, "r",encoding='utf-8', errors='ignore').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# -
# Explore the train dataset by computing basic descriptive statistics:
# number of categories, tokens per category, number of characters, distinct characters, average number of characters per token.
#
# Explain why the unicodeToAscii is a good idea for this task.
# unicodeToAscii is a good idea for this task because we want to remove accents, i.e. normalize the input text. That way the text stays 'clean' of any symbols other than the letters in all_letters (plus spaces and a little punctuation), so city names from all countries share one small character set. The unicodedata module defines character properties for all Unicode characters: normalize returns the normal form of a Unicode string, and category returns the general category assigned to a character as a string.
# +
# Compute statistics over cities dataset
import glob
import os
def findFiles(path):
return glob.glob(path)
chars_statistic={}
tokens_statistic={}
all_data = ""
print(make_bold('number of characters in files:'))
for filename in findFiles('../data/cities/train/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
data = ' '.join(lines)
chars_statistic[category]=len(data)
print(make_underline(f'\t{category}:'),len(data))
all_data += data
category_lines[category] = lines
n_categories = len(set(all_categories))
print(make_bold('\nnumber of categories:'),n_categories)
tokens_num = 0
print(make_bold('\nnumber of tokens in category:'))
for c in category_lines:
    # number of rows (city names) in each file
    tokens_statistic[c]=len(category_lines[c])
    print(make_underline(f'\t{c}:'),len(category_lines[c]))
    tokens_num += len(category_lines[c])
len_char=len(all_data)
print(make_bold('\nnumber of characters in all files:'),len_char)
dist_char = set(all_data)
print(make_bold('number of distinct characters: '),len(dist_char))
print(make_bold('average number of characters per token:'),(len_char/tokens_num))
# -
plt.bar(chars_statistic.keys(),chars_statistic.values())
plt.title('number of characters in files:')
plt.show()
plt.bar(tokens_statistic.keys(),tokens_statistic.values())
plt.title('number of tokens in files:')
plt.show()
# <a name="citiesmodel"></a>
# <h3>3.3 Train a Model and Evaluate It</h3>
#
# Adapt the code of the PyTorch tutorial to run on this new dataset.
#
# Report on performance in a similar manner. Explain the main confusion cases observed in the confusion matrix.
# +
import torch
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
return all_letters.find(letter)
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1
return tensor
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li, letter in enumerate(line):
tensor[li][0][letterToIndex(letter)] = 1
return tensor
print(letterToTensor('J'))
print(lineToTensor('Jones').size())
# +
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
# +
input = letterToTensor('c')
hidden =torch.zeros(1, n_hidden)
output, next_hidden = rnn(input, hidden)
output
# +
input = lineToTensor('Chongjingdong')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input[0], hidden)
output
# +
def categoryFromOutput(output):
top_n, top_i = output.topk(1)
category_i = top_i[0].item()
return all_categories[category_i], category_i
categoryFromOutput(output)
# +
import random
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line)
# +
criterion = nn.NLLLoss()
learning_rate = 0.001 # If you set this too high, it might explode. If too low, it might not learn
def train(category_tensor, line_tensor):
hidden = rnn.initHidden()
rnn.zero_grad()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
loss = criterion(output, category_tensor)
loss.backward()
# Add parameters' gradients to their values, multiplied by learning rate
for p in rnn.parameters():
p.data.add_(p.grad.data, alpha=-learning_rate)
return output, loss.item()
# +
import time
import math
n_iters = 100000
print_every = 5000
plot_every = 1000
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
start = time.time()
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
guess, guess_i = categoryFromOutput(output)
correct = '✓' if guess == category else '✗ (%s)' % category
print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
# +
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.figure()
plt.plot(all_losses)
# +
# Keep track of correct guesses in a confusion matrix
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
# Just return an output given a line
def evaluate(line_tensor):
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
return output
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomTrainingExample()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
confusion[category_i][guess_i] += 1
# Normalize by dividing every row by its sum
for i in range(n_categories):
confusion[i] = confusion[i] / confusion[i].sum()
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# sphinx_gallery_thumbnail_number = 2
plt.show()
# +
def predict(input_line, n_predictions=3):
print('\n> %s' % input_line)
with torch.no_grad():
output = evaluate(lineToTensor(input_line))
# Get top N categories
topv, topi = output.topk(n_predictions, 1, True)
predictions = []
for i in range(n_predictions):
value = topv[0][i].item()
category_index = topi[0][i].item()
print('(%.2f) %s' % (value, all_categories[category_index]))
predictions.append([value, all_categories[category_index]])
predict('katiafi')
predict('le theuil')
predict('gilin')
# -
confusion
# <a name="bettercitiesmodel"></a>
# <h3>3.4 Improve the RNN Model (Optional)</h3>
#
# Explore methods to improve performance of the cities classifier.
# Use a character RNN, dropout, better initialization.
# Report on error reduction.
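# One possible direction (a sketch with assumed hyperparameters, not a tuned solution): replace the hand-rolled RNN cell with an nn.GRU, add dropout, and train with an optimizer such as Adam instead of the manual SGD update. The class below reuses n_letters and n_categories defined above and keeps the same <line_length x 1 x n_letters> input format.
# +
# Hedged sketch of an improved classifier: GRU + dropout (hyperparameters are illustrative assumptions)
import torch.nn as nn

class GRUClassifier(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=2, dropout=0.3):
        super(GRUClassifier, self).__init__()
        self.gru = nn.GRU(input_size, hidden_size, num_layers=num_layers, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, line_tensor):
        # line_tensor: <line_length x 1 x n_letters>, processed in a single call
        _, h_n = self.gru(line_tensor)            # h_n: <num_layers x 1 x hidden_size>
        features = self.dropout(h_n[-1])          # final hidden state of the last layer
        return self.softmax(self.out(features))   # <1 x n_categories>

# gru_rnn = GRUClassifier(n_letters, 128, n_categories)
# optimizer = torch.optim.Adam(gru_rnn.parameters(), lr=1e-3)
# -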
| 62,230 |
/126.单词接龙Ⅱ.ipynb
|
f65aed6509b47440513bf3d81e41998ca56edf02
|
[] |
no_license
|
yuhe1984/Leecode-Solutions
|
https://github.com/yuhe1984/Leecode-Solutions
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 9,043 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from matplotlib import pyplot as plt
import h5py
import numpy as np
mappedref = "CGACATCGAGGTGCCAAACACCGCCGTCGATATGAACTCTTGGGCGGTATCAGCCTGTTATCCCCGGAGTACCTTTTATCCGTTGAGCGATGGCCCTTCCATTCAGAACCACCGGATCACTATGACCTGCTTTCGCACCTGCTCGCGCCGTCACGCTCGCAGTCAAGCTGGCTTATGCCATTGCACTAACCTCCTGATGTCCGACCAGGATTAGCCAACCTTCGTGCTCCTCCGTTACTCTTTAGGAGGAGACCGCCCCAGTCAAACTACCCACCAGACACTGTCCGCAACCCGGATTACGGGTCAACGTTAGAACATCAAACATTAAAGGGTGGTATTTCAAGGTCGGCTCCATGCAGACTGGCGTCCACACTTCAAAGCCTCCCACCTATCCTACACATCAAGGCTCAATGTTCAGTGTCAAGCTATAGTAAAGGTTCACGGGGTCTTTCCGTCTTGCCGCGGGTACACTGCATCTTCACAGCGAGTTCAATTTCACTGAGTCTCGGGTGGAGACAGCCTGGCCATCATTACGCCATTCGTGCAGGTCGGAACTTACCCGACAAGGAATTTCGCTACCTTAGGACCGTTATAGTTACGGCCGCCGTTTACCGGGGCTTCGATCAAGAGCTTCGCCTTGCGGCTAACCCCATCAATTAACCTTCCGGCACCGGGCAGGCGTCACACCGTATACGTCCACTTTCGTGTTTGCACAGTGCTGTGTTTTTAATAAACAGTTGCAGCCAGCTGGTATCTTCGACTGATTTCAGCTCCATCCGCGAGGGACTTCACCTACATATCAGCGTGCCTTCTCCCGAAGTTACGGCACCATTTTGCCTAGTTCCTTCACCCGAGTTCTCTCAAGCGCCTTGGTATTCTCTACCTGACCACCTGTGTCGGTTTGGGGTACGATTTGATGTTACCTGATGCTTAGAGGCTTTTCCTGGAAGCAGGGCATTTGTTGCTTCAGCACCGTAGTGCCTCGTCATCACGCCTCAGCCTTGATTTTCCGGATTTGCCTGGAAAACCAGCCTACACGCTTAAACCGGGACAACCGTCGCCCGGCCAACATAGCCTTCTCCGTCCCCCCTTCGCAGTAACACCAAGTACAGGAATATTAACCTGTTTCCCATCGACTACGCCTTTCGGCCTCGCCTTAGGGGTCGACTCACCCTGCCCCGATTAACGTTGGACAGGAACCCTTGGTCTTCCGGCGAGCGGGCTTTTCACCCGCTTTATCGTTACTTATGTCAGCATTCGCACTTCTGATACCTCCAGCATACCTCACAGTACACCTTCACAGGCTTACAGAACGCTCCCCTACCCAACAACACATAGTGTCGCTGCCGCAGCTTCGGTGCATGGTTTAGCCCCGTTACATCTTCCGCGCAGGCCGACTCGACCAGTGAGCTATTACGCTTTCTTTAAATGATGGCTGCTTCTAAGCCAACATCCTGGCTGTCTGGGCCTTCCCACATCGTTTCCCACTTAACCATGACTTTGGGACCTTAGCTGGCGGTCTGGGTTGTTTCCCTCTTCACGACGGACGTTAGCACCCGCCGTGTGTCTCCCGTGATAACATTCTCCGGTATTCGCAGTTTGCATCGGGTTGGTAAGTCGGGATGACCCCCTTGCCGAAACAGTGCTCTACCCCCGGAGATGAATTCACGAGGCGCTACCTAAATAGCTTTCGGGGAGAACCAGCTATCTCCCGGTTTGATTGGCCTTTCACCCCCAACCACAAGTCATCCGCTAATTTTTCAACATTAGTCGGTTCGGTCCTCCAGTTAGTGTTACCCAACCTTCAACCTGCCCATGGCTAGATCACCGGGTTTCGGGTCTATACCCTGCAACTTAACGCCCAGTTAAGACTCGGTTTCCCTTCGGCTCCCCTATTCGGTTAACCTTGCTACAGAATATAAGTCGCTGACCCATTATACAAAAGGTACGCAGTCACACGCCTAAGCGTGCTCCCACTGCTTGTACGTACACGGTTTCAGGTTCTTTTTCACTCCCCTCGCCGGGGTTCTTTTCGCCTTTCCCTCACGGTACTGGTTCACTATCGGTCAGTCAGGAGTATTTAGCCTTGGAGGATGGTCCCCCCATATTCAGACAGGATACCACGTGTCCCGCCCTACTCATCGAGCTCACAGCATGTGCATTTTTGTGTACGGGGCTGTCACCCTGTATCGCACGCCTTTCCAGACGCTTCCACTAACACACACACTGATTCAGGCTCTGGGCTGCTCCCCGTTCGCTCGCCGCTACTGGGGGAATCTCGGTTGATTTCTTTTCCTCGGGGTACTTAGATGTTTCAGTTCCCCCGGTTCGCCTCATTAACCTATGGATTCAGTTAATGATAGTGTGTCGAAACACACTGGGTTTCCCCATTCGGAAATCGCCGGTTATAACGGTTCATATCACCTTACCGACGCTTATCGCAGATTAGCACGTCCTTCATCGCCTCTGACTGCCAGGGCATCCACCGTGTACGCTTAGTCGCTTAACCTCACAACCCGAAGATGTTTCTTTCGATTCATCATCGTGTTGCGAAAATTTGAGAGACTCACGAACAACTTTCGTTGTTCAGTGTTTCAATTTTCAGCTTGATCCAGATTTTTAAAGAGCAAAACTTCGCAGTGAACCTTTGCAGGTACACTCTGAAGTATTTTTTATTTAATCACTACAGAGATGGTGGAGCTATGCGGGATCGAACCGCAGACCTCCTGCGTGCAAAGCAGGCGCTCTCCCAGCTGAGCTATAGCCCCATAACATGTAGTTAAAACCTCTTCAAATTTGCCGTGCAAATTTGGTAGGCCTGAGTGGACTTGAACCACCGACCTCACCCTTATCAGGGGTGCGCTCTAACCACCTGAGCTACAAGCCTGTAGAGGTTTTACTGCTCATTTTCATCAGACAATCTGTGTGAGCACTTCAAAGAACGCTTCTTTAAGGTAAGGAGGTGATCCAACCGCAGGTTCCCCTACGGTTACCTTGTTACGACTTCACCCCAGTCATGAATCACAAAGTGGTAAGCGCCCTCCCGAAGGTTAAGCTACCTACTTCTTTTGCAACCCACTCCCATGGTGTGACGGGCGGTGTGTACAAGGCCCGGGAACGTATTCACCGTGGCATTCTGATCCACGATTACTAGCGATTCCGACTTCATGGAGTCGAGTTGCAGACTCCAATCCGGACTACGACGCACTTTATGAGGTCCGCTTGCTCTCGCGAGGTCGCTTCTCTTTGTATGCGCCATTGTAGCACGTGTGTAGCCCTGGTCGTAAGGGCCATGATGACTTGACGTCATCCCCACCTTCCTCCAGTTTATCACTGGCAGTCTCCTTTGAGTTCCCGGCCGGACCGCTGGCAACAAAGGATAAGGGTTGCGCTCGTTGCGGGACTTAACCCAACATTTCACAACACGAGCTGACGACAGCCATGCAGCACCTGTCTCACAGTTCCCGAAGGCACCAATCCATCTCTGGAAAGTTCTGTGGATGTCAAGACCAGGTAAGGTTCTTCGCGTTGCATCGAATTAA
ACCACATGCTCCACCGCTTGTGCGGGCCCCCGTCAATTCATTTGAGTTTTAACCTTGCGGCCGTACTCCCCAGGCGGTCGACTTAACGCGTTAGCTCCGGAAGCCACGCCTCAAGGGCACAACCTCCAAGTCGACATCGTTTACGGCGTGGACTACCAGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTGAGCGTCAGTCTTCGTCCAGGGGGCCGCCTTCGCCACCGGTATTCCTCCAGATCTCTACGCATTTCACCGCTACACCTGGAATTCTACCCCCCTCTACGAGACTCAAGCTTGCCAGTATCAGATGCAGTTCCCAGGTTGAGCCCGGGGATTTCACATCTGACTTAACAAACCGCCTGCGTGCGCTTTACGCCCAGTAATTCCGATTAACGCTTGCACCCTCCGTATTACCGCGGCTGCTGGCACGGAGTTAGCCGGTGCTTCTTCTGCGGGTAACGTCAATGAGCAAAGGTATTAACTTTACTCCCTTCCTCCCCGCTGAAAGTACTTTACAACCCGAAGGCCTTCTTCATACACGCGGCATGGCTGCATCAGGCTTGCGCCCATTGTGCAATATTCCCCACTGCTGCCTCCCGTAGGAGTCTGGACCGTGTCTCAGTTCCAGTGTGGCTGGTCATCCTCTCAGACCAGCTAGGGATCGTCGCCTAGGTGAGCCGTTACCCCACCTACTAGCTAATCCCATCTGGGCACATCCGATGGCAAGAGGCCCGAAGGTCCCCCTCTTTGGTCTTGCGACGTTATGCGGTATTAGCTACCGTTTCCAGTAGTTATCCCCCTCCATCAGGCAGTTTCCCAGACATTACTCACCCGTCCGCCACTCGTCAGCAAAGAAGCAAGCTTCTTCCTGTTACCGTTCGACTTGCATGTGTTAGGCCTGCCGCCAGCGTTCAATCTGAGCCATGATCAAACTCT"
reffrombioguys = "GTTGTGCTTCGTTCCGGTTACGTATTGCTAATGATACGGCGGCGACCACCGAGATCTACTAGCATGTGCTAGGACGACATCGAGGTGCCAAACACCGCCGTCGATATGAACTCTTAGGCGGTATCGGCCTATTATCAGGTACCTTTATCGTTAGAGCGATGGCCGCCATTTCAGAGCCACCGGATCACTATGGCTGCTTTCGCACCTGCTCGCGCCGTCACGCTCTGTCAAGCTGGCTTATGCCATTGCACTAACCTCTGATGTCCGACCAGGATTGTAACCTTCGTGCTCCTCCGTTACTCTTTAGGAGGGAGACCTTTGATCAAACTACCCACCAGACACTGTCCGCAGCGGATTACGGGTCAACGTTAGAACATCAAACATTAAAGGGTGGTATTTCAAAGGTCGGCTCCATGCGGGCTGGCGTCCCTACTTCAAAGCCTCCACCTATCCTACACATCAAGGCTCCATGTTCAGTGTCAAATATAGTAAGGTTCAGGTCTTTCCGTCTTGCCGCGGGTACACTGCATCTTCGCAGCGAGTTCAATTTCACTGAGTCTCAGGTGGAGACATGCCAGCCATCATTACGCCATTCGTGCAGGTCGGAACTTACCCGACAAGAATTTCGCTACCTTAGGACCGTTATAGTTACGGCCGCCGTTTACCGAGGCTTCGATCAAAGCTTCGCCTTGCGGCTAACCCCATCATTAACCTTCAAGCACCGGGCGGCGATCACACACCATGTACGTCCCACTTTCGTGTTTGCACAGTGCTGTGTTTTTAATAAACAGTTGCAGCCAGCTGGTATCTTCGACTGATTTCAAACCATGCGAGGACTTCACCTACATATCCGGTGTATTCTCCCGAAGTTACGGCACCATTTTGCCCTAGTTCAACTGCCAGGGTTATATAAGCGCCTTGGTATTCTCTGCCTGACACCTGTGTCGGTTTGGGGGCGCGATTTGTGTTACCTGAGTGCTTAGAGGCTTTTCCTGGAAGCAGGGCATTTGTTGCTTCGGCACCGTAGTGCCTCGTCATCACGCCTCAGCCTTGACTCCGGATTTGCACGAAGCCAGCCTACACGCTTAAACCAGGCAACCGTCGCCCGGCCAACGCATCAGCCTTCTCCGTCCCCCTTCGCGGTAACACCAAGTACAGGAATAACCTGTTTCCCATCATTCGCCTTTCGGCCTCGCCTTAGGGGTCGGCTGCTACCGGTAACGATTGGACAGGAACCCTTGATTCTTCCGGCGAGCGGGCTACCACTTTATCGTTACTTGTATTCAGCATTCGCACTTCTGATACCTCCAGCATGCCTCACAGTACACCTTCACAGCGGGCTTACAGAACGCTCCCTGCACAACAACACATAGTGTCAGCTGCCGCGGCTTCGGTGCATGGTTTATTGCGTACATCTTCCGCGCAGGCCGACTCGACCGACATTATTCATTACGCTTTCTTTAATGATAACTTCTAGCCAACATCCGCAACATCCAGGCTTCCCACATCGTTTCCCACTTAACCATGACTTTGGGACGCCAAGCTGGCAGGTCTGCGGTTCTTTCCTCTGCGACGGACGTTAGCACCGCCGTGTGTCTCCGTGATAACATTCTCCGGTATTCACAGTTTGCATCAGGTTGGTAAGTCAGGATGACCCCCCTTGCCGAAACAGTGCTCTACCCCCGGAGATGGTCACGAGGCGCTACCTAAATAGCTTTCGGGGAGAACCAGCTATCTCCCGGTTTGATTGGCCTTTCACCCCCAACCACAAGTCATCGCTAATTTTTCAACATTGATCGGTTCGGTCCTCCAGTTAGTGTTACCCAACCTTCAACCTACTTATGGCTGATCACCGGGTTTCGGGTCTATACCCCTGCAACTTAACGCCCAGTTAAGACTCGGACTTCCCTTCAGCTCCCCCTATTCGGTTAACGCTGCAGAATATAAGTCGCTGACCCATTATACAAAGGTACGCAGTCACACGCCTAGCGTGCTCCCACTCTTGCTTTTGGTTTCAGGTTCTTTTCACTCCCTCGCGGGGTTCTTTTCGCCTTTCCTGCAGGTACTGGTTTGCTATCGGTCAGTCAAATTATTTAGCCTTGGAGGATGGTCCCCCATATTCAGACAGGATACCACGTGTCCGCCCCTACTCATCGACTCTGGCGTATTATCGGTGTTTTTGTGTTACTGGTTTTTGTCACCCTGTATCGCACGCGCCCTCCAGACAGCCACTAACACACACACTGGTTCAGGCTCTGGGCTGCTCGTTGCTCCTCGCCGCTACTGGGGAATCTCAGTTGATTTCTTTTCCTCAGGGTGCCGATGTTTCAGTTTCCCCCAGTTCGCCCCTCATTAACCTATGGATTCGGTGTGATGGTGTGTCGAAACACACTGGGTTTCCCCATTCGGAAATCGCCGGTTATAACGGTTCATATCACCTTACCGACGCTTATCGCAGATTAGCGTCCTTCATCGCCTCTGACTGCCAGGGCATCCACCGTGTACGCTTAGTCGCTTAACCTCACAACCCGAAGATGTTACCGTTCATCATCGTGTTGAAAATTTGAAGACTCACGAACAACTTTCGTTGTTCAGTGTTTCCAATTTTCAGCAGTCAGTTTTTAAGAGCAAAACTTCGCAGTAATTTGCGGGTGCTCTGAAGTATTTTTATTTAATCACTACGAAGGATGGTGGGCTATGGGATCAGACCGCAGACCTCCTGCGTGCAAGCAGGCGCTCTCCCAGCTGAGCTATGGCCCCCATAACATGTAGTTAAAACCTCTTCAAATTTGCCGTGCAAATTTGGTAGGCCCTGATGGACTTGAACCACCGACCTCACCCACATATCAGAGAGTTTGCTCTAGTACCTGAGCTACAAGCCTGAAAGTTTACTGCTCATTTTCATCAGACCAATCTGTGTAGGCAGCACTTCAAGAACGCTTCTTTAAGGTAAGGAGGTGATCAACCGCAGGTTCCCCTACGATTACCTTGTTACGACTTCACCCCAGTCATGAATCCACAAAGTGGTAGCTTCCCTCCGAAGGGTTAAGCTACCTACTTCTTTTGCAACCCCTCCCATGGTGTGACGGGCGGGTGTGTACAAGGCCGGGAACGTATTCACCGTGGCATTCTGATCCACGATTATAGCGATTCCGACTTCATGGAGTCGAGTTGCAGACTCCAATCGGACTACGACGCACTTTATGAGGTCCGCTTGCTCTCGCGAGGTCGCTTCTCTTTGTATGCGCCATTGCCTTGGCACGTGTGTAGCCCTGGTCGTAGAGGCCATGATGACTTGACGTCATCCCCACCTTCCTCCAGTTTATCACTGGCAGTCTCCTTTGAGTTCCCAGCCGGACCGCTGGCAACAAAGGATAAGGGGATTTGCGCGCTCGTTGCAGGACTTAACCCAACATTTCACAACACGAGCTGACGACAGCCATGCAGCACCTGTCTCACAGTTCCCGAAGGCACCAATCCATCTCTGGAAAGTTCTGTGGATGTCAAGACCAGGTAAGGTTCTTCGCGTTGCATCGA
ATTAAACCACATGCTCCACCGCTTGTGCAGGCCCCGTCAATTCATTGAGTTTTAACCTTGCAGCCCGTACTCCCAGGCGGTCGACTTAACGCGTTAGCTCGGAAGCCACGCCTCAAGGGCACAACCTCAAGTCGACATCGTTTACGGCGTGGACTACCAGGGTATAATCCTGTTGCTCCCCACGCTTTCGCACCTGAGCGTCAGTCTTCGTCAGGGGGCCGCCTTCGCCACCGGTATTCCTCAGATCTCTCGCATTTCACCGCTTTACACCTGGAATTCTACCCCCTCTACGAGACTCAAGCACCCGAGTATCAGATGCAGTTTGGGTTGAGCGAAGTTTCACATCTGACTTAACAAACCGCCGCCGTGCGCTTCCCGCCCGGTAATTCCGATTAACGCTTGCACCTCGAAATGATACCGCGGCTGCTGGCACGGAGTTAGCCGGTGCTTCTTCTGCAGGTAACGTCAACGAGCAAGGTAGTAACTTTTCCACTCCTTCGCACCTCCCCGCTGAAAAGTACTTTACAACCAGTGCTTCTTCATACACGCGGCATGAAAACGCATCAGGCTTTACGCCCATTGTGCAATATTCCCCACTGCCGCCTCCGCTAGGAGTCTGGACCGTGTCTCAGTTCCAGTGTGGCTGGTCATCCTCTCAGACCAGCTAGGGATCGTCGCCTAGGTGAGCCGTTACCCCACCTACTAGCTAATCCCATCTGGGCACATCCGATGGCAAGAGGCCAGAGGTCCCCCTCTTTGGTCTTGCGACGTTATGCGGTATTAGCTACCGTTTCCAGTAGTTATCCCCCTCCATCAGGCAGTTTCCAGACATTACTCACCCGTCCGCCACTCGTCAGCAAAGAAACGAAACTTCTTCCTGTTACCATTAAACTTGCATGTGTTAGGCCTGCCGCCAGCGTTCAATCTGAGCCATGATCAAACTCTCTCCACCATGTAGTACCAATCTCGTATGCCGTCTTCTGCTTGAAGGCAATGCGTAAC"
# +
# from taiyaki encoding
labelBaseMap = {
0: "A",
1: "C",
2: "G",
3: "T"
}
def get_reads_dict(filename):
file = h5py.File(filename, "r")
file = file['Reads']
reads = []
for r in file.keys():
elem = {}
elem['UUID'] = r
for k in file[r].keys():
elem[k]=file[r][k][()]
reads.append(elem)
return reads
reads = get_reads_dict("../taiyakiOutputs/output_createfasta.hdf5")
print(reads[0]['UUID'])
ref = reads[0]['Reference']
ref = list(map(lambda x: labelBaseMap[x], ref))
ref = "".join(ref)
# len ref == len mappedref
print(reffrombioguys)
print(mappedref)
print(len(reffrombioguys))
print(len(mappedref))
# -
i=1
while len(reffrombioguys.split(mappedref[:i])) > 1:
i+=1
print(i-1)
print(len(reffrombioguys.split(mappedref[:41])[0]))
print(mappedref[:30])
# + colab={} colab_type="code" hidden=true id="_glgqtWrGrj6"
x_n = x - A @ z_s_500 # Extract noise
train_x = x_n[:x_n.shape[0] // 2] # Train data to fit the model
test_x = x_n[x_n.shape[0] // 2:] # Test data to validate the model
M = 15 # You are supposed to find the best parameter $M$ during your study
X_train = build_X(train_x, M)
# + [markdown] colab_type="text" hidden=true id="eN-R3tGiWL8M"
# - (3 pts) Once you constructed $X$ based on the training data, the fitting of the parameters $\theta$ again requires the solution of the linear least-squares problem. This time you are supposed to compute pseudoinverse of $X$ via QR decomposition. Hint: use ```numpy.linalg.qr()``` function.
# + colab={} colab_type="code" hidden=true id="KPAHxizqV-oG"
def build_pseudoinverse_QR(A):
    '''
    Input: np.array of size (n - M, M)
    Output: np.array of size (M, n - M)
    '''
    # A = QR with orthonormal Q and upper-triangular R, so for a full-column-rank A
    # the pseudoinverse is A^+ = R^{-1} Q^T
    q, r = np.linalg.qr(A)
    return np.linalg.inv(r) @ q.T
X_inv = build_pseudoinverse_QR(X_train)
theta = X_inv @ train_x[M:]
# + [markdown] colab_type="text" hidden=true id="l1ZwHinqTMhy"
# - (3 pts) Check your model with test data. Plot your predicted noise series and test noise series in one plot. Next, add smooth trend $Az_s$ back to predicted noise series and plot it with the original series $x$ in one plot. You have to get two plots with two time series in every plot. Be careful with indexing!
# + colab={} colab_type="code" hidden=true id="ZHzOLN4_VaP8"
test_X = build_X(test_x,M)
predicted = test_X @ theta
plt.figure(figsize=(9, 6))
plt.plot(x[:1810], label="Original data")
plt.plot(predicted+np.tile(z_s_500,5)[:1810], label="predicted data")
plt.legend(fontsize=12)
plt.ylabel("Values", fontsize=18)
plt.xlabel("Time", fontsize=18)
plt.grid(True)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# plt.plot(A@z_s_500[:1810])
# Plotting
# +
plt.figure(figsize=(9, 6))
plt.plot((x-A @z_s_500)[:1810], label="Original noise")
plt.plot(predicted[:1810], label="predicted noise")
plt.legend(fontsize=12)
plt.ylabel("Values", fontsize=18)
plt.xlabel("Time", fontsize=18)
plt.grid(True)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# + [markdown] colab_type="text" hidden=true id="c4z9T7OlT1p6"
# - (Bonus) How valuable is your model for real time prediction? Try to perform honest time-marching with your model starting from $x_{M+1}$ to $x_{M+100}$. Is it still good? Try to choose the best parameters $M$ and $\lambda$.
#
# + colab={} colab_type="code" hidden=true id="5TYqYs-pFL84"
# Your solution is here
# + [markdown] heading_collapsed=true
# # Problem 2 (Theoretical tasks) (26 pts)
#
# _1._
# - (1 pts) what are the constants $C_1$ and $C_2$ such that $C_1 \|x\|_{\infty} \leq \|x\|_2 \leq C_2 \| x\|_{\infty}$
# - (5 pts) Prove that $\| U A \|_F = \| A U \|_F = \| A \|_F$ for any unitary matrix $U$.
# - (5 pts) Prove that $\| U A \|_2 = \| A U \|_2 = \| A \|_2$ for any unitary matrix $U$.
#
# _2._
# - (5 pts) Using the results from the previous subproblem, prove that $\| A \|_F \le \sqrt{\mathrm{rank}(A)} \| A \|_2$. _Hint:_ SVD will help you.
# - (5 pts) Show that for any $m, n$ and $k \le \min(m, n)$ there exists $A \in \mathbb{R}^{m \times n}: \mathrm{rank}(A) = k$, such that $\| A \|_F = \sqrt{\mathrm{rank}(A)} \| A \|_2$. In other words, show that the previous inequality is not strict.
# - (5 pts) Prove that if $\mathrm{rank}(A) = 1$, then $\| A \|_F = \| A \|_2$.
# - (5 pts) Prove that $\| A B \|_F \le \| A \|_2 \| B \|_F$.
# + [markdown] hidden=true
# 1.1
#
# $C_1 \|x\|_{\infty} \leq \|x\|_2 \leq C_2 \| x\|_{\infty}$
#
# $ \Vert x \Vert_{\infty} = \max_i | x_i| $
#
# $ \Vert x \Vert_2 = \Big(\sum_{i=1}^n |x_i|^2\Big)^{1/2} $
#
# $ \max_i | x_i| \leq \Big(\sum_{i=1}^n |x_i|^2\Big)^{1/2} \leq \sqrt{n}\max_i | x_i| $
#
# $ C_1 = 1 ;C_2= \sqrt{n}$
#
# 1.2
#
# $ \|A\|_F = \sqrt{\mathrm{trace}(A^*A)} $
#
# $ \|UA\|_F = \sqrt{\mathrm{trace}((UA)^*UA)} = \sqrt{\mathrm{trace}(A^*U^*UA)} = \sqrt{\mathrm{trace}(A^*A)} $
#
# $ \|AU\|_F = \sqrt{\mathrm{trace}((AU)^*AU)} = \sqrt{\mathrm{trace}(U^*A^*AU)} = \sqrt{\mathrm{trace}(UU^*A^*A)} = \sqrt{\mathrm{trace}(A^*A)} $
#
# $ \|A\|_F = \|UA\|_F = \|AU\|_F $
#
#
# 1.3
#
# $ \Vert A \Vert_{2} = \sup_{\Vert x \Vert_{2} = 1} {\Vert A x \Vert_{2}} $
#
# $ \Vert AU \Vert_{2} = \sup_{\Vert x \Vert_{2} = 1} {\Vert AU x \Vert_{2}} = \sup_{\Vert Ux \Vert_{2} = 1} {\Vert AU x \Vert_{2}} = \sup_{\Vert y\Vert_{2} = 1} {\Vert A y \Vert_{2}} $
#
# $ \Vert UA \Vert_{2} = \sup_{\Vert x \Vert_{2} = 1} {\Vert UA x \Vert_{2}} = \sup_{\Vert x\Vert_{2} = 1} {\Vert A x \Vert_{2}} $
#
#
# 2.1
#
# $\| A \|_F \le \sqrt{\mathrm{rank}(A)} \| A \|_2$
#
# $\| A \|_F = \sqrt{\mathrm{trace}(\Sigma^{*} \Sigma)} = \sqrt{\sum_{i=1}^{\mathrm{rank}(A)} \sigma_i^2}$
#
# $\| A \|_2 = \max_i \sigma_i = \sigma_{\max} $
#
# $ \sqrt{\sum_{i=1}^{\mathrm{rank}(A)} \sigma_i^2} \leq \sqrt{\mathrm{rank}(A)}\, \sigma_{\max} $
#
#
# 2.2
#
# When matrix $A$ has a singular value $\sigma$ of multiplicity $k = rank(A)$ we get $\sqrt{\sum_{i=1}^k \sigma^2 }= \sigma \sqrt{k} = \sqrt{rank(A)} \|A\|_2$
#
#
#
# 2.3
#
# from the previous point in this case we obtain that $k = rank(A)= 1$ and $\sqrt{\sum_{i=1}^k \sigma^2 }= \sqrt{\sum_{i=1}^1 \sigma^2 }= \sigma \sqrt{1} = \sigma=\|A\|_2$
#
# 2.4
#
# we consider $b_i$ to be the $i$-th column of $B$
#
# $ \|AB\|_F^{2} = \sum_{i=1}^{n}\|Ab_{i}\|^2_{2} \leq \|A\|_{2}^2\sum_{i=1}^{n}\|b_{i}\|^2_{2} =\|A\|_2^2 \|B\|_F ^2$
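#
# A quick numerical sanity check of the inequality $\| A \|_F \le \sqrt{\mathrm{rank}(A)}\, \| A \|_2$ (an illustration, not part of the proof):
# +
# Numerical sanity check of ||A||_F <= sqrt(rank(A)) * ||A||_2
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 30)) @ rng.standard_normal((30, 40))  # rank at most 30
fro = np.linalg.norm(A, 'fro')
spec = np.linalg.norm(A, 2)
rank = np.linalg.matrix_rank(A)
print(fro, np.sqrt(rank) * spec, fro <= np.sqrt(rank) * spec + 1e-10)
# -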
# + [markdown] heading_collapsed=true
# # Problem 3 (Matrix calculus) (15 pts)
#
# _1._ (5 pts) Consider the following function
#
# $$ F(U, V) = \frac{1}{2}\|X - UV\|_F^2, $$
#
# where $X \in \mathbb{R}^{n \times n}$, $U \in \mathbb{R}^{n \times k}$ and $V \in \mathbb{R}^{k \times n}$ and $k < n$.
#
# - (2 pts) Derive analytical expression for the gradient of the function $F$ with respect to $U$
# - (2 pts) Derive analytical expression for the gradient of the function $F$ with respect to $V$
# - (1 pts) Estimate computational complexity of computing these gradients (in big-O notation).
#
# _2._ (2 pts) Derive analytical expression for the gradient of the function $f$:
#
# $$ R(x) = \frac{(Ax, x)}{(x, x)}, $$
#
# where $A$ is a symmetric real matrix. Why the gradient of this function is important in NLA you will know in the lectures later.
#
# _3._ (8 pts) Consider the following function $f$
#
# $$f(w) = \log\det\left(\sum_{i=1}^m w_i x_i x_i^{\top}\right),$$
#
# where $x_i, \; i = 1,\dots,m$ are given column vectors.
#
#
# - (3 pts) Derive analytical expression for the gradient of $f$\
# - (1 pts) For what values of $m$ and vectors $x_i$ the function $f$ makes sense and is finite?
# - (4 pts) Consider two approaches to compute it: directly with matrix products and so on and the single-line solution with ```einsum``` function.
# Generate some set of vectors $x_i \in \mathbb{R}^{1000}$ such that the funtion $f$ is finite and compare the time of computing derived gradient with these approaches. Use [%timeit](https://docs.python.org/3.6/library/timeit.html) command to measure time. What do you think about the reason of such behaviour?
# -
# 3.1
#
#
# $ F(U, V) = \frac{1}{2}\|X - UV\|_F^2 = \frac{1}{2} tr\Big( (X-UV)^{*}(X-UV)\Big) = \frac{1}{2} tr\Big( (X^{*}-V^{*}U^{*})(X-UV)\Big) = \frac{1}{2} tr\Big( X^{*}X -X^{*}UV-V^{*}U^{*}X+V^{*}U^{*}UV\Big) = \frac{1}{2} \Big( tr(X^{*}X) - tr(X^{*}UV) - tr(V^{*}U^{*}X) + tr(V^{*}U^{*}UV)\Big)$
#
# 1
#
#
# $d_{V}F(U,V) = \frac{1}{2} d_{V} \Big( tr(X^{*}X) - tr(X^{*}UV) - tr(U^{*}X V^{*}) + tr(U^{*}U V V^{*})\Big) = -\frac{1}{2} d_{V} \Big(tr(X^{*}UV) \Big) - \frac{1}{2} d_{V}\Big(tr(U^{*}X V^{*}) \Big) + \frac{1}{2} d_{V} \Big(tr(U^{*}U V V^{*})\Big)= -\frac{1}{2} (I, X^{*}U dV)-\frac{1}{2} (I, U^{*}X (dV)^{*}) + (I, U^{*}U dV V^{*} ) + (I, U^{*}U V dV^{*} ) = (dV, U^{*}U V) - (U^{*}X, dV) = (U^{*}(UV -X),dV)$
#
#
#
# $\nabla _{V} F(U,V) = U^{*}(UV -X) $
#
# 2
#
# $d_{U}F(U,V) = \frac{1}{2} d_{U} \Big( tr(X^{*}X) - tr(X^{*}UV) - tr(U^{*}X V^{*}) + tr(U^{*}U V V^{*})\Big) = -\frac{1}{2}d_{U}\Big(tr(VX^{*}U) \Big) - \frac{1}{2} d_{U}\Big(tr(X V^{*} U^{*}) \Big) + \frac{1}{2} d_{U} \Big(tr(V V^{*} U^{*}U)\Big) = - \frac{1}{2} (I, VX^{*}d U)- \frac{1}{2} (I, XV^{*}(dU)*)+ \frac{1}{2}(I, U^{*}U dV V^{*}) + \frac{1}{2}(I, U^{*}U V (dV)^{*}) = -(XV^{*}, dU) + (UVV^{*},dU) = ((UV-X)V^{*}, dU)$
#
#
#
# $\nabla _{U} F(U,V) = (UV-X)V^{*} $
#
# 3
#
# The dominant cost is the matrix products: forming $UV$ costs $O(n^2 k)$ and multiplying by $U^{*}$ (or $V^{*}$) costs another $O(n^2 k)$, so computing each gradient costs $O(n^2 k)$ operations (at most $O(n^3)$, since $k < n$).
#
# 3.2
#
#
# $ R(x) = \frac{(Ax, x)}{(x, x)} $
#
# $ \frac{\delta R(x)}{\delta x_j} = \frac{\frac{\delta (Ax, x)}{\delta x_j}}{(x, x)} - \frac{(Ax, x)\frac{\delta (x, x)}{\delta x_j} }{(x, x)^2} = \frac{2(Ax)_{j}}{(x, x)} - \frac{(Ax, x) 2x_{j}}{(x, x)^2} = \frac{2}{(x,x)} \Big( Ax -R(x)x\Big)_{j}$
#
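# A quick numerical check of this gradient formula on a random symmetric matrix (illustration only):
# +
# Numerical check of grad R(x) = 2/(x,x) * (A x - R(x) x) via central differences
import numpy as np

rng = np.random.default_rng(1)
B = rng.standard_normal((5, 5))
A_sym = (B + B.T) / 2          # symmetric matrix
x = rng.standard_normal(5)

R = lambda v: (A_sym @ v) @ v / (v @ v)
analytic = 2 / (x @ x) * (A_sym @ x - R(x) * x)
eps = 1e-6
numeric = np.array([(R(x + eps * e) - R(x - eps * e)) / (2 * eps) for e in np.eye(5)])
print(np.allclose(analytic, numeric))
# -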
# 3.3
#
# 1.
#
# $f(w) = \log\det\left(\sum_{i=1}^m w_i x_i x_i^{\top}\right)$
#
# we use formula = $\frac{\delta log(det(A))}{\delta x} = trace \Big(A^{-1} \frac{\delta A}{\delta x}\Big)$
#
# so we need to compute the derivative $\frac{\delta \left(\sum_{i=1}^m w_i x_i x_i^{\top}\right)}{\delta w_j}$
#
# $\frac{\delta \left(\sum_{i=1}^m w_i x_i x_i^{\top}\right)}{\delta w_j} = x_j x_j^{\top} $
#
# $ \frac{\delta f(w)}{\delta w_j}= trace\Big( x_j x_j^{\top} \Big( \sum_{i=1}^m w_i x_i x_i^{\top}\Big)^{-1}\Big)$
#
# 2.
#
# For the logarithm to be defined and finite we need $\det\left(\sum_{i=1}^m w_i x_i x_i^{\top}\right)>0$.
# The determinant is taken of a sum of $m$ rank-one $n \times n$ matrices $x_i x_i^{\top}$ (each has rank 1 since all of its rows are multiples of $x_i^{\top}$); summing them can raise the rank to at most $m$. So we need $m \geq n$, the vectors $x_i$ must contain $n$ linearly independent ones (i.e. span $\mathbb{R}^n$), and the weights $w_i$ must keep the sum positive definite (for example $w_i > 0$).
# + hidden=true
import numpy as np
k = 7
X = np.random.rand(1000,1000)
mat_k = np.outer(X[k], X[k])
mat = X.T @ X
mat_inv = np.linalg.inv(mat)
# -
# Here we assume that $w=(1,\dots,1)^T$. For the trace of a matrix product, ```einsum``` is quicker because it only computes the diagonal entries instead of forming the full product $A^{-1} x_k x_k^{\top}$.
# %timeit np.einsum('ik, ki', mat_inv, mat_k)
# %timeit np.trace(mat_inv @mat_k)
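# The problem asks for the whole gradient vector, not just one component. A minimal sketch (still assuming $w=(1,\dots,1)^T$, so that $A = X^{\top}X$ with the vectors $x_i$ stored as rows of ```X```): every component $\partial f/\partial w_j = x_j^{\top} A^{-1} x_j$ is obtained with a single ```einsum``` call.
# +
grad_f = np.einsum('ki,ij,kj->k', X, mat_inv, X, optimize=True)   # grad_f[j] = x_j^T A^{-1} x_j
print(np.isclose(grad_f[k], np.trace(mat_inv @ mat_k)))           # spot-check against the trace above
# -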
# + [markdown] heading_collapsed=true slideshow={"slide_type": "fragment"}
# # Problem 4. Compression of the fully-connected layers in neural network with simple architecture (20 pts)
#
# In this problem we consider the neural network that performs classification of the dataset of images.
# Any neural network can be considered as composition of simple linear and non-linear functions.
# For example, a neural network with 3 layers can be represented as
#
# $$f_3(f_2(f_1(x, w_1), w_2), w_3),$$
#
# where $x$ is input data (in our case it will be images) and $w_i, \; i =1,\dots,3$ are parameters that are going to be trained.
#
# We will study the compression potential of a neural network with a simple architecture: an alternation of several linear and non-linear functions.
#
# The main task in this problem is to study how the compression of fully-connected layers affects the test accuracy.
# Any fully-connected layer is represented as linear function $AX + B$, where $X$ is input matrix and $A, B$ are trainable matrices. Matrices $A$ in every layer are going to be compressed.
# The main result that you should get is the plot of dependence of test accuracy on the total number of parameters in the neural network.
# + [markdown] hidden=true
# #### Zero step: install PyTorch
#
# - Follow the steps in [official instructions](https://pytorch.org/get-started/locally/)
# + [markdown] hidden=true
# #### First step: download CIFAR10 dataset
# + hidden=true
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
batch_size = 100
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_loader = torch.utils.data.DataLoader(datasets.CIFAR10('./', train=True, download=True, transform=transform),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(datasets.CIFAR10('./', train=False, transform=transform),
batch_size=batch_size, shuffle=True)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# + [markdown] hidden=true
# #### Check what images are we going to classify
# + hidden=true
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.figure(figsize=(20, 10))
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# get some random training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(8)))
# + [markdown] hidden=true
# ### Second step: neural network architecture
#
# For simplicity, and to demonstrate the idea of neural network compression, consider an architecture consisting only of fully-connected layers with non-linear ReLU functions between them.
# To make the compression effect visible, the dimension of the inner layers is set to 1000.
#
# Below you see an implementation of such a neural network in PyTorch.
# More details about neural networks will be covered in the *Deep learning* course in one of the upcoming terms
# + hidden=true
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(3 * 32 * 32, 1000)
self.fc2 = nn.Linear(1000, 1000)
self.fc3 = nn.Linear(1000, 1000)
self.fc4 = nn.Linear(1000, 1000)
self.fc5 = nn.Linear(1000, 1000)
self.fc6 = nn.Linear(1000, 10)
self.ReLU = nn.ReLU()
def forward(self, x):
x = self.fc1(x.view(-1, 3 * 32*32))
x = self.ReLU(x)
x = self.fc2(x)
x = self.ReLU(x)
x = self.fc3(x)
x = self.ReLU(x)
x = self.fc4(x)
x = self.ReLU(x)
x = self.fc5(x)
x = self.ReLU(x)
x = self.fc6(x)
return F.log_softmax(x, dim=1)
# + [markdown] hidden=true
# #### Implement functions for training and testing after every sweep over all dataset entries
# + hidden=true
def train(model, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
# + hidden=true
def test(model, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
# data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
# + [markdown] hidden=true
# ### Set parameters for training and print intermediate loss values
# + hidden=true
log_interval = 50
epochs = 7
# + [markdown] hidden=true
# ### Third step: run training with the [Adam](https://arxiv.org/pdf/1412.6980.pdf%20%22%20entire%20document) optimization method
#
# If your laptop is not very fast, you will wait some time till training is finished.
# + hidden=true
model = Net()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(1, epochs + 1):
train(model, train_loader, optimizer, epoch)
test(model, test_loader)
# + [markdown] hidden=true
# Now we have a trained neural network and we are ready to perform compression of the weights in the fully-connected layers.
# + [markdown] hidden=true
# - (3 pts) Compute SVD of the matrix $1000 \times 1000$, which corresponds to a weight matrix $A$ in any layer of the trained neural network of the appropriate dimension. To find more information about accessing this matrix please refer to [PyTorch manual](https://pytorch.org/docs/stable/index.html).
# Plot decaying of the singular values like it was shown in the lecture. What conclusion can you make?
# - (12 pts) Create a new model, which is analogue to the class ```Net```, but with some significant distinctions.
# It takes as input parameters the instance of the class ```Net``` and compression rank $r > 0$.
# After that, this model has to compress all matrices $A$ in fully-connected layers with SVD using first $r$ singular vectors and singular values.
# Pay attention to storing the compressed representation of the layers efficiently.
# Also, the ```forward``` method of your new model has to be implemented in a way that uses the compressed representation of the fully-connected layers. In all other aspects it has to reproduce the ```forward``` method of the original non-compressed model (number of layers, activations, loss function etc).
# - (5 pts) Plot dependence of test accuracy on the number of parameters in the compressed model. This number of parameters obviously depends on the compression rank $r$.
# Also plot dependence of time to compute inference on the compression rank $r$.
# Explain obtained results.
# To measure time, use [%timeit](https://docs.python.org/3.6/library/timeit.html) with necessary parameters (examples of using this command see in lectures)
# + hidden=true
path = './NLA_nat.pth'
torch.save(model.state_dict(), path)
any_layer= model.state_dict()['fc3.weight']
d, s,v =torch.svd(any_layer)
plt.figure(figsize=(9, 6))
plt.semilogy(s/s[0])
plt.ylabel(r"$\sigma_i / \sigma_0$", fontsize=24)
plt.xlabel(r"Singular value index, $i$", fontsize=24)
plt.grid(True)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# Your solution is here
# -
# Here we can see that the singular values of the weight matrix decay, so we can keep only the leading, most meaningful ones and discard the rest.
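# A small add-on (not part of the original solution): how many singular values are needed to capture, say, 90% of the squared Frobenius norm of this layer (the 90% threshold is arbitrary).
# +
energy = torch.cumsum(s ** 2, dim=0) / torch.sum(s ** 2)
r90 = int((energy < 0.90).sum().item()) + 1
print(f"{r90} of {len(s)} singular values capture 90% of the layer's energy")
# -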
model.state_dict().keys()
class netCompressed:
def __init__(self, net, r):
super().__init__()
self.r = r
layer_weight = self.compressing(net, 'fc1.weight')
self.fc1 = layer_weight
self.fc1_bias = net.state_dict()['fc1.bias']
layer_weight = self.compressing(net, 'fc2.weight')
self.fc2 = layer_weight
self.fc2_bias = net.state_dict()['fc2.bias']
layer_weight = self.compressing(net,'fc3.weight')
self.fc3 = layer_weight
self.fc3_bias = net.state_dict()['fc3.bias']
layer_weight = self.compressing(net, 'fc4.weight')
self.fc4 = layer_weight
self.fc4_bias = net.state_dict()['fc4.bias']
layer_weight = self.compressing(net, 'fc5.weight')
self.fc5 = layer_weight
self.fc5_bias = net.state_dict()['fc5.bias']
layer_weight = self.compressing(net, 'fc6.weight')
self.fc6 = layer_weight
self.fc6_bias = net.state_dict()['fc6.bias']
def forward(self, x):
x = x.view(-1, 3 * 32*32)
for layer, b in zip([self.fc1, self.fc2, self.fc3, self.fc4, self.fc5, self.fc6],
[self.fc1_bias, self.fc2_bias, self.fc3_bias, self.fc4_bias, self.fc5_bias, self.fc6_bias]):
            u, s, v = layer
            # print(u.shape, s.shape, v.shape)
            # nn.Linear computes x @ W.T + b; with W ~ U diag(s) V^T this becomes x @ V, a scaling by s, then @ U^T
            x = x @ v
            x = s.view(1, -1) * x
            # print(x.shape)
            x = x @ u.T
            x = x + b
if layer is self.fc6:
x = F.log_softmax(x, dim=1)
else:
x = torch.relu(x)
return x
    def compressing(self, net, key):
        # Truncated SVD of one weight matrix: keep only the first r singular triplets,
        # so a 1000 x 1000 layer is stored as U (1000 x r), s (r) and V (1000 x r).
        weight = net.state_dict()[key]
        # print(key, weight.shape)
        u, s, v = torch.svd(weight)
        # print(u.shape, s.shape, v.shape)
        main_s = s[:self.r]
        main_u = u[:, :self.r]
        main_v = v[:, :self.r]
        # compressed_weight = torch.matmul(torch.matmul(main_u, torch.diag_embed(main_s)), main_v.transpose())
        return main_u, main_s, main_v
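# The assignment asks for test accuracy versus the *number of parameters*, not versus the rank itself. A small sketch of the conversion (an assumption on the counting: each compressed layer stores U, s, V and the bias):
# +
def compressed_num_params(r, dims=(3 * 32 * 32, 1000, 1000, 1000, 1000, 1000, 10)):
    # each layer (inp -> out) stores U (out x r), s (r), V (inp x r) and the bias (out)
    return sum(out * r + r + inp * r + out for inp, out in zip(dims[:-1], dims[1:]))

full_params = sum(p.numel() for p in model.parameters())
print(full_params, [compressed_num_params(r) for r in (10, 50, 100, 500)])
# -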
import time
# # %%timeit
rs = list(map(int, torch.logspace(0, 3, 20)))
accs = []
times = []
for r in rs:
net2 = netCompressed(model, r)
n_correct = 0
n_total = 0
time_total = 0
for x, y in test_loader:
time_start = time.time()
# # time = %timeit -n1 -r1 -q -o y_pred = net2.forward(x).argmax(-1, keepdim=False)
y_pred = net2.forward(x).argmax(-1, keepdim=False)
time_end = time.time()
time_total += time_end - time_start
n_correct += torch.sum(y == y_pred).item()
n_total += y.shape[0]
accuracy = n_correct / n_total
times.append(time_total / n_total)
accs.append(accuracy)
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 6))
plt.semilogx(rs, accs)
# plt.semilogx(rs, times)
plt.ylabel(r"test of accuracy", fontsize=24)
plt.xlabel(r"rank compression", fontsize=24)
plt.grid(True)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
plt.show()
# -
print(torch.sum(y_pred == y))
y.shape[0]
# +
plt.figure(figsize=(7, 5))
plt.semilogx(rs[6:], times[6:])
# plt.semilogx(rs, times)
plt.ylabel(r"inference time", fontsize=14)
plt.xlabel(r"rank compression", fontsize=14)
plt.grid(True)
plt.xticks(fontsize=8)
_ = plt.yticks(fontsize=8)
plt.show()
plt.show()
# -
# The stronger the compression (i.e. the smaller the rank), the more time a single inference takes in this experiment.
it = iter(train_loader)
x = next(it)[0]
# res.average  # only available when the `%timeit -n1 -r1 -q -o` line above is enabled
# + [markdown] heading_collapsed=true
# # Problem 5 (Bonus)
#
# 1. The norm is called absolute if $\|x\|=\| \lvert x \lvert \|$ holds for any vector $x$, where $x=(x_1,\dots,x_n)^T$ and $\lvert x \lvert = (\lvert x_1 \lvert,\dots, \lvert x_n \lvert)^T$. Give an example of a norm which is not absolute.
#
# 2. Write a function ```ranks_HOSVD(A, eps)```
# that calculates Tucker ranks of a d-dimensional tensor $A$ using High-Order SVD (HOSVD) algorithm, where ```eps``` is the relative accuracy in the Frobenius norm between the approximated and the initial tensors. Details can be found [here](http://ca.sandia.gov/~tgkolda/pubs/pubfiles/TensorReview.pdf) on Figure 4.3.
# ```python
# def ranks_HOSVD(A, eps):
# return r #r should be a tuple of ranks r = (r1, r2, ..., rd)
# ```
# -
# 5.1.
# Consider the following norm:
# $\|x\|_{ANYA}=\sum_{i=2}^{n}|x_{i}-x_{i-1}|+\sum_{i=1}^{n}|x_{i}|$
# we can check the norm properties:
#
#
# 1.
# $\|x\|_{ANYA}\geq 0$ by the definition of the absolute value, and $\|x\|_{ANYA}=0$ only if $x=0$ (because of the $\sum_{i=1}^{n}|x_{i}|$ part)
#
#
# 2.
# $\|\lambda x\|_{ANYA} = \sum_{i=2}^{n}|\lambda x_{i}-\lambda x_{i-1}|+\sum_{i=1}^{n}|\lambda x_{i}|= |\lambda|\Big(\sum_{i=2}^{n}|x_{i}-x_{i-1}|+\sum_{i=1}^{n}|x_{i}|\Big) = |\lambda|\,\|x\|_{ANYA}$
#
#
# 3.
# $\|x+y\|_{ANYA}= \sum_{i=2}^{n}|x_{i}+y_{i}-(x_{i-1}+y_{i-1})|+\sum_{i=1}^{n}|x_{i}+y_{i}| \leq \sum_{i=2}^{n}\big(|x_{i}-x_{i-1}|+|y_{i}-y_{i-1}|\big)+\sum_{i=1}^{n}\big(|x_{i}|+|y_{i}|\big)=\|x\|_{ANYA}+\|y\|_{ANYA}$
#
# here is an example for $n=3$:
#
# $x=(1, -2, -3)^T$ and $\lvert x \lvert = (\lvert 1\lvert,\lvert -2 \lvert ,\lvert -3 \lvert)^T = (1,2,3)^T$
#
# for $x$: $\|x\|_{ANYA}=|-2-1|+|-3-(-2)|+(1+2+3)=3+1+6=10$
#
# for $\lvert x \lvert$: $\| \lvert x \lvert \|_{ANYA}=|2-1|+|3-2|+(1+2+3)=1+1+6=8$
#
# Since $10 \neq 8$, this norm is not absolute.
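# A short numerical check of this counterexample (a sketch; ```anya_norm``` below is just the definition above):
# +
import numpy as np

def anya_norm(x):
    x = np.asarray(x, dtype=float)
    return np.abs(np.diff(x)).sum() + np.abs(x).sum()

x_test = np.array([1., -2., -3.])
print(anya_norm(x_test), anya_norm(np.abs(x_test)))   # 10.0 vs 8.0 -> the norm is not absolute
# -
# For part 5.2, a possible sketch of ```ranks_HOSVD``` (with one assumption: the allowed error $\varepsilon\|A\|_F$ is split evenly between the $d$ modes, as in the standard HOSVD truncation bound):
# +
def ranks_HOSVD(A, eps):
    """Return a tuple of Tucker ranks such that the HOSVD truncation error is <= eps * ||A||_F."""
    d = A.ndim
    budget = eps * np.linalg.norm(A) / np.sqrt(d)              # per-mode error budget
    ranks = []
    for mode in range(d):
        unfolding = np.moveaxis(A, mode, 0).reshape(A.shape[mode], -1)
        sigma = np.linalg.svd(unfolding, compute_uv=False)
        tail = np.sqrt(np.cumsum(sigma[::-1] ** 2))[::-1]      # tail[r] = norm of the discarded sigma[r:]
        r = int(np.searchsorted(-tail, -budget))               # smallest r with tail[r] <= budget
        ranks.append(max(r, 1))
    return tuple(ranks)
# -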
| 34,048 |
/METALLIC GLASS FORMING DATA(DA).ipynb
|
0f50d83d69ef141cf8834c53b42bbf67e47a93bd
|
[] |
no_license
|
rajrishi1997/Modeling-the-reduced-glass-transition-temperature-of-glass-forming-alloys-
|
https://github.com/rajrishi1997/Modeling-the-reduced-glass-transition-temperature-of-glass-forming-alloys-
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.r
| 858,246 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # DISTRIBUTIONAL ANALYSIS REPORT
# Name-Rajrishi Sarkar,Roll-203110061
# # Data description
Metallic Glass Descriptors:
Relevant papers:
There is no paper published on this data set at the present time. The data was assembled
primarily by Vanessa Nilsen under the guidance of Prof. Dane Morgan at UW Madison
([email protected]).
A previous study of reduced glass transition temperature as a GFA descriptor can be found in reference [5]:
https://www.sciencedirect.com/science/article/pii/S0022309300000648
X features:
The metallic glass dataset gives two columns with information about the material
composition. The first is the overall composition, and the second is the highest
composition element. The columns from four to the end are the MAGPIE features that
have been generated from the material composition column and give values such as
properties averaged over the material composition as well as features that are only for
the majority element in each alloy [3]. The majority element features are labelled as
"site1".
Y property:
The reduced glass transition temperature (Trg) has historically been used as a rough
predictor for Glass Forming Ability (GFA). By making a model to predict Trg for an
arbitrary alloy, it could be possible to use these values to estimate GFA directly, or as
input for another model to then predict GFA.
# # Importing dataset
data<-read.csv("C:\\Users\\ADMIN\\Desktop\\data for data analysis\\stats project\\features.csv")
df<-as.data.frame(data)
#viewing the datset
head(df)
dim(df)
# # loading the packages
load.libraries <- c('fitdistrplus', 'logspline','actuar')
install.lib <- load.libraries[!load.libraries %in% installed.packages()]
for(libs in install.lib) install.packages(libs, dependencies = TRUE)
sapply(load.libraries, require, character.only = TRUE)
# **Let's analyze the distribution of each of the features one by one**
# # 1) Glass forming temperature (target)
# **visualizing the skew kurtosis plot**
descdist(df$Trg, discrete = FALSE,boot=1000)
# **The observation lies below the lognormal curve; we will try to fit candidate distributions to the data**
fw <- fitdist(df$Trg, "weibull")
fg <- fitdist(df$Trg , "gamma",method="mme")
par(mfrow = c(2, 2))
plot.legend <- c( "weibull","gamma")
denscomp(list(fw, fg), legendtext = plot.legend)
qqcomp(list(fw, fg), legendtext = plot.legend)
cdfcomp(list(fw, fg), legendtext = plot.legend)
ppcomp(list(fw, fg), legendtext = plot.legend)
# both distributions seem to fit pretty well
# **comparing the AIC values of both distributions**
# 1. WEIBULL DIST (AIC) = -2004.26
# 2. GAMMA DIST (AIC) = -1794.17
# the Weibull distribution has the lower AIC of the two and is therefore the better fit for the target feature
#
# # 2) Density composition average
# **cullen frey plot**
descdist(df$Density_composition_average, discrete = FALSE,boot=1000)
# the observation lies near the logistic distribution point and some bootstrap samples also lie near the normal distribution
# **fitting the distributions**
x <- df$Density_composition_average
fn<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "norm")
flg <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "logis")
par(mfrow = c(2, 2))
plot.legend <- c( "norm","logis")
denscomp(list(fn,flg), legendtext = plot.legend)
qqcomp(list(fn,flg), legendtext = plot.legend)
cdfcomp(list(fn,flg), legendtext = plot.legend)
ppcomp(list(fn,flg), legendtext = plot.legend)
# **choosing the better fit**
fn$aic
flg$aic
# LOGISTIC DISTRIBUTION is a better fit judging by the aic values
# # 3) IsBoron composition average
# **cullen frey plot**
descdist(df$IsBoron_composition_average, discrete = FALSE,boot=1000)
# the observation clearly falls under the beta distribution
# **fitting distributions**
x <- df$IsBoron_composition_average
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
fg <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "gamma")
par(mfrow = c(2, 2))
plot.legend <- c( "beta","gamma")
denscomp(list(fb,fg), legendtext = plot.legend)
qqcomp(list(fb,fg), legendtext = plot.legend)
cdfcomp(list(fb,fg), legendtext = plot.legend)
ppcomp(list(fb,fg), legendtext = plot.legend)
# **choosing the best fit**
fb$aic
fg$aic
# judging from the aic values BETA DISTRIBUTION is suitable for this particular distribution
# # 4) IsDBlock composition average
# **cullen frey plot**
descdist(df$IsDBlock_composition_average, discrete = FALSE,boot=1000)
# **fitting beta distribution**
x <- df$IsDBlock_composition_average
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
plot(fb)
# # 5) IsTransitionMetal composition average
# **cullen frey plot**
descdist(df$IsTransitionMetal_composition_average, discrete = FALSE,boot=1000)
# the feature seems to fit the beta distribution
# **fitting to beta distribution**
x <- df$IsTransitionMetal_composition_average
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
plot(fb)
# # 6) NValance composition average
descdist(df$NValance_composition_average, discrete = FALSE,boot=1000)
# **fitting to lognormal distribution**
x <- df$NValance_composition_average
fln<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "lnorm")
plot(fln)
# # 7) HeatVaporization max value
# **cullen frey plot**
descdist(df$HeatVaporization_max_value, discrete = FALSE,boot=1000)
# the observation lies below the lognormal distribution line
# **fitting the lognormal distribution**
x <- df$HeatVaporization_max_value
fln<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "lnorm")
plot(fln)
# # 8) BoilingT difference
# **Cullen frey plot**
descdist(df$BoilingT_difference, discrete = FALSE,boot=1000)
# it's difficult to determine because the observation doesn't really seem to fit any distribution, but we will still try the lognormal and logistic distributions
# **fitting to distributions**
x <- df$BoilingT_difference
fln<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "lnorm")
fllg <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "logis")
par(mfrow = c(2, 2))
plot.legend <- c( "lnorm","logis")
denscomp(list(fln,fllg), legendtext = plot.legend)
qqcomp(list(fln,fllg), legendtext = plot.legend)
cdfcomp(list(fln,fllg), legendtext = plot.legend)
ppcomp(list(fln,fllg), legendtext = plot.legend)
# judging from the observations the LOGISTIC DIST seems to fit better
# # 9) HeatVaporization difference
# **cullen frey plot**
descdist(df$HeatVaporization_difference, discrete = FALSE,boot=1000)
# judging by the graph it's better if we go with the lognormal distribution
# **fitting to lognormal distribution**
x <- df$HeatVaporization_difference
fln<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "lnorm")
plot(fln)
# # 10) MeltingT_difference
# **Cullen frey plot**
descdist(df$MeltingT_difference, discrete = FALSE,boot=1000)
# observation lies in the beta region, bootstrapped values are hitting the gamma distribution line
# **fitting the distributions**
x <- df$MeltingT_difference
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
fg <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "gamma")
par(mfrow = c(2, 2))
plot.legend <- c( "beta","gamma")
denscomp(list(fb,fg), legendtext = plot.legend)
qqcomp(list(fb,fg), legendtext = plot.legend)
cdfcomp(list(fb,fg), legendtext = plot.legend)
ppcomp(list(fb,fg), legendtext = plot.legend)
# **choosing the best possible distribution**
fb$aic
fg$aic
# GAMMA DIST(AIC=-497) is a better fit
# # 11) NdValence_difference
# **Cullen frey plot**
descdist(df$NdValence_difference, discrete = FALSE,boot=1000)
# **fitting the beta distribution**
x <- df$NdValence_difference
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
plot(fb)
# # 12) NsUnfilled difference
# **cullen frey plot**
descdist(df$NsUnfilled_difference, discrete = FALSE,boot=1000)
# looking at the observation we have no choice but to go with the beta distribution
# **fitting the beta distribution**
x <- df$NsUnfilled_difference
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
plot(fb)
# # 13) valence difference
# **cullen frey plot**
descdist(df$valence_difference, discrete = FALSE,boot=1000)
# the observation may fit to normal,uniform or beta distribution
# **fitting the distributions**
x <- df$valence_difference
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
fu <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "unif")
fn <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "norm")
par(mfrow = c(2, 2))
plot.legend <- c( "beta","unif","norm")
denscomp(list(fb, fu,fn), legendtext = plot.legend)
qqcomp(list(fb, fu,fn), legendtext = plot.legend)
cdfcomp(list(fb, fu,fn), legendtext = plot.legend)
ppcomp(list(fb, fu,fn), legendtext = plot.legend)
# **choosing the best fit distribution**
fb$aic
fu$aic
fn$aic
# comparing the aic values we see that BETA DIST is best fit for the feature
# # 14) Site1 Density
# **cullen frey plot**
descdist(df$Site1_Density, discrete = FALSE,boot=1000)
# observation lies near logistic distribution
# **fitting the logistic distribution**
x <- df$Site1_Density
fllg<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "logis")
plot(fllg)
# # 15) Site1 HeatCapacityMass
# **cullen frey plot**
descdist(df$Site1_HeatCapacityMass, discrete = FALSE,boot=1000)
# the observation clearly lies in the gamma distribution line
# **fitting gamma distribution**
x <- df$Site1_HeatCapacityMass
fg<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "gamma")
plot(fg)
# # 17) Site1 HeatFusion
# **cullen frey plot**
descdist(df$Site1_HeatFusion, discrete = FALSE,boot=1000)
# as per the graph, the observation is close to the uniform and normal distributions, and it also lies in the beta region
# **fitting distributions**
x <- df$Site1_HeatFusion
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
fu <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "unif")
fn <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "norm")
par(mfrow = c(2, 2))
plot.legend <- c( "beta","unif","norm")
denscomp(list(fb, fu,fn), legendtext = plot.legend)
qqcomp(list(fb, fu,fn), legendtext = plot.legend)
cdfcomp(list(fb, fu,fn), legendtext = plot.legend)
ppcomp(list(fb, fu,fn), legendtext = plot.legend)
fb$aic
fu$aic
fn$aic
# 1. BETA DIST(aic) = -363.557
# 2. UNIFORM DIST(aic)=-7.64
# 3. NORMAL DIST(aic)=-321.73
#
# BETA DISTRIBUTION is the best fit
# # 18) Site1 IsDBlock
# **cullen frey plot**
descdist(df$Site1_IsDBlock, discrete = FALSE,boot=1000)
# observation lies in the beta region
# **fitting beta distribution**
x <- df$Site1_IsDBlock
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
plot(fb)
# # 19) Site1 IsTransitionMetal
# **cullen frey plot**
descdist(df$Site1_IsTransitionMetal, discrete = FALSE,boot=1000)
# the observation lies in the beta region
# **fitting the beta distribution**
x <- df$Site1_IsTransitionMetal
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
plot(fb)
# # 20) Site1 NdValence
# **cullen frey plot**
descdist(df$Site1_NdValence, discrete = FALSE,boot=1000)
# the observation falls in the beta range
# **fitting the distributions**
x <- df$Site1_NdValence
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
fu <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "unif")
par(mfrow = c(2, 2))
plot.legend <- c( "beta","unif")
denscomp(list(fb, fu), legendtext = plot.legend)
qqcomp(list(fb, fu), legendtext = plot.legend)
cdfcomp(list(fb, fu), legendtext = plot.legend)
ppcomp(list(fb, fu), legendtext = plot.legend)
# **finalizing the distribution**
fb$aic
fu$aic
# BETA DIST(AIC) has a lower value therefore it is a better fit
# # 21) Site1 SpecificHeatCapacity
# **cullen frey plot**
descdist(df$Site1_SpecificHeatCapacity, discrete = FALSE,boot=1000)
# **fitting the distributions**
x <- df$Site1_SpecificHeatCapacity
fb<- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
fln <- fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "lnorm")
par(mfrow = c(2, 2))
plot.legend <- c( "beta","lnorm")
denscomp(list(fb, fln), legendtext = plot.legend)
qqcomp(list(fb, fln), legendtext = plot.legend)
cdfcomp(list(fb, fln), legendtext = plot.legend)
ppcomp(list(fb, fln), legendtext = plot.legend)
# **finalizing the distribution**
fb$aic
fln$aic
# LNORM DIST(AIC)=-680.96 which is lower and therefore is a better fit
| 12,920 |
/Logistic Regression All/.ipynb_checkpoints/FeaturesReduced-checkpoint.ipynb
|
c6431d353d08254601ace8c88a85ef06c15e4b3f
|
[] |
no_license
|
elcronos/ILI-MachineLearningResearch
|
https://github.com/elcronos/ILI-MachineLearningResearch
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 6,689 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotly and Cufflinks
# Plotly is a library that allows you to create interactive plots that you can use in dashboards or websites (you can save them as html files or static images).
#
# ## Installation
#
# In order for this all to work, you'll need to install plotly and cufflinks to call plots directly off of a pandas dataframe. These libraries are not currently available through **conda** but are available through **pip**. Install the libraries at your command line/terminal using:
#
# pip install plotly
# pip install cufflinks
#
# ** NOTE: Make sure you only have one installation of Python on your computer when you do this, otherwise the installation may not work. **
#
# ## Imports and Set-up
import pandas as pd
import numpy as np
# %matplotlib inline
# +
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
print(__version__) # requires version >= 1.9.0
# -
import cufflinks as cf
# For Notebooks
init_notebook_mode(connected=True)
# For offline use
cf.go_offline()
# ### Fake Data
df = pd.DataFrame(np.random.randn(100,4),columns='A B C D'.split())
df.head()
df2 = pd.DataFrame({'Category':['A','B','C'],'Values':[32,43,50]})
df2.head()
# ## Using Cufflinks and iplot()
#
# * scatter
# * bar
# * box
# * spread
# * ratio
# * heatmap
# * surface
# * histogram
# * bubble
# ## Scatter
df.iplot(kind='scatter',x='A',y='B',mode='markers',size=10)
# ## Bar Plots
df2.iplot(kind='bar',x='Category',y='Values')
df.count().iplot(kind='bar')
# ## Boxplots
df.iplot(kind='box')
# ## 3d Surface
df3 = pd.DataFrame({'x':[1,2,3,4,5],'y':[10,20,30,20,10],'z':[5,4,3,2,1]})
df3.iplot(kind='surface',colorscale='rdylbu')
# ## Spread
df[['A','B']].iplot(kind='spread')
# ## histogram
df['A'].iplot(kind='hist',bins=25)
df.iplot(kind='bubble',x='A',y='B',size='C')
# ## scatter_matrix()
#
# Similar to sns.pairplot()
df.scatter_matrix()
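# ## Saving to HTML
#
# As mentioned at the top, these interactive figures can be saved as standalone HTML files. A minimal sketch (assuming cufflinks' `asFigure=True` option, which returns the underlying plotly figure instead of rendering it):
fig = df.iplot(kind='scatter', x='A', y='B', mode='markers', size=10, asFigure=True)
plot(fig, filename='scatter_AB.html', auto_open=False)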
df = pd.DataFrame(data = OPT, columns=headers, index=rowsIds)
heading_properties = [('font-size', '18px')]
cell_properties = [('font-size', '16px')]
dfstyle = [dict(selector="th", props=heading_properties),\
dict(selector="td", props=cell_properties)]
df.style.set_table_styles(dfstyle)
# +
display(Markdown('<b>A secondary structure on RNA sequence, B(a string expressing all 4 symbols{A,U,C,G}) is a set of pairs S = {(i, j)}, where i, j ∈ {1, 2, . . . , n}, that satisfies the following conditions.</b>'))
display(Markdown('(i) (No sharp turns.) The ends of each pair in S are separated by at least four intervening bases; that is, if (i, j) ∈ S, then i < j − 4.'
' (ii) The elements of any pair in S consist of either {A, U} or {C, G} (in either order).'
' (iii) S is a matching: no base appears in more than one pair.'
' (iv) (The noncrossing condition.) If (i, j) and (k, l) are two pairs in S, then we cannot have i < k < j < l.'))
print("Reference : Algorithm Design by Jon Kleinberg and Éva Tardos. Addison-Wesley, 2005")
print("image reference : Algorithm Design by Jon Kleinberg and Éva Tardos. Addison-Wesley, 2005")
Image(filename = "img/RNA.png")
# +
s1 = ' <h1> Condition 1 is being fulfilled </h1>'
display(HTML(s1))
s = ' <h1> initialization of OPT(i,j) = 0 where i >= j - 4 or i > j </h1>'
display(HTML(s))
display(Markdown('<b>Initial Values of OPT[i,j]:</b>'))
for i in range(0,OPT.shape[0]):
for j in range(0,OPT.shape[1]):
if (i >= j-4 or i>j):
OPT[i,j] = 0
# df = pd.DataFrame(data = OPT, columns=headers, index=rowsIds)
# df.style.set_table_styles(dfstyle)
plot_map(OPT[len(RNA_list)-6::-1,5::1],np.nanmax(OPT),0,cmap = 'hot')
# plot_map(OPT,np.amax(OPT))
# +
display(Markdown('Let, OPT[i,j] express the max number of possible base pairs in the RNA structure b = [bi...bj]. Two cases can arise in the structure'))
display(Markdown('(i) j is not involved in any pair,then OPT[i,j] = OPT[i,j-1]'))
display(Markdown('(ii) j creates pair with t for t<j-4.As depicted in the figure, we see two independent subproblems OPT[i,t-1] ans OPT[t+1,j-1]'))
display(Markdown('Final recurrence equation: OPT[i,j] = max(OPT[i,j-1], max_t(1 + OPT[i,t-1] + OPT[t+1,j-1])), where the inner max is taken over all t satisfying the conditions above'))
print("Reference : Algorithm Design by Jon Kleinberg and Éva Tardos. Addison-Wesley, 2005")
print("image reference : Algorithm Design by Jon Kleinberg and Éva Tardos. Addison-Wesley, 2005")
Image(filename = "img/pair.png")
# -
RNA_list = np.array((RNA_list))
n = len(RNA_list) - 1
val_2 = []
# Watson-Crick complements: position j can only pair with its complementary base.
complement = {'A': 'U', 'U': 'A', 'C': 'G', 'G': 'C'}
for k in range(4,n):
    print("\033[1m"+"For k = "+str(k+1)+ "\033[0m")
    for i in range(0,n-k):
        j = i + (k+1)
        print("OPT[i,j] = OPT[%d,%d]" %(i+1,j+1))
        partner = complement.get(RNA_list[j])
        # candidate positions t with t < j-4 whose base can pair with the j-th base
        t_list = [] if partner is None else [t + i for t in np.where(RNA_list[i:j-4] == partner)[0]]
        if len(t_list) == 0:
            print("\t j is %d and j-th symbol is %s . j is not involved in a pair because no base-pair is found" %(j+1,RNA_list[j]))
            print("\t OPT[i,j] = OPT[i,j-1]")
            OPT[i,j] = OPT[i,j-1]
        else:
            print("\t j is %d and j-th symbol is %s . j may pair with some t<j-4; recurrence on the two subproblems OPT[i,t-1] and OPT[t+1,j-1]" %(j+1,RNA_list[j]))
            print("\t OPT[i,j] = max(OPT[i,j-1], max_t(1 + OPT[i,t-1] + OPT[t+1,j-1]))")
            for t in t_list:
                if (t-1 < 0):
                    val_2.append(1 + OPT[t+1,j-1])
                else:
                    val_2.append(1 + OPT[i,t-1] + OPT[t+1,j-1])
            # also consider leaving j unpaired, as in the stated recurrence
            val_2.append(OPT[i,j-1])
            OPT[i,j] = max(val_2)
            val_2 = []
    plot_map(OPT[len(RNA_list)-6::-1,5::1],np.nanmax(OPT),k+1,cmap = 'hot')
    # plot_map(OPT[int(len(RNA_list)/2)+2::-1,int(len(RNA_list)/2):len(RNA_list):],np.amax(OPT),k+1)
display(Markdown(' <b> Maximum number of pairs possible in the RNA Structure : </b>'+str(OPT[0,len(RNA_list)-1])))
display(Markdown('<b> The iterations of the algorithm showing solutions to subproblems on the input sequence of the RNA Secondary Structure Prediction Problem </b>'))
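# The table above only stores the optimal pair counts. A possible traceback sketch (not part of the original notebook) that recovers one optimal set of base pairs from the filled OPT table; positions in the output are 1-indexed:
# +
def traceback_pairs(i, j, OPT, RNA_list, pairs):
    complement = {'A': 'U', 'U': 'A', 'C': 'G', 'G': 'C'}
    if i >= j - 4 or OPT[i, j] == 0:
        return
    # case 1: j is left unpaired
    if OPT[i, j] == OPT[i, j - 1]:
        traceback_pairs(i, j - 1, OPT, RNA_list, pairs)
        return
    # case 2: j is paired with some valid t
    for t in range(i, j - 4):
        if RNA_list[t] != complement.get(RNA_list[j]):
            continue
        left = OPT[i, t - 1] if t - 1 >= 0 else 0
        if OPT[i, j] == 1 + left + OPT[t + 1, j - 1]:
            pairs.append((t + 1, j + 1))
            if t - 1 >= 0:
                traceback_pairs(i, t - 1, OPT, RNA_list, pairs)
            traceback_pairs(t + 1, j - 1, OPT, RNA_list, pairs)
            return

pairs = []
traceback_pairs(0, len(RNA_list) - 1, OPT, RNA_list, pairs)
print(sorted(pairs))
# -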
| 9,028 |
/notebooks/GlobalModel01.ipynb
|
1fa17a7d84a8996c3bcdb8786cffb5014eaaddc0
|
[] |
no_license
|
mtmoncur/baseball_data
|
https://github.com/mtmoncur/baseball_data
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,936,362 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import libraries and datasets
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.stats.outliers_influence as out_infl
import warnings
warnings.filterwarnings("ignore")
theft_log_df = pd.read_csv('theft_log.csv',header=None)
visitor_log_df = pd.read_csv('visitor_log.csv')
# -
# ### Data Cleaning
# +
# Check missing values
print('Visitor Log:')
print(visitor_log_df.isnull().any())
print('\nTheft Log:')
print(theft_log_df.isnull().any())
# Count unique dates, names, dob
print('\nNumber of unique days:', len(visitor_log_df['visit_date'].unique()))
print('Number of unique names:', len(visitor_log_df['name'].unique()))
print('Number of unique DOB:', len(visitor_log_df['dob'].unique()))
# Create user id, combine name with DOB as primary key
visitor_log_df['visitor_id'] = visitor_log_df.groupby(['name','dob']).grouper.group_info[0]
print('Number of unique visitors:', len(visitor_log_df['visitor_id'].unique()))
visitor_log_df.head()
# -
# ### Create visitor dataframe
visitor_df = visitor_log_df[['name', 'dob', 'visitor_id']]
visitor_df = visitor_df.drop_duplicates() # remove duplicate visitor entries
visitor_df = visitor_df.sort_values(by=['visitor_id']) # Sort data by visitor id
visitor_df = visitor_df.reset_index(drop=True)
visitor_df.head()
# +
# Count total visit for each visitor
visitor_df['total_visit_count'] = visitor_log_df['visitor_id'].value_counts().sort_index()
# Count visit on dates with theft report for each visitor
visitor_log_df = visitor_log_df[visitor_log_df.visit_date.isin(list(theft_log_df[0]))]
visitor_df['theft_visit_count'] = visitor_log_df['visitor_id'].value_counts().sort_index()
# Remove visitor with no visit on theft reported dates
visitor_df = visitor_df.dropna()
visitor_df['theft_visit_count'] = visitor_df['theft_visit_count'].astype(int)
# Calculate theft visit over total visit ratio
visitor_df['theft_visit_ratio'] = visitor_df['theft_visit_count'] / visitor_df['total_visit_count']
visitor_df = visitor_df.reset_index(drop=True)
visitor_df.head()
# -
# ### Statistical analysis of numerical variables of visitor data
# +
# Boxplot
plt.rcParams['figure.figsize'] = (8, 8)
sns.boxplot(data=visitor_df.iloc[:,3:5])
plt.show()
# Histogram
plt.rcParams['figure.figsize'] = (8, 3)
sns.distplot( visitor_df["total_visit_count"] , color="skyblue", label="Sepal Length")
plt.show()
sns.distplot( visitor_df["theft_visit_count"] , color="red", label="Sepal Width")
plt.show()
sns.distplot( visitor_df["theft_visit_ratio"] , color="green", label="Sepal Width")
plt.show()
# Statistical summary
visitor_df[['total_visit_count','theft_visit_count','theft_visit_ratio']].describe().round(2)
# -
# Several outliers of total_visit_count and theft_visit_count can be observed in the boxplot. Understanding these outliers can help identify suspicious visitors.
# +
# Scatterplot with regression line
plt.rcParams['figure.figsize'] = (8, 6)
sns.regplot(x=visitor_df["total_visit_count"], y=visitor_df["theft_visit_count"])
# Correlation
plt.show()
print('Correlation Matrix')
visitor_df[['total_visit_count','theft_visit_count','theft_visit_ratio']].corr().round(3)
# -
# The correlation between theft_visit_count and total_visit_count is high at 0.94, indicating a strong positive relationship between these two variables. The scatterplot shows several outliers departing further from the regression line than most data points; these outliers can potentially be the thief visitors.
# ### Fit an Ordinary-Least-Square regression line using theft_visit_count and total_visit_count
# +
# Fit a OLS model
X = visitor_df['total_visit_count'].values.reshape(-1,1)
y = visitor_df['theft_visit_count'].values.reshape(-1,1)
ols = sm.OLS(y, sm.add_constant(X)).fit()
print ("The R-squared value is " + str(ols.rsquared))
# -
# ### Use Cook's distance to detect outliers
# Cook's distance for an observation measures how far the regression line would move if the line were fitted without that observation. A Cook's distance larger than 3 times the mean of the Cook's distances of all observations indicates that the observation is potentially an outlier.
#
# Reference: https://www.mathworks.com/help/stats/cooks-distance.html
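# For reference, a common closed-form expression for Cook's distance of observation $i$ (with $e_i$ the residual, $h_{ii}$ the leverage, $p$ the number of regression parameters and $\hat{\sigma}^2$ the residual mean square):
#
# $$D_i = \frac{e_i^2}{p\,\hat{\sigma}^2}\cdot\frac{h_{ii}}{(1-h_{ii})^2}$$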
# Obtain summary of influence measures for OLS result
ols_infl = out_infl.OLSInfluence(ols)
influence_df = ols_infl.summary_frame()
influence_df.head()
# Add Cook's distance to visitor dataframe
visitor_df['cooks_distance'] = influence_df['cooks_d']
# Sort visitors by value of Cook's distance
visitor_df = visitor_df.sort_values(by=['cooks_distance'],ascending=False)
# Only select visitor with Cook's distance larger than 3 times the mean of all Cook's distances
visitor_df = visitor_df[visitor_df.cooks_distance > visitor_df['cooks_distance'].mean()*3]
visitor_df = visitor_df.reset_index(drop=True)
visitor_df
# It shows that there are 69 visitors that might be suspects based on the value of their Cook's distance. As total_visit_count and theft_visit_count were used to fit the regression line, the obtained Cook's distances indicate that these visitors have unusual values for these counts compared to most visitors.
# ### Show 20 most suspicious visitors with the highest Cook's distances
visitor_df.head(20)
# +
from sklearn.cluster import KMeans

ks = range(1, 10)
inertias = []
score = []
for k in ks:
    model = KMeans(n_clusters = k)
    model.fit(feature_robust_Scale)
    inertias.append(model.inertia_) # inertia_ => built-in attribute with the within-cluster sum of squares (cohesion)
    score.append(model.score(feature_robust_Scale))
inertias # computed inertia (cohesion) values
#score
# -
# Plot the inertia (cohesion) for each number of clusters
# Plot ks vs inertias
plt.plot(ks, inertias, '-o')
plt.xlabel('number of clusters, k')
plt.ylabel('SSE')
plt.xticks(ks)
plt.show()
# +
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
# create model and prediction
model = KMeans(n_clusters=3,algorithm='auto')
model.fit(feature_robust_Scale)
predict = pd.DataFrame(model.predict(feature_robust_Scale))
#print(predict)
predict.columns=['predict'] # predict holds the cluster label assigned to each sample
# each data point is assigned to cluster 0, 1, or 2
# concatenate labels to df as a new column
r = pd.concat([feature_robust_Scale,predict],axis=1)
print(r)
# -
from mpl_toolkits.mplot3d import Axes3D
# scatter plot
fig = plt.figure( figsize=(6,6))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
ax.scatter(r['구매까지_조회경과시간_robust_Scale'],r['검색어연관도_robust_Scale'],r['Back_nums_robust_Scale'], c=r['predict'],alpha=0.5)
ax.set_xlabel('구매까지_조회경과시간_robust_Scale')
ax.set_ylabel('검색어연관도_robust_Scale')
ax.set_zlabel('Back_nums_robust_Scale')
plt.show()
#pairplot with Seaborn
sns.pairplot(r, hue='predict')
plt.show()
# +
from sklearn.cluster import KMeans
ks = range(1, 10)
inertias = []
score = []
for k in ks:
model = KMeans(n_clusters = k)
model.fit(feature_MinMax_Scale)
    inertias.append(model.inertia_) # inertia_ => built-in attribute with the within-cluster sum of squares (cohesion)
score.append(model.score(feature_MinMax_Scale))
inertias # computed inertia (cohesion) values
#score
# -
# Plot the inertia (cohesion) for each number of clusters
# Plot ks vs inertias
plt.plot(ks, inertias, '-o')
plt.xlabel('number of clusters, k')
plt.ylabel('SSE')
plt.xticks(ks)
plt.show()
# +
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
# create model and prediction
model = KMeans(n_clusters=3,algorithm='auto')
model.fit(feature_MinMax_Scale)
predict = pd.DataFrame(model.predict(feature_MinMax_Scale))
#print(predict)
predict.columns=['predict'] # predict holds the cluster label assigned to each sample
# each data point is assigned to cluster 0, 1, or 2
# concatenate labels to df as a new column
r = pd.concat([feature_MinMax_Scale,predict],axis=1)
print(r)
# -
from mpl_toolkits.mplot3d import Axes3D
# scatter plot
fig = plt.figure( figsize=(6,6))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
ax.scatter(r['구매까지_조회경과시간_Min/Max_Scale'],r['검색어연관도_Min/Max_Scale'],r['Back_nums_Min/Max_Scale'], c=r['predict'],alpha=0.5)
ax.set_xlabel('구매까지_조회경과시간_Min/Max_Scale')
ax.set_ylabel('검색어연관도_Min/Max_Scale')
ax.set_zlabel('Back_nums_Min/Max_Scale')
plt.show()
#pairplot with Seaborn
sns.pairplot(r, hue='predict')
plt.show()
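# The elbow curves above are one way to choose k; a complementary check (a sketch, not part of the original analysis) is the silhouette score, computed here on the Min/Max-scaled features:
# +
from sklearn.metrics import silhouette_score

for k in range(2, 7):
    labels = KMeans(n_clusters=k).fit_predict(feature_MinMax_Scale)
    print(k, silhouette_score(feature_MinMax_Scale, labels))
# -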
    # match_HiTS['%sMedianKronMag' % b].values[mask_lim][mask_diff],
# '.g', markersize=4, alpha=.4, label='MAD: %f'%(MAD_g))
#ax[k].legend(loc='best')
ax[k].plot(range(10,30), range(10,30), '-k', linewidth=1)
ax[k].set_xlabel(r'$(PS)$')
ax[k].set_ylabel(r'$%s$ $(HiTS)$' % b)
ax[k].set_xlim(16,24)
ax[k].set_ylim(16,24)
nbins = len(ax[k].get_xticklabels())
ax[k].yaxis.set_major_locator(MaxNLocator(nbins=nbins, prune='both'))
plt.tight_layout()
fig.subplots_adjust(hspace=0)
plt.savefig('%s/figures/HiTS_PS_%s_%s.pdf' % (mainpath, field, 'all'),
format='pdf', dpi=600,bbox_inches='tight')
plt.show()
for b in ['g','r','i']:
fig, ax = plt.subplots(1,2, figsize=(12,5))
ax[0].plot(match_PS1['%sMeanKronMag' % (b)],
match_PS1['%sMeanKronMagStd' % (b)],
'*g', markersize=4, alpha=.4, label='PS')
ax[0].plot(match_HiTS['%sMedianKronMag' % (b)],
match_HiTS['%sMedianKronMagStd' % (b)],
'.b', markersize=4, alpha=.4, label='HiTS')
ax[0].set_xlabel('%s mag' % (b))
ax[0].set_ylabel('%s std' % (b))
ax[0].set_xlim(14,26)
ax[0].set_ylim(0,1)
ax[0].legend()
ax[1].plot(match_PS1['%sMeanKronMag' % (b)],
match_PS1['%sMeanKronMagErr' % (b)],
'*g', markersize=4, alpha=.4, label='PS')
ax[1].plot(match_HiTS['%sMedianKronMag' % (b)],
match_HiTS['%sMedianKronMagErr' % (b)],
'.b', markersize=4, alpha=.4, label='HiTS')
ax[1].set_xlabel('%s mag' % (b))
ax[1].set_ylabel('%s err' % (b))
ax[1].set_xlim(14,26)
ax[1].set_ylim(0,1)
ax[1].legend()
plt.show()
def customJoint(x,y,*args,**kwargs):
plt.scatter(x,y,c='b', s=1.5, alpha=.5, edgecolors='face')
plt.axhline(0, c='k', ls='--', lw=1)
#nparam_density = stats.kde.gaussian_kde(y)
#aux = np.linspace(-.5, .5, 100)
#nparam_density = nparam_density(aux)
#mode = aux[np.argmax(nparam_density)]
#plt.axhline(mode, c='r', ls='--', lw=1)
def customMarginal(x,*args,**kwargs):
sb.distplot(x, hist=False, kde=True,color='b', vertical=kwargs['vertical'])
min_lim = [15,15,15]
for i,b in enumerate(['g','r','i']):
mask_yes = ((match_PS1['%sMeanKronMag'%(b)] > min_lim[i]) &
(match_HiTS['%sMedianKronMag'%(b)] > min_lim[i]) &
(match_HiTS['%sN'%(b)] > 1)).values
mask_diff = mask_difference(match_PS1['%sMeanKronMag'%(b)].values[mask_yes],
match_HiTS['%sMedianKronMag'%(b)].values[mask_yes], n=3.5)
diff_ = match_HiTS['%sMedianKronMag'%(b)] - match_PS1['%sMeanKronMag'%(b)]
    print(diff_[mask_yes].shape)
    print(diff_[mask_yes][mask_diff].shape)
nparam_density = stats.kde.gaussian_kde(diff_[mask_yes][mask_diff].values)
aux = np.linspace(-.5, .5, 200)
nparam_density = nparam_density(aux)
mode = aux[np.argmax(nparam_density)]
    print(mode)
g = sb.JointGrid(match_PS1['%sMeanKronMag'%(b)][mask_yes][mask_diff],
diff_[mask_yes][mask_diff], space=0, size=6)
g = g.plot(customJoint, customMarginal)
plt.ylim(np.min(diff_[mask_yes][mask_diff]) ,
np.max(diff_[mask_yes][mask_diff]))
#dots = grid.plot_joint(plt.scatter, color="k", s=2, alpha=.5)
#hist = grid.plot_marginals(sb.distplot, kde=False, color="k")
plt.xlabel(r'$(PS1)_g$')
plt.ylabel(r'$(HiTS - PS1)_g$')
#plt.tight_layout()
g.savefig('%s/figures/HiTS_PS1_%s_%s.pdf' % (mainpath, field, b), format='pdf', dpi=600,
bbox_inches='tight')
plt.show()
# Here is a function for displaying generated images.
def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.axis('off')
img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
ax.set_adjustable('box-forced')
im = ax.imshow(img)
plt.subplots_adjust(wspace=0, hspace=0)
return fig, axes
# And another function we can use to train our network.
def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5,5)):
saver = tf.train.Saver()
sample_z = np.random.uniform(-1, 1, size=(50, z_size))
samples, losses = [], []
steps = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in dataset.batches(batch_size):
steps += 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
_ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z})
if steps % print_every == 0:
# At the end of each epoch, get the losses and print them out
train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
train_loss_g = net.g_loss.eval({net.input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
if steps % show_every == 0:
gen_samples = sess.run(
generator(net.input_z, 3, reuse=True),
feed_dict={net.input_z: sample_z})
samples.append(gen_samples)
_ = view_samples(-1, samples, 5, 10, figsize=figsize)
plt.show()
saver.save(sess, './checkpoints/generator.ckpt')
with open('samples.pkl', 'wb') as f:
pkl.dump(samples, f)
return losses, samples
# ## Hyperparameters
#
# GANs are very sensitive to hyperparameters. A lot of experimentation goes into finding the best hyperparameters such that the generator and discriminator don't overpower each other. Try out your own hyperparameters or read [the DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf) to see what worked for them.
# +
real_size = (32,32,3)
z_size = 100
learning_rate = 0.0002
batch_size = 128
epochs = 25
alpha = 0.2
beta1 = 0.5
# Create the network
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)
# +
dataset = Dataset(trainset, testset)
losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5))
# -
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()
_ = view_samples(-1, samples, 5, 10, figsize=(10,5))
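# Once training has finished, the saved checkpoint can be reused to draw fresh samples without retraining. A minimal sketch (assuming the same graph, `net`, `generator`, and the `./checkpoints` directory used by `train` above):
# +
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('./checkpoints'))
    sample_z = np.random.uniform(-1, 1, size=(50, z_size))
    gen_samples = sess.run(generator(net.input_z, 3, reuse=True),
                           feed_dict={net.input_z: sample_z})
_ = view_samples(0, [gen_samples], 5, 10, figsize=(10, 5))
# -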
| 15,375 |
/material/deep_learning/wheather_forecasting_using_TimeseriesGenerator.ipynb
|
e88cbbfa1071a46b9d8b2cc0537fcee58c89e102
|
[
"CC-BY-4.0"
] |
permissive
|
KangMinPyo/cau_2021
|
https://github.com/KangMinPyo/cau_2021
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 600,025 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Lhotse Shar
#
# ## Storage format optimized for sequential I/O and modularity
#
# This notebook introduces and shows how to use Lhotse's own data storage format called Lhotse Shar.
#
# ### Similarities to WebDataset
#
# Lhotse Shar is heavily inspired by the [WebDataset](https://github.com/webdataset/webdataset) project. Specifically, we borrow the concepts of 1) sequentially readable tar files, and 2) shards. Like WebDataset, we store the data itself in tar files for sequential reading, that are much faster to access than individual files or random-access storage formats. In fact, sequential reading often makes it feasible to train models when downloading the data over the network, e.g. from cloud storage services, with arbitrarily dataset sizes.
#
# The scalability against dataset size is achieved using the second concept of WebDataset: sharding. Instead of writing the entire dataset into a single tar file, we would partition the data first into smaller chunks called shards. Each shard is then contained in its own tar file. At any given time, a single data loading worker would only read a single shard, but it may shuffle the shard order, as well as keep an internal buffer used for shuffling of data items across different shards. Since only a single shard is read at any given time, the memory usage depends only on shuffling buffer size and does not depend on the dataset size.
#
# Lhotse offers WebDataset integration to leverage the features described above. For a dedicated tutorial, see the notebook called `02-webdataset-integration.ipynb`.
#
# ### Features specific to Lhotse Shar
#
# Lhotse WebDataset integration had a few shortcomings that made it frustrating to work with continuously evolving datasets. Specifically, when you need to compute various types of features for your data (think self-supervised representations, ASR posteriors, alignments, speaker embeddings, etc.) -- every time you compute them, or have a different version, you would have to create a new copy of your full dataset. Another downside was that the tarfile contained everything: the data itself (e.g., recording and features) and the metadata manifest (e.g., Cut manifest). In order to investigate the metadata, the user had to actually load or iterate through the entire shard; and editing the metadata required a full dataset copy again.
#
# To address the beforementioned issues, we designed a modular format that allows to store the metadata, and each data field, in a separate sequentially-read file. For example, instead of having a single `shard-000000.tar` with metadata + audio + features, one would have:
#
# ```
# tree data-shar
# ├── cuts.000000.jsonl.gz
# ├── fbank.000000.tar
# ├── recording.000000.tar
# ```
#
# Upon reading, the user can iterate over the metadata only:
#
# ```
# cuts = CutSet.from_shar(fields={"cuts": ["data-shar/cuts.000000.jsonl.gz"]})
# ```
#
# Or the whole thing, or any combination of fields:
#
# ```
# cuts_audio = CutSet.from_shar(
# fields={
# "cuts": ["data-shar/cuts.000000.jsonl.gz"],
# "recording": ["data-shar/recording.000000.tar"],
# },
# )
# cuts_feats = CutSet.from_shar(
# fields={
# "cuts": ["data-shar/cuts.000000.jsonl.gz"],
# "fbank": ["data-shar/fbank.000000.tar"],
# },
# )
# cuts_full = CutSet.from_shar(
# fields={
# "cuts": ["data-shar/cuts.000000.jsonl.gz"],
# "recording": ["data-shar/recording.000000.tar"],
# "fbank": ["data-shar/fbank.000000.tar"],
# },
# )
# ```
#
# When new type of data is computed, e.g., speaker embeddings, all it takes is to save them in yet another tar file, and specify it during reading.
#
# Below, we'll showcase how to do this using mini librispeech to make it easy to reproduce.
# + jupyter={"outputs_hidden": false}
# Optional auto-formatting
# #!pip install nb_black
# #%load_ext lab_black
# +
# Get the latest version of Lhotse, if not installed:
# #!pip install git+https://github.com/lhotse-speech/lhotse
# + jupyter={"outputs_hidden": false}
import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from lhotse import CutSet, Fbank
from lhotse.dataset import (
DynamicBucketingSampler,
K2SpeechRecognitionDataset,
OnTheFlyFeatures,
PerturbSpeed,
PerturbVolume,
SpecAugment,
make_worker_init_fn,
)
from lhotse.shar import ArrayTarWriter, AudioTarWriter, JsonlShardWriter
from lhotse.recipes import (
download_librispeech,
prepare_librispeech,
)
# + jupyter={"outputs_hidden": false}
root_dir = Path("data")
tmp_dir = Path("tmp")
tmp_dir.mkdir(exist_ok=True)
num_jobs = os.cpu_count() - 1
# -
# # (mini) LibriSpeech
#
# We're downloading the data, preparing recording/supervision manifests, and compiling them into CutSets.
# A cut is a basic "example" of data in Lhotse.
#
# Approx. download size 450MB.
# + jupyter={"outputs_hidden": false}
# libri_variant = "librispeech"
libri_variant = "mini_librispeech"
libri_root = download_librispeech(root_dir, dataset_parts=libri_variant)
libri = prepare_librispeech(
libri_root, dataset_parts=libri_variant, output_dir=root_dir, num_jobs=num_jobs
)
cuts_train = CutSet.from_manifests(**libri["train-clean-5"]).trim_to_supervisions()
cuts_dev = CutSet.from_manifests(**libri["dev-clean-2"]).trim_to_supervisions()
# -
# # Export cuts to Lhotse Shar format
#
# Sharding is a technique used to partition a large dataset into smaller parts that can be split between different GPU nodes and dataloading workers.
#
# In this example, we're working with small data, but we'll treat it like a large dataset to illustrate the typical usage.
# Specifying `shard_size=1000` means that each shard will contain 1000 cuts (and the last shard is likely to have less).
#
# Note: to understand how to use the `fields` dict and what other values are possible, please visit [documentation for `SharWriter`](https://lhotse.readthedocs.io/en/latest/api.html#lhotse.shar.writers.SharWriter).
# + jupyter={"outputs_hidden": false}
data_dir = tmp_dir / "data-shar"
data_dir.mkdir(parents=True, exist_ok=True)
shards = cuts_train.to_shar(data_dir, fields={"recording": "wav"}, shard_size=1000)
shards
# + jupyter={"outputs_hidden": false}
# !tree {data_dir}
# -
# # Read cuts from Lhotse Shar format
#
# We're going to read the CutSet in two variants: the first only contains the metadata, and the other ones allows to access the audio data as well.
# + jupyter={"outputs_hidden": false}
cuts_nodata = CutSet.from_shar(fields={"cuts": shards["cuts"]})
cuts_audio = CutSet.from_shar(fields=shards)
# + jupyter={"outputs_hidden": false}
print("Metadata only:", cuts_nodata[0].recording)
print("With audio data:", cuts_audio[0].recording)
# + jupyter={"outputs_hidden": false}
# Reading audio works as usual here.
cuts_audio[0].load_audio()
# + jupyter={"outputs_hidden": false}
import traceback
# This one will raise an exception -- the metadata indicates there is a recording for this Cut,
# but we did not actually load it.
try:
cuts_nodata[0].load_audio()
except:
traceback.print_exc()
# -
# # Extend Lhotse Shar data directory with fbank features
#
# Note: this is for illustration purposes, as we might add a special method in Lhotse that roughly wraps the code below. Specifically, we could replace Fbank here with something like a neural network and save its embeddings or outputs instead, under some different name.
# + jupyter={"outputs_hidden": false}
fbank = Fbank()
# ArrayTarWriter detects the %06d formatting pattern and will auto-increment the shard number.
with ArrayTarWriter(
f"{data_dir}/fbank.%06d.tar", shard_size=1000, compression="lilcom"
) as writer:
for cut in cuts_audio:
# `feats` is a numpy array with log Mel filter bank features.
feats = cut.compute_features(fbank)
# `cut` now contains a field `cut.fbank` with metadata manifest for the features,
# and a method `cut.load_fbank()` that loads the features (respects pad/truncation).
cut = cut.attach_tensor(
"fbank", feats, frame_shift=fbank.frame_shift, temporal_dim=0
)
# We store the features under key `cut.id`, because during loading we'll check that the IDs match
# to avoid data errors. We also store the feature manifest to have some information about this data.
writer.write(cut.id, feats, cut.fbank)
feature_shards = writer.output_paths
# -
# Now, we'll notice that ``data_dir`` contains new files corresponding to fbank features:
# + jupyter={"outputs_hidden": false}
# !tree {data_dir}
# -
# And these features can be easily read together with the rest of the data:
# + jupyter={"outputs_hidden": false}
cuts_audio_feat = CutSet.from_shar(
fields={
"cuts": shards["cuts"],
"recording": shards["recording"],
"fbank": feature_shards,
}
)
# + jupyter={"outputs_hidden": false}
print("Audio array:", cuts_audio_feat[0].load_audio())
print("Fbank array:", cuts_audio_feat[0].load_fbank())
# -
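# The note above mentions that instead of `Fbank` we could run a neural network and store its embeddings under a different name. The sketch below only illustrates that idea: `embedding_model` is a made-up stand-in (not a Lhotse or torch API) that pretends to return one 256-dim vector per 10 ms frame, and the field name `embedding` and `frame_shift=0.01` are likewise assumptions for the example. Everything else mirrors the fbank-writing cell above.
# + jupyter={"outputs_hidden": false}
import numpy as np

def embedding_model(audio):
    # Dummy encoder: one random 256-dim vector per 10 ms frame of 16 kHz audio.
    num_frames = max(1, audio.shape[-1] // 160)
    return np.random.randn(num_frames, 256).astype(np.float32)

with ArrayTarWriter(
    f"{data_dir}/embedding.%06d.tar", shard_size=1000, compression="lilcom"
) as writer:
    for cut in cuts_audio:
        emb = embedding_model(cut.load_audio())
        cut = cut.attach_tensor("embedding", emb, frame_shift=0.01, temporal_dim=0)
        writer.write(cut.id, emb, cut.embedding)
embedding_shards = writer.output_paths
# -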
# # Training DataLoader with Lhotse Shar
#
# ### Shards, multi-GPU, and avoiding data duplication
#
# Lhotse Shar requires a few adjustments to the DataLoader initialization as compared to a vanilla Lhotse workflow.
# Note that in a multi-GPU and multi-dataloader-worker setting, all processes see the same CutSet with the same list of shards.
# A common solution to this problem is to split the shard list across different workers so that each worker sees a non-overlapping subset.
# However, we found in practice that this is not necessary.
# Given enough shards, all it takes is to shuffle them differently in each process/worker, and to keep re-shuffling every time a full epoch is reached.
# Even with medium-sized datasets, it is extremely unlikely that two workers would yield mini-batches containing duplicated data.
# In order to re-shuffle the shard order on each epoch, we expect the CutSet will be made (infinitely) repeated using `cuts = cuts.repeat()`.
#
# Let's illustrate the above with a simple example. It roughly means the following: given a dataset of three shards [A, B, C], a single node, two dataloader workers W1 and W2, and global random seed=0, the training dataloading might look like the following (assuming `stateful_shuffle=True`):
#
# Epoch 0:
# - W1 uses RNG with seed (global=0 + worker-id=1 + 1000\*rank=0) + epoch=0 = 1 and has order: [B, A, C]
# - W2 uses RNG with seed (global=0 + worker-id=2 + 1000\*rank=0) + epoch=0 = 2 and has order: [C, B, A]
#
# Epoch 1:
# - W1 uses RNG with seed (global=0 + worker-id=1 + 1000\*rank=0) + epoch=1 = 2 and has order: [C, B, A]
# - W2 uses RNG with seed (global=0 + worker-id=2 + 1000\*rank=0) + epoch=1 = 3 and has order: [A, B, C]
#
# ... and so on (see the seed-arithmetic sketch below).
#
# Note that since `.repeat()` makes the `CutSet` infinite, the dataloader will never stop yielding data, so you won't easily know the current epoch -- it's best to count steps. If you really need to know the epoch, Shar attaches a custom field `cut.shar_epoch` to each cut that you can read out. You'll also generally observe that each `shar_epoch` spans `world_size * num_workers` actual epochs in this setup.
#
#
#
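# The seed arithmetic from the worked example above can be written out as a small helper (illustrative only; the actual bookkeeping happens inside Lhotse, this just restates the formula used in the example):
# + jupyter={"outputs_hidden": false}
def shar_shuffle_seed(global_seed: int, worker_id: int, rank: int, epoch: int) -> int:
    # (global + worker-id + 1000 * rank) + epoch, exactly as in the example above
    return (global_seed + worker_id + 1000 * rank) + epoch

# Reproduce the numbers from the example: one node (rank 0), two workers, global seed 0.
for epoch in (0, 1):
    for worker_id in (1, 2):
        print(f"epoch={epoch} W{worker_id} -> seed {shar_shuffle_seed(0, worker_id, 0, epoch)}")
# -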
# ### Comparison to vanilla Lhotse dataloading and Lhotse+WebDataset dataloading
#
# We'll extend the "base" approach used in `examples/00-basic-workflow.ipynb` (next to this file).
# The code below has the same functionality, just reads the data differently.
# It is also very similar to the workflow in `examples/02-webdataset-integration.ipynb`.
#
# ### Implementation note: the use of IterableDataset
#
# We require the use of IterableDataset, which in our case is just a wrapper over sampler iteration and map-style dataset that converts CutSet mini-batch to tensors.
# What this does is move the sampler into the dataloading worker processes, so Lhotse Shar can "auto-detect" that it's in a multi-worker context and can drop some shards in each worker/node. Remember that in a "typical" sampler + map-style dataset scenario, the sampler lives in the same process as the main training loop instead.
#
# To learn more about map-style and iterable-style datasets, see: https://pytorch.org/docs/stable/data.html#dataset-types
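# For intuition, a minimal wrapper along these lines could look like the sketch below. This is not Lhotse's actual `IterableDatasetWrapper` implementation (that one is imported and used in the next cell); it only illustrates the idea that the sampler is iterated inside the worker process:
# + jupyter={"outputs_hidden": false}
import torch.utils.data

class SketchIterableDataset(torch.utils.data.IterableDataset):
    """Illustrative only: iterate the sampler and index the map-style dataset inside the worker."""

    def __init__(self, dataset, sampler):
        self.dataset = dataset
        self.sampler = sampler

    def __iter__(self):
        # __iter__ runs in the dataloading worker, so the sampler (and the CutSet behind it)
        # lives in the worker process, which is what lets shard handling adapt per worker.
        for cuts_batch in self.sampler:
            yield self.dataset[cuts_batch]
# -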
# + jupyter={"outputs_hidden": false}
cuts = CutSet.from_shar(
fields={
"cuts": shards["cuts"],
"recording": shards["recording"],
},
# The three arguments below are specifically for dataloading.
# shuffle_shards=True enables shuffling of shards,
# stateful_shuffle=True makes the shuffling different on each epoch,
# and seed="randomized" tells the CutSet to randomize the seed on each dataloader node and worker.
shuffle_shards=True,
stateful_shuffle=True,
seed="randomized",
).repeat() # repeat() enables the stateful shuffling
train_sampler = DynamicBucketingSampler(
cuts,
shuffle=True,
max_duration=100.0,
num_buckets=10,
rank=0,
world_size=1,
)
train_dataset = K2SpeechRecognitionDataset(
cut_transforms=[
PerturbSpeed(factors=[0.9, 1.1], p=2 / 3),
PerturbVolume(scale_low=0.125, scale_high=2.0, p=0.5),
],
input_transforms=[
SpecAugment(), # default configuration is well-tuned
],
input_strategy=OnTheFlyFeatures(Fbank()),
)
from lhotse.dataset.iterable_dataset import IterableDatasetWrapper
train_iter_dataset = IterableDatasetWrapper(
dataset=train_dataset,
sampler=train_sampler,
)
train_dloader = DataLoader(
train_iter_dataset,
batch_size=None,
# For faster dataloading, use num_workers > 1
num_workers=2,
# Note: Lhotse offers its own "worker_init_fn" that helps properly
# set the random seeds in all workers (also with multi-node training)
# and randomizes the shard order across different workers.
worker_init_fn=make_worker_init_fn(seed=0),
)
# -
# ### Visualisation
#
# We simply iterate the dataloader as usual and see what the first batch looks like.
# +
from lhotse.dataset.vis import plot_batch
for batch in train_dloader:
plot_batch(batch)
break
# -
| 14,406 |
/Project_03.ipynb
|
285272b3c4506d8d5fea4516408c682fa832704b
|
[] |
no_license
|
rjnuttall/GA_project_03
|
https://github.com/rjnuttall/GA_project_03
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,733,503 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this part of our series covering Python programming basics for beginners, we will look at decision-making structures.
#
# Even in the simplest programs we frequently need arrangements of the form "if this condition is true, do this, otherwise don't", or "if it is true do this, if it is false do that". For example, "if the answer is correct, increase the score by one", or "if the number of wrong answers is a multiple of four, decrease the score by one". As in many languages, the selection structure in Python is built with the `if-else` statements.
#
# The if statement
# ======
# The simplest form of a decision structure is to test whether a condition is true and, if so, perform certain operations. For example, look at the program below. When we run the cell with Shift-Enter, a box appears asking for our guess. Type an answer into the box and press Enter.
hedef = "42"
tahmin = input("Tahmininiz: ")
if tahmin == hedef:
print("Doğru!")
hedef = "42"
tahmin = input("Tahmininiz: ")
if tahmin == hedef:
print("Doğru!")
if tahmin != hedef:
print("Yanlış!")
# If the guess equals the `hedef` ("target") value (`"42"`), the program prints `"Doğru!"` ("Correct!") to the screen; otherwise it does nothing. Don't forget the colon (`:`) after the condition.
#
# (To turn the code in this cell into a standalone script, copy it into a text editor and save it as a file named tahmin.py. You can then run it from the command line:
#
#     $ python3 tahmin.py
#     Tahmininiz: 42
#     Doğru!
# which is the kind of output you should get.)
#
# After `if` comes a logical expression that tests whether `tahmin` ("guess") is equal to `hedef`. Its value is either `True` or `False`. Note that the equality test uses `==` (two equals signs).
1 == 1
1 == 2
1 = 1  # raises a SyntaxError: `=` is assignment, use `==` for comparison
# The lines after the condition form the piece of code to be executed when the condition is true (the _if block_). Here we see a rule peculiar to Python: every statement in the block must be indented by some amount relative to the `if` header. The interpreter uses this indentation to tell which statements belong to the `if` block.
#
# For example, consider these two separate programs:
#
#     hedef = "42"
#     tahmin = input("Tahmininiz: ")
#     if tahmin == hedef:
#         print("Doğru!")
#         print("Bravo")
# and:
#
#     hedef = "42"
#     tahmin = input("Tahmininiz: ")
#     if tahmin == hedef:
#         print("Doğru!")
#     print("Bravo")
#
# In the second program the
#
#     print("Bravo")
# statement is outside the `if` block, so it runs whether the guess is correct or not. Try it (a runnable copy is given below).
#
# How many spaces the block inside the `if` is indented does not matter; even a single space is enough. Most IDEs automatically indent by four spaces, which is also what the Python community recommends. The following lines of the block start at the same indentation level, so to end the block you must press Backspace after moving to a new line.
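# Here is the second version as a runnable cell (identical to the listing above) so you can try it directly; whatever you type, `"Bravo"` is printed, while `"Doğru!"` appears only for the right guess.
hedef = "42"
tahmin = input("Tahmininiz: ")
if tahmin == hedef:
    print("Doğru!")
print("Bravo")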
#
# The if-else statement
# ========
# The program above only responds when our guess is correct. If we also want it to tell us that the guess is wrong, we use the `if-else` structure.
hedef = "42"
x = input("Tahmininiz: ")
if x == hedef:
print("Doğru!")
else:
print("Yanlış")
# No condition follows `else`, because the `else` block is executed only when the `if` condition is false. Note that `else` sits at the same indentation level as the `if` it belongs to.
#
# Every `else` must be attached to an `if`, but the converse does not hold: not every `if` needs an `else`.
#
# The if-elif-else statement
# ===========
# This is the most general form of `if`. The keyword `elif` is short for `else if`.
x = int(input("Kaç tane? "))
if x>2000:
print("Binlerce")
elif x>200:
print("Yüzlerce")
elif x>10:
print("Çok")
elif x>0:
print("Birkaç")
else:
print("Yok")
# Without `elif`, the same program could only be written like this:
x = int(input("Kaç tane? "))
if x>2000:
print("Binlerce")
else:
if x>200:
print("Yüzlerce")
else:
if x>10:
print("Çok")
else:
if x>0:
print("Birkaç")
else:
print("Yok")
# As you can see, using `elif` simplifies the program.
#
# Comparisons
# =========
# After `if` there must be an expression that evaluates to logically true or false. We covered equality above. In addition, the operators `>` (greater than), `<` (less than), `>=` (greater than or equal), `<=` (less than or equal) and `!=` (not equal) can be used.
2 > 3, 2 >= 3, 2 < 3, 2 <= 3, 2 != 3
# Non-numeric data structures can also be used in comparisons. In that case, inequalities are evaluated in lexicographic (alphabetical) order.
"hello" == "Hello", "hello" > "Hello", "hello" < "jello"
[1,2,3] < [1,20,3], [1,2,3] < [11,2,3]
# For more information, see the <a href="http://docs.python.org/2/reference/expressions.html#not-in">Python language reference</a>.
#
# You can test whether a particular element or sub-sequence occurs inside a data structure with the `in` keyword. To negate the condition, use `not in`.
L = [12,3,4,[5,6]]
3 in L, [3,4] in L, [5,6] in L, [5,6] not in L
s = "merhaba"
"a" in s, "erh" in s
cümle = input("bir isim yaz: ")
if "a" in cümle:
print("ismin içinde 'a' harfi geçiyor!")
else:
print("ismin içinde 'a' harfi geçmiyor!")
# In dictionaries, the `in` keyword only checks the keys.
d = {"abc": 54, (1,2): -45.1}
"abc" in d, 54 in d, (1,2) in d
# The True and False constants
# ==============
# `True` and `False` are essentially new names for the numbers 1 and 0. In addition, each of the expressions
#
#     [], {}, "", None, 0, 0.0, False
# carries the logical meaning "false". Conversely, any nonzero number and any non-empty list/tuple/string/dictionary is interpreted as logically true inside `if` structures.
L=[]
if L:
print("Liste dolu")
else:
print("Liste boş.")
s = "abc"
if s:
print(s)
else:
print("Boş")
# Logical operations
# ==========
# More complex conditions can be built by combining several conditions with the Boolean operations (`and`, `or`, `not`).
#
# * `X and Y` : true if both `X` and `Y` are true; false if at least one of them is false.
# * `X or Y` : true if at least one of `X` and `Y` is true; false if both are false.
# * `not X` : the opposite of the truth value of `X`.
1 < 2 and 3==3
1 < 2 and 3==4
1 < 2 or 3==4
1 > 2 or 3==4
not 1>2
not 3==3
# Boolean expressions can have more than two components. The `not` operation is applied first, then `and`, then `or`. When the same operation is repeated, the components are combined pairwise from left to right.
1 == 2 or 3 > 4 or 5 < 8
1 != 2 and not 3 > 4 and 5 < 8
1 != 2 and 3 <= 4 or 5 > 10 and 7 > 8
# "Kısa devre" işlemler
# ============
# Mantıksal işlemlerin yan etkileri vardır: Mantıksal `or` (veya) işleminde, işlenen değerlerden sadece birisinin doğru olması işlem sonucunun doğru olması için yeterlidir. Bu yüzden, Python `X or Y` işlemini yaparken `X`'in doğru olduğunu görürse `Y`'ye hiç bakmaz, onu değerlemez, ve işlem sonucu olarak `X`'in değerini geri verir. Buna _kısa devre hesaplama_ adı verilir. Eğer `X` doğru değilse, `Y` ne olursa olsun, `Y`'yi verir.
#
# Kısa devre özelliği hesaplama verimliliği de sağlar. Bazen bu işlemlerin bileşenleri karmaşık işlemlerle elde ediliyor olabilir. Sözgelişi `Y` yerine, bir fonksiyon çağrısı koyuyor olabiliriz, ve bu fonksiyon ağır işlemler yapıyor olabilir. Böyle durumlarda kısa devre işlemler hesaplamada verimlilik sağlarlar, `Y`'yi boş yere hesaplamazlar.
# Aşağıdaki örnekte, `"abc"` doğru sayıldığı için, `print` işlemi hiç yapılmadan `"abc"` geri verilir.
"abc" or print("mrb")
# In the example below, the `print` call must be evaluated to determine the value of the `or` expression. As a side effect it prints `"mrb"`. But the `print` call returns `None`, so the `or` expression also yields `None`.
False or print("mrb")
0 or 3
# A similar situation holds for `X and Y`. If `X` is logically false, the whole expression is false regardless of `Y`, so the value of `X` is returned. But if `X` is true, the truth value of the expression depends on `Y`, so the value of `Y` is returned.
2 and 3
3 and print("mrb")
0 and 2
[] and False
# This property can be used for various "hacks". For instance, the expression
#
#     X = A or B or C or None
# assigns to `X` the first non-empty object among `A`, `B`, `C`, or `None` if they are all empty. Doing the same with `if-elif-else` takes more code (a runnable comparison follows the listing below).
#
# if A:
# X = A
# elif B:
# X = B
# elif C:
# X = C
# else:
# X = None
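# The cell below runs both versions side by side for a few sample values (the particular values of `A`, `B`, `C` are made up just for this demonstration):
for A, B, C in [("", "merhaba", "dünya"), (0, [], None), ([1, 2], "", 5)]:
    X = A or B or C or None
    if A:
        X2 = A
    elif B:
        X2 = B
    elif C:
        X2 = C
    else:
        X2 = None
    print(A, B, C, "->", X, "| if-elif-else gives the same:", X == X2)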
# The ternary if-else expression
# ===========
# In many cases we assign different values to a variable depending on whether a condition is true or false. Say we want to assign the value `y` to `a` when the logical expression `x` is true, and `z` otherwise.
#
# if x:
# a = y
# else:
# a = z
# With the ternary `if-else` expression we can write the same thing more concisely:
#
# a = y if x else z
# As an example, let's assign the absolute value of `x` to `m`.
x = float(input("Bir sayı girin: "))
m = x if x>0 else -x
print(m)
# We can also rewrite the very first example like this.
hedef = "42"
x = input("Tahmininiz: ")
print("Doğru!" if x == hedef else "Yanlış")
hedef = 42
x = int(input("Tahmininiz: "))
print("Doğru!" if x == hedef else "Yanlış")
# An `if-elif-else` block can likewise be written as a chained ternary `if-else` expression. For instance, instead of
#
# if rating > 100:
# sinif = "A"
# elif rating > 50:
# sinif = "B"
# else:
# sinif = "C"
# we can write
#
#     sinif = "A" if rating > 100 else "B" if rating > 50 else "C"
# But if you overdo this, the program becomes hard to read.
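# A quick check that the chained expression above gives the same result as the `if-elif-else` block (the sample ratings are arbitrary):
for rating in (150, 75, 10):
    if rating > 100:
        sinif = "A"
    elif rating > 50:
        sinif = "B"
    else:
        sinif = "C"
    sinif_ternary = "A" if rating > 100 else "B" if rating > 50 else "C"
    print(rating, sinif, sinif_ternary, sinif == sinif_ternary)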
ers = filters,
kernel_size = kernel_size,
strides = (1,1),
dilation_rate=(1,1),
padding = 'SAME',
activation = self.activation,
kernel_initializer = tf.glorot_normal_initializer(),
bias_initializer = tf.random_uniform_initializer()
)(inputs=a1)
a2 = tf.layers.BatchNormalization()(a2)
return a1, a2
def _model_unet(self):
self.inputs = tf.layers.Input(tensor=self.x, name='input_layer')
self.a1, self.a2, self.p1 = self._unet_conv(self.inputs, 16, (2,2), True, 'unit_1')
self.a3, self.a4, self.p2 = self._unet_conv(self.p1, 32, (2,2), True, 'unit_2')
self.a5, self.a6, self.p3 = self._unet_conv(self.p2, 64, (2,2), True, 'unit_3')
self.a7, self.a8, self.p4 = self._unet_conv(self.p3, 256, (2,2), True, 'unit_4')
self.a9, self.a10 = self._unet_conv(self.p4, 512, False, 'unit_5')
self.a11, self.a12 = self._unet_conv_transpose(self.a10, self.a8, 256, (2,2), 'unit_6')
self.a13, self.a14 = self._unet_conv_transpose(self.a12, self.a6, 64, (2,2), 'unit_7')
self.a15, self.a16 = self._unet_conv_transpose(self.a14, self.a4, 32, (2,2), 'unit_8')
self.a17, self.a18 = self._unet_conv_transpose(self.a16, self.a2, 16, (2,2), 'unit_9')
self.a19 = tf.layers.Conv2D(filters = 1,
kernel_size = (2,2),
strides = (1,1),
dilation_rate=(1,1),
padding = 'SAME',
activation = tf.nn.sigmoid,
kernel_initializer=tf.glorot_normal_initializer(),
bias_initializer=tf.random_uniform_initializer()
)(inputs=self.a18)
# def _model_modified_unet(self):
# self.inputs = tf.layers.Input(tensor=self.x, name='input_layer')
# self.a1, self.a2, self.p1 = self._unet_conv(self.inputs, 16, (2,2), True, 'unit_1')
# self.a3, self.a4, self.p2 = self._unet_conv(self.p1, 32, (2,2), True, 'unit_2')
# self.a5, self.a6, self.p3 = self._unet_conv(self.p2, 64, (2,2), True, 'unit_3')
# self.a7, self.a8, self.p4 = self._unet_conv(self.p3, 128, (2,2), True, 'unit_4')
# self.a1, self.a2= self._unet_conv(self.inputs, 16, (2,2), (1,1), False, 'unit_1')
# self.a3, self.a4= self._unet_conv(self.p1, 32, (2,2), (1,1), False, 'unit_2')
# self.a5, self.a6= self._unet_conv(self.p2, 64, (2,2), (1,1), False, 'unit_3')
# self.a7, self.a8= self._unet_conv(self.p3, 256, (2,2), (1,1), False, 'unit_4')
def _ioumetric(self, y_pred, y_true):
y_pred_ = tf.to_int64(y_pred > 0.5)
y_true_ = tf.to_int64(y_true > 0.5)
score, update_opts = tf.metrics.mean_iou(labels=y_true_,
predictions=y_pred_,
num_classes=2)
with tf.control_dependencies([update_opts]):
score = tf.identity(score)
return score
def _dice_iou(self, y_pred, y_true):
self.inter = tf.reduce_mean(tf.multiply(y_true, y_pred), name = 'inter')
self.union_iou = tf.reduce_mean(tf.subtract(tf.add(y_true, y_pred), self.inter),
name = 'union_iou')
self.iou = tf.div(self.inter, self.union_iou,
name = 'iou')
self.union_dice = tf.reduce_mean(tf.add(y_true, y_pred),
name='union_dice')
self.dice = tf.multiply(tf.constant(2.0, dtype=tf.float32),
tf.div(self.inter, self.union_dice),
name = 'dice')
return self.iou, self.dice
def _loss(self):
with tf.name_scope('loss'):
self.bce = tf.reduce_mean(tf.keras.losses.binary_crossentropy(self.y, self.a19),
name='bce')
self.iou, self.dice = self._dice_iou(self.a19, self.y)
self.log_dice = tf.log(self.dice, name='log_dice')
self.loss = tf.subtract(self.bce, self.log_dice, name='loss')
#self.loss = tf.subtract(tf.constant(1.0, dtype=tf.float32), self.iou, name='loss')
#self.loss = self.bce
self.score = self._ioumetric(self.a19, self.y)
def _optimizer(self):
self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss,
global_step=self.global_step)
def _summaries(self):
tf.summary.scalar('loss', self.loss)
tf.summary.histogram('histogram_loss', self.loss)
tf.summary.scalar('score', self.score)
tf.summary.histogram('histogram_score', self.score)
tf.summary.scalar('lr', self.learning_rate)
self.summary_op = tf.summary.merge_all()
def _saver(self):
self.saver = tf.train.Saver(name='saver')
def _build_graph(self):
self._input_data()
self._model_unet()
self._loss()
self._optimizer()
self._summaries()
self._saver()
print("Computation graph build completed...", end='\n')
def _reset_graph(self):
tf.reset_default_graph()
def _inference(self, test_data, graphdir, name):
self._reset_graph()
self.saver = tf.train.import_meta_graph(graphdir)
self.graph = tf.get_default_graph()
self.x = self.graph.get_tensor_by_name("input/input_x:0")
self.a19 = self.graph.get_tensor_by_name("conv2d/Sigmoid:0")
self.preds=[]
with tf.Session() as self.sess:
self.saver.restore(self.sess, tf.train.latest_checkpoint(PARAMS + name))
feed_dict = {self.x : test_data}
pred = self.sess.run(self.a19, feed_dict=feed_dict)
self.preds.append(pred)
self.preds = np.squeeze(np.array(self.preds), axis=0).astype(np.float32)
def _train(self, train_data, valid_data, name):
print('Training...', end='\n')
initial_step = 0
with tf.Session() as self.sess:
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
writer_train = tf.summary.FileWriter(LOGDIR + name + '/train/')
writer_valid = tf.summary.FileWriter(LOGDIR + name + '/valid/')
writer_train.add_graph(self.sess.graph)
writer_valid.add_graph(self.sess.graph)
min_loss = 1e16
j = 0
lr = 0
for i in range(self.epochs):
gen = self._batch_gen(train_data, batch_size=self.batch_size, train=True, shuffle=True)
#gen = self._image_augmentation(train_data, batch_size=self.batch_size, train=True, seed= (i+1))
average_loss = 0.0
average_valid_loss = 0.0
feed_dict_valid = {self.x: valid_data[0], self.y: valid_data[1]}
for k, (batch_x, batch_y) in enumerate(gen):
lr = self._cyclic_lr(j)
feed_dict_train = {self.x: batch_x, self.y: batch_y}
loss, mean_iou, _ = self.sess.run([self.loss, self.score, self.optimizer], feed_dict=feed_dict_train)
valid_loss = self.sess.run(self.loss, feed_dict=feed_dict_valid)
average_loss += loss
average_valid_loss += valid_loss
if j % 10 == 0:
s_train = self.sess.run(self.summary_op, feed_dict=feed_dict_train)
s_valid = self.sess.run(self.summary_op, feed_dict=feed_dict_valid)
writer_train.add_summary(s_train, j)
writer_valid.add_summary(s_valid, j)
j +=1
if k > 30:
break
if average_loss < min_loss:
min_loss = average_loss
self.saver.save(self.sess, os.path.join(PARAMS + name, "unet_best.ckpt"), i)
self.saver.save(self.sess, PARAMS + name + '/unet_best_', global_step=self.global_step)
print("Epoch {} Training Loss: {:.4f} Validation Loss: {:.4f} Score: {:.4f}".format(i, average_loss / (k+1), average_valid_loss / (k+1), mean_iou), end='\r')
# -
# ## Model Training
def main(X_train, y_train, X_valid, y_valid, X_test, name):
model = u_net(learning_rate = 1e-3, batch_size=32, epochs=300, dimension=DIMENSION)
model._build_graph()
model._train((X_train, y_train), (X_valid, y_valid), name)
return model
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size = 0.3, random_state = 140)
model = main(X_train, y_train, X_valid, y_valid, X_test, '_1')
# ## Inference
model._inference(X_test, PARAMS + '_1/' + 'unet_best_-4410.meta', '_1')
preds = model.preds
# ## Post Processing
preds = (preds > 0.5).astype(np.uint8)
final_mask = mask_post_process(TEST_PATH, TEST_ID, DIMENSION, preds)
plt.imshow(preds[5,:,:,0])
plt.show()
plt.imshow(X_test[5,:,:,:])
plt.show()
plt.imshow(final_mask[5][:,:,0], cmap='gray')
plt.show()
# +
new_test_ids = []
rles = []
for n, id_ in enumerate(TEST_ID):
rle = list(prob_to_rles(final_mask[n]))
rles.extend(rle)
new_test_ids.extend([id_]*len(rle))
# -
# ## Submission
sub = pd.DataFrame()
sub['ImageId'] = new_test_ids
sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
sub.to_csv('./submission/sub-dsbowl2018-test.csv', index=False)
| 20,470 |
/ML Pipeline Preparation.ipynb
|
27e122023fbc0354c34e70f2f6ed37c2431e00af
|
[
"MIT"
] |
permissive
|
liorgefen86/disaster_response
|
https://github.com/liorgefen86/disaster_response
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 10,142 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
#
# ---
# # Assignment 1 - Introduction to Machine Learning
# For this assignment, you will be using the Breast Cancer Wisconsin (Diagnostic) Database to create a classifier that can help diagnose patients. First, read through the description of the dataset (below).
# +
import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
#print(cancer.DESCR) # Print the data set description
print(cancer.DESCR)
# -
# The object returned by `load_breast_cancer()` is a scikit-learn Bunch object, which is similar to a dictionary.
cancer.keys()
# ### Question 0 (Example)
#
# How many features does the breast cancer dataset have?
#
# *This function should return an integer.*
# +
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
# This function returns the number of features of the breast cancer dataset, which is an integer.
# The assignment question description will tell you the general format the autograder is expecting
return len(cancer['feature_names'])
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
# -
# ### Question 1
#
# Scikit-learn works with lists, numpy arrays, scipy-sparse matrices, and pandas DataFrames, so converting the dataset to a DataFrame is not necessary for training this model. Using a DataFrame does however help make many things easier such as munging data, so let's practice creating a classifier with a pandas DataFrame.
#
#
#
# Convert the sklearn.dataset `cancer` to a DataFrame.
#
# *This function should return a `(569, 31)` DataFrame with *
#
# *columns = *
#
# ['mean radius', 'mean texture', 'mean perimeter', 'mean area',
# 'mean smoothness', 'mean compactness', 'mean concavity',
# 'mean concave points', 'mean symmetry', 'mean fractal dimension',
# 'radius error', 'texture error', 'perimeter error', 'area error',
# 'smoothness error', 'compactness error', 'concavity error',
# 'concave points error', 'symmetry error', 'fractal dimension error',
# 'worst radius', 'worst texture', 'worst perimeter', 'worst area',
# 'worst smoothness', 'worst compactness', 'worst concavity',
# 'worst concave points', 'worst symmetry', 'worst fractal dimension',
# 'target']
#
# *and index = *
#
# RangeIndex(start=0, stop=569, step=1)
def answer_one():
df = pd.DataFrame(cancer.data, columns=cancer['feature_names'])
df["target"] = cancer["target"]
return df
# ### Question 2
# What is the class distribution? (i.e. how many instances of `malignant` (encoded 0) and how many `benign` (encoded 1)?)
#
# *This function should return a Series named `target` of length 2 with integer values and index =* `['malignant', 'benign']`
def answer_two():
cancerdf = answer_one()
benign_sum = np.sum(cancerdf["target"])
malignant_sum = len(cancerdf["target"]) - benign_sum
return pd.Series([malignant_sum, benign_sum], index = ['malignant', 'benign'])
# ### Question 3
# Split the DataFrame into `X` (the data) and `y` (the labels).
#
# *This function should return a tuple of length 2:* `(X, y)`*, where*
# * `X` *has shape* `(569, 30)`
# * `y` *has shape* `(569,)`.
def answer_three():
cancerdf = answer_one()
X = cancerdf[cancer['feature_names']]
y = cancerdf["target"]
return X, y
# ### Question 4
# Using `train_test_split`, split `X` and `y` into training and test sets `(X_train, X_test, y_train, and y_test)`.
#
# **Set the random number generator state to 0 using `random_state=0` to make sure your results match the autograder!**
#
# *This function should return a tuple of length 4:* `(X_train, X_test, y_train, y_test)`*, where*
# * `X_train` *has shape* `(426, 30)`
# * `X_test` *has shape* `(143, 30)`
# * `y_train` *has shape* `(426,)`
# * `y_test` *has shape* `(143,)`
# +
from sklearn.model_selection import train_test_split
def answer_four():
X, y = answer_three()
(X_train, X_test, y_train, y_test) = train_test_split(X , y, random_state=0)
return (X_train, X_test, y_train, y_test)
# -
# ### Question 5
# Using KNeighborsClassifier, fit a k-nearest neighbors (knn) classifier with `X_train`, `y_train` and using one nearest neighbor (`n_neighbors = 1`).
#
# *This function should return a * `sklearn.neighbors.classification.KNeighborsClassifier`.
# +
from sklearn.neighbors import KNeighborsClassifier
def answer_five():
X_train, X_test, y_train, y_test = answer_four()
knn = KNeighborsClassifier(n_neighbors = 1)
return knn.fit(X_train, y_train)
# -
# ### Question 6
# Using your knn classifier, predict the class label using the mean value for each feature.
#
# Hint: You can use `cancerdf.mean()[:-1].values.reshape(1, -1)` which gets the mean value for each feature, ignores the target column, and reshapes the data from 1 dimension to 2 (necessary for the predict method of KNeighborsClassifier).
#
# *This function should return a numpy array either `array([ 0.])` or `array([ 1.])`*
def answer_six():
cancerdf = answer_one()
means = cancerdf.mean()[:-1].values.reshape(1, -1)
knn = answer_five()
return knn.predict(means)
# ### Question 7
# Using your knn classifier, predict the class labels for the test set `X_test`.
#
# *This function should return a numpy array with shape `(143,)` and values either `0.0` or `1.0`.*
def answer_seven():
X_train, X_test, y_train, y_test = answer_four()
knn = answer_five()
return knn.predict(X_test)
answer_seven()
# ### Question 8
# Find the score (mean accuracy) of your knn classifier using `X_test` and `y_test`.
#
# *This function should return a float between 0 and 1*
def answer_eight():
X_train, X_test, y_train, y_test = answer_four()
knn = answer_five()
return knn.score(X_test, y_test)
# ### Optional plot
#
# Try using the plotting function below to visualize the different prediction scores between training and test sets, as well as malignant and benign cells.
def accuracy_plot():
import matplotlib.pyplot as plt
# %matplotlib notebook
X_train, X_test, y_train, y_test = answer_four()
# Find the training and testing accuracies by target value (i.e. malignant, benign)
mal_train_X = X_train[y_train==0]
mal_train_y = y_train[y_train==0]
ben_train_X = X_train[y_train==1]
ben_train_y = y_train[y_train==1]
mal_test_X = X_test[y_test==0]
mal_test_y = y_test[y_test==0]
ben_test_X = X_test[y_test==1]
ben_test_y = y_test[y_test==1]
knn = answer_five()
scores = [knn.score(mal_train_X, mal_train_y), knn.score(ben_train_X, ben_train_y),
knn.score(mal_test_X, mal_test_y), knn.score(ben_test_X, ben_test_y)]
plt.figure()
# Plot the scores as a bar chart
bars = plt.bar(np.arange(4), scores, color=['#4c72b0','#4c72b0','#55a868','#55a868'])
# directly label the score onto the bars
for bar in bars:
height = bar.get_height()
plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90, '{0:.{1}f}'.format(height, 2),
ha='center', color='w', fontsize=11)
# remove all the ticks (both axes), and tick labels on the Y axis
plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')
# remove the frame of the chart
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.xticks([0,1,2,3], ['Malignant\nTraining', 'Benign\nTraining', 'Malignant\nTest', 'Benign\nTest'], alpha=0.8);
plt.title('Training and Test Accuracies for Malignant and Benign Cells', alpha=0.8)
# +
# Uncomment the plotting function to see the visualization,
# Comment out the plotting function when submitting your notebook for grading
accuracy_plot()
# -
.googleapis.com/kaggle-forum-message-attachments/2594/supervised_link_prediction.pdf<br>
# 3. Tune hyperparameters for XG boost with all these features and check the error metric.
# -
# reading data
if os.path.isfile('data/after_eda/train_pos_after_eda.csv'):
train_graph=nx.read_edgelist('data/after_eda/train_pos_after_eda.csv',delimiter=',',create_using=nx.DiGraph(),nodetype=int)
print(nx.info(train_graph))
else:
print("please run the FB_EDA.ipynb or download the files from drive")
#reading
from pandas import read_hdf
df_final_train = read_hdf('data/fea_sample/storage_sample_stage4.h5', 'train_df',mode='r')
df_final_test = read_hdf('data/fea_sample/storage_sample_stage4.h5', 'test_df',mode='r')
# ## 1. Preferential Attachment
#
# https://en.wikipedia.org/wiki/Preferential_attachment
#
# https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.link_prediction.preferential_attachment.html
# \begin{equation}
# \text{Preferential Attachment}(X, Y) = |X| \cdot |Y|
# \end{equation}
# where $|X|$ and $|Y|$ are the neighbourhood sizes (number of followers or followees) of the two nodes, as computed by the helper functions below.
#for followees
def pre_att_for_followees(a,b):
try:
return len(set(train_graph.successors(a))) * len(set(train_graph.successors(b)))
except:
return 0
#for followers
def pre_att_for_followers(a,b):
try:
return len(set(train_graph.predecessors(a)))* len(set(train_graph.predecessors(b)))
except:
return 0
# ### Adding new set of features
# - Preferential Attachment
if not os.path.isfile('data/fea_sample/storage_sample_stage5.h5'):
#mapping Preferential Attachment followers to train and test data
df_final_train['preferential_attachment_followers'] = df_final_train.apply(lambda row:
pre_att_for_followers(row['source_node'],row['destination_node']),axis=1)
df_final_test['preferential_attachment_followers'] = df_final_test.apply(lambda row:
pre_att_for_followers(row['source_node'],row['destination_node']),axis=1)
#mapping Preferential Attachment followees to train and test data
df_final_train['preferential_attachment_followees'] = df_final_train.apply(lambda row:
pre_att_for_followees(row['source_node'],row['destination_node']),axis=1)
df_final_test['preferential_attachment_followees'] = df_final_test.apply(lambda row:
pre_att_for_followees(row['source_node'],row['destination_node']),axis=1)
hdf = HDFStore('data/fea_sample/storage_sample_stage5.h5')
hdf.put('train_df',df_final_train, format='table', data_columns=True)
hdf.put('test_df',df_final_test, format='table', data_columns=True)
hdf.close()
else:
df_final_train = read_hdf('data/fea_sample/storage_sample_stage5.h5', 'train_df',mode='r')
df_final_test = read_hdf('data/fea_sample/storage_sample_stage5.h5', 'test_df',mode='r')
# ## 2. SVD Feature
# #### Calculate svd_mul as the element-wise product of the source-node and destination-node SVD features
# $ M = U \Sigma V^* $
#
# So, given a matrix __M: m×n__ that performs some linear transformation, the SVD decomposes that transformation into three factors __U: m×m__, __S: m×n__, __V: n×n__, such that __M = U×S×V^T__
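# As a quick sanity check of this decomposition (a toy example on a random matrix, independent of the actual graph; `k=2` is an arbitrary choice for illustration):
# +
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import svds

toy = csr_matrix(np.random.rand(6, 5)).asfptype()
U_toy, s_toy, Vt_toy = svds(toy, k=2)
# svds keeps only the top-k singular triplets, so this is a rank-k approximation of `toy`, not an exact reconstruction.
approx = U_toy @ np.diag(s_toy) @ Vt_toy
print('shapes:', U_toy.shape, s_toy.shape, Vt_toy.shape)
print('rank-2 approximation error:', np.linalg.norm(toy.toarray() - approx))
# -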
def svd(x, S):
try:
z = sadj_dict[x]
return S[z]
except:
return [0,0,0,0,0,0]
# for the SVD features: build a dict mapping each node value to its row index in the SVD factor matrices
sadj_col = sorted(train_graph.nodes())
sadj_dict = { val:idx for idx,val in enumerate(sadj_col)}
Adj = nx.adjacency_matrix(train_graph,nodelist=sorted(train_graph.nodes())).asfptype()
U, s, V = svds(Adj, k = 6)
print('Adjacency matrix Shape',Adj.shape)
print('U Shape',U.shape)
print('V Shape',V.shape)
print('s Shape',s.shape)
# for U shape
train_source_svd_U = df_final_train.source_node.apply(lambda x: svd(x, U))
train_destination_svd_U = df_final_train.destination_node.apply(lambda x: svd(x, U))
test_source_svd_U = df_final_test.source_node.apply(lambda x: svd(x, U))
test_destination_svd_U = df_final_test.destination_node.apply(lambda x: svd(x, U))
## convert list to array FOR U SHAPE
train_source_svd_U = np.array([np.array(i) for i in train_source_svd_U])
train_destination_svd_U = np.array([np.array(i) for i in train_destination_svd_U])
test_source_svd_U = np.array([np.array(i) for i in test_source_svd_U])
test_destination_svd_U = np.array([np.array(i) for i in test_destination_svd_U])
# for V shape
train_source_svd_V = df_final_train.source_node.apply(lambda x: svd(x, V.T))
train_destination_svd_V = df_final_train.destination_node.apply(lambda x: svd(x, V.T))
test_source_svd_V = df_final_test.source_node.apply(lambda x: svd(x, V.T))
test_destination_svd_V = df_final_test.destination_node.apply(lambda x: svd(x, V.T))
## convert list to array FOR V SHAPE
train_source_svd_V = np.array([np.array(i) for i in train_source_svd_V])
train_destination_svd_V = np.array([np.array(i) for i in train_destination_svd_V])
test_source_svd_V = np.array([np.array(i) for i in test_source_svd_V])
test_destination_svd_V = np.array([np.array(i) for i in test_destination_svd_V])
print(train_source_svd_U.shape)
print(test_source_svd_U.shape)
print(train_source_svd_V.shape)
print(test_source_svd_V.shape)
# Element-wise product of the source-node and destination-node SVD features (U factors)
train_sour_dist_svd_U = np.multiply(train_source_svd_U,train_destination_svd_U)
test_sour_dist_svd_U = np.multiply(test_source_svd_U,test_destination_svd_U)
print(train_sour_dist_svd_U.shape)
print(test_sour_dist_svd_U.shape)
# Element-wise product of the source-node and destination-node SVD features (V factors)
train_sour_dist_svd_V = np.multiply(train_source_svd_V,train_destination_svd_V)
test_sour_dist_svd_V = np.multiply(test_source_svd_V,test_destination_svd_V)
print(train_sour_dist_svd_V.shape)
print(test_sour_dist_svd_V.shape)
train_sour_dist_svd_U = pd.DataFrame(train_sour_dist_svd_U,columns=['svd_mul_u_1', 'svd_mul_u_2','svd_mul_u_3', 'svd_mul_u_4', 'svd_mul_u_5', 'svd_mul_u_6'])
train_sour_dist_svd_U.head(2)
test_sour_dist_svd_U = pd.DataFrame(test_sour_dist_svd_U,columns=['svd_mul_u_1', 'svd_mul_u_2','svd_mul_u_3', 'svd_mul_u_4', 'svd_mul_u_5', 'svd_mul_u_6'])
test_sour_dist_svd_U.head(2)
train_sour_dist_svd_V = pd.DataFrame(train_sour_dist_svd_V,columns=['svd_mul_v_1', 'svd_mul_v_2','svd_mul_v_3', 'svd_mul_v_4', 'svd_mul_v_5', 'svd_mul_v_6'])
train_sour_dist_svd_V.head(2)
test_sour_dist_svd_V = pd.DataFrame(test_sour_dist_svd_V,columns=['svd_mul_v_1', 'svd_mul_v_2','svd_mul_v_3', 'svd_mul_v_4', 'svd_mul_v_5', 'svd_mul_v_6'])
test_sour_dist_svd_V.head(2)
# #### Calculate svd_dot as the dot product of the source-node and destination-node SVD features
train_sour_dist_svd_U['sour_dist_svd_dot_U'] = train_sour_dist_svd_U.sum(axis=1)
train_sour_dist_svd_V['sour_dist_svd_dot_V'] = train_sour_dist_svd_V.sum(axis=1)
train_sour_dist_svd_U.head(2)
train_sour_dist_svd_V.head(2)
test_sour_dist_svd_U['sour_dist_svd_dot_U'] = test_sour_dist_svd_U.sum(axis=1)
test_sour_dist_svd_V['sour_dist_svd_dot_V'] = test_sour_dist_svd_V.sum(axis=1)
test_sour_dist_svd_U.head(2)
test_sour_dist_svd_V.head(2)
# ### Adding new set of features
df_final_train = pd.concat([df_final_train,train_sour_dist_svd_U], axis=1)
df_final_train = pd.concat([df_final_train,train_sour_dist_svd_V], axis=1)
df_final_train.head(5)
df_final_test = pd.concat([df_final_test,test_sour_dist_svd_U], axis=1)
df_final_test = pd.concat([df_final_test,test_sour_dist_svd_V], axis=1)
df_final_test.head(5)
import tables
hdf = HDFStore('data/fea_sample/storage_sample_stage6.h5')
hdf.put('train_df',df_final_train, format='table', data_columns=True)
hdf.put('test_df',df_final_test, format='table', data_columns=True)
hdf.close()
df_final_train.columns
y_train = df_final_train.indicator_link
y_test = df_final_test.indicator_link
df_final_train.drop(['source_node', 'destination_node','indicator_link'],axis=1,inplace=True)
df_final_test.drop(['source_node', 'destination_node','indicator_link'],axis=1,inplace=True)
# ## 3. Modelling
# ### 3.1 Random Forest
# +
# Random Forest model after add all features
param_dist = {"n_estimators":sp_randint(105,300),
"max_depth": sp_randint(10,20),
"min_samples_split": sp_randint(100,200),
"min_samples_leaf": sp_randint(25,70)}
clf = RandomForestClassifier(random_state=25,n_jobs=-1)
rf_random = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=5,cv=10,scoring='f1',random_state=25)
rf_random.fit(df_final_train,y_train)
print('mean test scores',rf_random.cv_results_['mean_test_score'])
print('mean train scores',rf_random.cv_results_['mean_train_score'])
# -
print(rf_random.best_estimator_)
clf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=14, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=30, min_samples_split=101,
min_weight_fraction_leaf=0.0, n_estimators=272, n_jobs=-1,
oob_score=False, random_state=25, verbose=0, warm_start=False)
clf.fit(df_final_train,y_train)
y_train_pred = clf.predict(df_final_train)
y_test_pred = clf.predict(df_final_test)
print('Train f1 score',f1_score(y_train,y_train_pred))
print('Test f1 score',f1_score(y_test,y_test_pred))
print('Train confusion_matrix')
plot_confusion_matrix(y_train,y_train_pred)
print('Test confusion_matrix')
plot_confusion_matrix(y_test,y_test_pred)
from sklearn.metrics import roc_curve, auc
fpr_tr,tpr_tr,ths = roc_curve(y_train,y_train_pred)
fpr_te,tpr_te,ths = roc_curve(y_test,y_test_pred)
auc_sc_tr = auc(fpr_tr, tpr_tr)
auc_sc_te = auc(fpr_te, tpr_te)
plt.plot(fpr_te, tpr_te, color='navy',label='ROC curve Test (area = %0.2f)' % auc_sc_te)
plt.plot(fpr_tr, tpr_tr, color='r', label='ROC curve Train (area = %0.2f)' % auc_sc_tr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic with test data')
plt.legend()
plt.show()
features = df_final_train.columns
importances = clf.feature_importances_
indices = (np.argsort(importances))[-30:]
plt.figure(figsize=(10,12))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='r', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
# ### 3.2 XGBoost
# +
import xgboost as xgb
x_model = xgb.XGBClassifier(nthread=4)
param_dist = {"learning_rate" : uniform(0.001,0.3),
"n_estimators" : sp_randint(10,600),
"max_depth" : sp_randint(5,20),
"min_child_weight": sp_randint(1,10),
"gamma" : uniform(0,0.03),
"subsample" : uniform(0.6,0.4),
"reg_alpha" : sp_randint(0,200),
"reg_lambda" : sp_randint(0,200),
"colsample_bytree":uniform(0.6,0.3)}
rf_random = RandomizedSearchCV(x_model, param_distributions=param_dist,
n_iter=5,cv=10,scoring='f1',random_state=25)
rf_random.fit(df_final_train,y_train)
print('mean test scores',rf_random.cv_results_['mean_test_score'])
print('mean train scores',rf_random.cv_results_['mean_train_score'])
# -
print(rf_random.best_estimator_)
# +
clf = xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bytree=0.8400362967290745, gamma=0.004483331345083044,
learning_rate=0.07020278445544602, max_delta_step=0, max_depth=11,
min_child_weight=1, missing=None, n_estimators=277, n_jobs=1,
nthread=4, objective='binary:logistic', random_state=0, reg_alpha=2,
reg_lambda=171, scale_pos_weight=1, seed=None, silent=True,
subsample=0.8103275853858888)
clf.fit(df_final_train,y_train)
y_train_pred = clf.predict(df_final_train)
y_test_pred = clf.predict(df_final_test)
print('Train f1 score',f1_score(y_train,y_train_pred))
print('Test f1 score',f1_score(y_test,y_test_pred))
# -
print('Train confusion_matrix')
plot_confusion_matrix(y_train,y_train_pred)
print('Test confusion_matrix')
plot_confusion_matrix(y_test,y_test_pred)
from sklearn.metrics import roc_curve, auc
fpr_tr,tpr_tr,ths = roc_curve(y_train,y_train_pred)
fpr_te,tpr_te,ths = roc_curve(y_test,y_test_pred)
auc_sc_tr = auc(fpr_tr, tpr_tr)
auc_sc_te = auc(fpr_te, tpr_te)
plt.plot(fpr_te, tpr_te, color='navy',label='ROC curve Test (area = %0.2f)' % auc_sc_te)
plt.plot(fpr_tr, tpr_tr, color='r', label='ROC curve Train (area = %0.2f)' % auc_sc_tr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic with test data')
plt.legend()
plt.show()
features = df_final_train.columns
importances = clf.feature_importances_
indices = (np.argsort(importances))[-81:]
plt.figure(figsize=(10,12))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='r', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
from prettytable import PrettyTable
x = PrettyTable()
x.field_names = ["Model","Best Hyper Parameter","Train f1_score","Test f1_score","Train AUC","Test_AUC"]
x.add_row(["RandomForestClassifier","max_depth:14, min_samples_leaf:30,\n min_samples_split:101, n_estimators:272",0.9641 ,0.9269,0.96,0.93])
x.add_row(["XGBClassifier","colsample_bytree:0.8400,gamma:0.0044,\nlearning_rate:0.0702,max_depth:11,\nmin_child_weight:1,n_estimators: 77,\nreg_alpha:2,reg_lambda:171,\nsubsample:0.8103",0.9852,0.9328,0.99,0.94])
from IPython.display import Markdown, display
def printmd(string):
display(Markdown(string))
printmd('****Final Conclusion:****')
print(x)
| 22,519 |
/cours/2020/ing2/bigdata/lesson7 Graphics/11 -- Static graphics with Seaborn.ipynb
|
fb0acd12d62d9d3d5fb839655dd465ddda6f4c3d
|
[] |
no_license
|
kariulele/epita-image
|
https://github.com/kariulele/epita-image
| 3 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,383,045 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seaborn
#
# [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) is a library built on top of Matplotlib. It improves Matplotlib's default rendering and offers statistical graphics. It is often simpler to try to make your figure directly with Seaborn, and only if the result is not satisfactory, or what you want cannot be done, to fall back to Matplotlib.
#
# Seaborn is designed to work with Pandas, but it can also be used without Pandas.
#
# #### References
# * [list of functions](http://seaborn.pydata.org/api.html)
# * [gallery of examples](http://seaborn.pydata.org/examples/index.html)
# +
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
sns.__version__
# -
# ### Plot styling
#
# One of Seaborn's strengths is producing good-looking plots and offering several styles. See
# https://seaborn.pydata.org/tutorial/aesthetics.html for more details.
sns.set_style('whitegrid') # background
sns.set_context("notebook") # change size of font, can be: paper (default), notebook, talk, poster
# ## The data
#
# We will work with the list of the mayors of France elected in 2014.
# +
import pandas as pd
import datetime as dt
import numpy as np
mayors = pd.read_excel("data/maires-2014.xlsx")
# -
mayors.head()
mayors.Genre.value_counts()
# ## Presenting the data
#
#
# [Relation plot](https://seaborn.pydata.org/generated/seaborn.relplot.html) `relplot` presents the data
# as a scatter plot or as a curve:
#
# * as points, with the argument `kind = "scatter"` or directly with `scatterplot` (the default)
# * as a curve, with the argument `kind = "line"` or directly with `lineplot`
#
# ### A simple curve
#
# As noted at the start, Seaborn works well with Pandas, but to begin let's see how to display a simple
# curve from one set of values for x and another for y. We sort the communes by population and plot the sizes in order, to see how the communes are distributed by size. Here we have to use `lineplot`, because
# `relplot` requires a DataFrame.
ax = sns.lineplot(x=np.arange(len(mayors)), y=np.sort(mayors['Population de la commune']))
ax.set_title('Taille des %d communes' % len(mayors))
ax.set(yscale='log') # you can try without a logarithm scale in y to see the result
# Note that there are more communes with fewer than 100 inhabitants than with more than 10,000.
# ### Distribution of the data
#
# We may want to look at the correlation between two fields.
# Mayors have an age and their commune has a population, so we can plot age against population size (to see, for example, whether bigger towns have older mayors).
g = sns.relplot(data=mayors, x='Age', y='Population de la commune')
g.set(yscale='log') # same, without the log scale we cannot see anything
# We can also use colour to encode a third field (the `hue` argument), which may or may not be numerical. The mayor's profession or gender, for instance, are categories rather than real values, but that works fine.
#
# A fourth field can be added (the `col` argument), but it must then be a category, since one sub-figure is drawn per value.
sns.set_style('dark')
sns.set_context("talk")
color_profession = {'agriculture':'g','industrie/commerce':'brown', 'privé':'cyan', 'libéral':'b', 'divers':'pink',
'fonctionnaire':'yellow', 'enseignement':'orange', 'entrep. publique':'r', 'retraité':'black'}
g = sns.relplot(data=mayors, x='Age', y='Population de la commune', hue='Type profession', palette=color_profession,
marker='.', col='Genre')
g.set(yscale='log')
sns.set_style('white')
sns.set_context("notebook")
g = sns.relplot(data=mayors, x='Age', y='Population de la commune', hue='Genre', col='Type profession', col_wrap=3)
g.set(yscale='log', xlim=(20,100), ylim=(1,3000000))
# ### Pairwise correlations of N fields
#
# [`pairplot`](https://seaborn.pydata.org/generated/seaborn.pairplot.html)
# gives a global view of the correlations between all the fields of a dataset. It displays an NxN matrix of figures
# (with N the number of fields considered), showing the distribution of each field against every other, and on the
# diagonal the histogram of the corresponding field.
#
# A field can drive the colour via the `hue` argument. That field is not counted in the N of the matrix.
sns.set_style('darkgrid')
mayors['Taille commune'] = np.log10(mayors['Population de la commune'])
sns.pairplot(data=mayors[mayors['Type profession']=='fonctionnaire'][['Age', 'Taille commune', 'Genre']],
markers='+', hue='Genre', aspect=2)
# ### Displaying the values of different categories
#
# [`catplot`](https://seaborn.pydata.org/generated/seaborn.catplot.html) compares all the categories of one
# field against a numerical field. The type of display is chosen with the `kind` argument:
#
# * `point` shows the mean and an error bar for each category and links the categories with lines (useful when the categories are ordered and you want to follow an evolution, years for instance)
# * `bar` shows the same thing as `point` but with bars and without linking the categories (of limited interest)
# * `box` and `boxen` give a statistical summary of each category (the best statistical representation IMHO)
# * `swarm` draws one point per element in a tree-like shape (good for seeing how many elements each category has)
# * `strip` draws one point per element in a sort of column (also shows the counts, but `swarm` is prettier IMHO)
# * `violin` wraps the elements of each category into a curve, producing a pyramid or violin shape (perfect for an age pyramid or the like)
#
# Looking at the mayors' ages for each type of profession amounts to drawing age pyramids, separating women from men. The pyramid is obtained with `kind = 'violin'`.
sns.set_style('whitegrid')
g = sns.catplot(data=mayors, x='Type profession', y="Age", hue="Genre", palette="muted",
kind='violin', scale='count', split=True, aspect=3)
g.set(title="Pyramide des âges des maires pour chaque famille de profession") # Mayors' age-sex pyramid for each type of professions
g.set_xticklabels(rotation=30, ha='right')
# La catégorie "divers" a le plus de femmes. Regardons en détail les différents métiers de cette catégorie et affichons un point par maire avec `kind = 'swarm'` :
g = sns.catplot(data=mayors[mayors['Type profession']=='divers'], x='Code profession', y="Age", hue="Genre",
kind='swarm', aspect=3)
# where the profession codes mean:
#
# * 54 : full-time politician
# * 55 : minister of religion (there are none)
# * 56 : other professions
# * 57 : no declared profession
#
# Women are in the majority in the "no declared profession" category...
# ### How many are they?
#
# When there is too much data, drawing lots of small points on top of each other quickly becomes unreadable.
#
# [`jointplot`](https://seaborn.pydata.org/generated/seaborn.jointplot.html) offers an interesting alternative that
# estimates the number of instances sharing the same values.
# To do so it draws a histogram along each axis and, with `kind='kde'`, a relief-like density map.
# The peak of the mountain corresponds to the largest number of mayors sharing the same age and the same town size. In our case these are the 70-year-old mayors of communes of about 300 inhabitants.
#
sns.set_style('white')
sns.jointplot(x=mayors.Age, y=np.log10(mayors['Population de la commune']), kind='kde',
height=8, xlim=((20,100)), ylim=np.log10((1,3000000)))
# Unfortunately `jointplot` does not provide a legend showing the values of the contour lines... (a possible workaround is sketched below).
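# One possible workaround (my own suggestion, not part of the original lesson; the exact argument names depend on your Seaborn version, and the `x=`/`y=` form below assumes Seaborn >= 0.11) is to draw the density directly with `kdeplot` and request a colour bar, which labels the density levels:
# +
import numpy as np
import seaborn as sns

ax = sns.kdeplot(x=mayors.Age, y=np.log10(mayors['Population de la commune']),
                 fill=True, cbar=True)  # the colour bar plays the role of the missing contour legend
ax.set(xlabel='Age', ylabel='log10(population)')
# -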
#
# We can get a rough idea by plotting the histogram of ages. The maximum is at 70 years, with almost 5% of the mayors being that age.
sns.set_style('whitegrid')
plt.figure(figsize=(10, 5))
sns.distplot(mayors.Age, bins=33)
len(mayors[(mayors.Age > 69.5) & (mayors.Age < 70.5)]) / len(mayors)
# ## Statistics
#
# [`regplot`](https://seaborn.pydata.org/generated/seaborn.regplot.html) draws regressions.
plt.figure(figsize=(10, 6))
ax =sns.regplot(data=mayors[mayors.Genre=='M'], x='Age', y='Population de la commune', color='b', marker='.', order=3)
sns.regplot(data=mayors[mayors.Genre=='F'], x='Age', y='Population de la commune', color='orange', marker='.', order=3)
ax.set(yscale='log', xlim=(20,100), ylim=(1,3000000))
# We saw that women are five times less numerous; here we see that they run smaller towns, except around age 60 where they overtake men.
#
# Note also the confidence band, which is so wide for women under 38 and over 77 that the curve quickly stops being very meaningful.
# ### Median and quartiles
#
# Finally, the box-and-whisker plot shows
#
# * the median as the line inside the box
# * the first and third quartiles as the bottom and top of the box
# * the whiskers extending to the smallest and largest values that are taken into account
# * the points (diamonds) beyond the whiskers, which are the outliers and are therefore not taken into account
g = sns.catplot(data=mayors, x='Type profession', y="Age", hue="Genre", palette="muted",
kind='box', aspect=3)
g.set(title="Quartiles des âges des maires pour chaque famille de profession") # Mayors' age-sex pyramid for each type of professions
g.set_xticklabels(rotation=30, ha='right')
# ### More
#
# Have a look at the [gallery of examples](http://seaborn.pydata.org/examples/index.html) for more examples of what Seaborn can do.
# + [markdown] variables={" PreviousNext(\"10 -- Static graphics with Matplotlib.ipynb\", \"20 -- Dynamic graphics with Plotly -- Basics.ipynb\")": " <br/><center><a href=\"10 -- Static graphics with Matplotlib.ipynb\">10 -- Static graphics with Matplotlib</a> \u2190 <a href=\"http://python3.mooc.lrde.epita.fr:8888/tree/lesson7 Graphics\" style=\"text-decoration:none\"> \u25b3 </a> \u2192 <a href=\"20 -- Dynamic graphics with Plotly -- Basics.ipynb\">20 -- Dynamic graphics with Plotly -- Basics</a></center><br/> "}
# {{ PreviousNext("10 -- Static graphics with Matplotlib.ipynb", "20 -- Dynamic graphics with Plotly -- Basics.ipynb")}}
| 12,347 |
/Desicion_Tree_Random_Forest.ipynb
|
047754d3bc42581a2a983d17f2d168638a7600b1
|
[] |
no_license
|
javanese-programmer/basic-machine-learning-b7
|
https://github.com/javanese-programmer/basic-machine-learning-b7
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 86,791 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/02_using_basemaps.ipynb)
# [](https://gishub.org/leafmap-pangeo)
#
# Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
# # !pip install leafmap
import leafmap
m = leafmap.Map()
m
m = leafmap.Map(google_map="HYBRID")
m
m = leafmap.Map(google_map="TERRAIN")
m
m = leafmap.Map()
m.add_basemap("HYBRID")
m
m = leafmap.Map()
m.add_tile_layer(url="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}", name="Google Satellite")
m
m = leafmap.Map()
naip_url = 'https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?'
m.add_wms_layer(url=naip_url, layers='0', name='NAIP Imagery', format='image/png', shown=True)
m
# +
m = leafmap.Map(google_map="HYBRID")
url1 = "https://www.fws.gov/wetlands/arcgis/services/Wetlands/MapServer/WMSServer?"
m.add_wms_layer(url1, layers="1",format='image/png', transparent=True, name="NWI Wetlands Vector")
url2 = "https://www.fws.gov/wetlands/arcgis/services/Wetlands_Raster/ImageServer/WMSServer?"
m.add_wms_layer(url2, layers="0",format='image/png', transparent=True, name="NWI Wetlands Raster")
m.add_legend(builtin_legend="NWI")
m
| 1,567 |
/HW1/hw1_code_and_report.ipynb
|
b2a2fc9d7ea04f3d6eae506f0672e0ada97886d9
|
[] |
no_license
|
TohaRhymes/immunogenomics_ib_autumn2020
|
https://github.com/TohaRhymes/immunogenomics_ib_autumn2020
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 265,527 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Immunogenomics homework. __Basic analysis of antibody sequences__
#
# __IB, autumn 2020.__
# __Anton Changalidi__
#
# __IgBlast + Python were used for data processing__
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt
import operator as op
memory = pd.read_csv('memory.txt', sep = '\t')
naive = pd.read_csv('naive.txt', sep = '\t')
plasma = pd.read_csv('plasma.txt', sep = '\t')
# ### Task 1.
# Analyze the joint usage of V and J genes: for each sequence find the closest germline V and J genes, list all VJ pairs occurring in the sample, and create a plot (e.g., a heatmap) showing the number of sequences for each VJ pair.
# +
def return_gene(string):
end = string.find('*')
return string[:end]
def draw_heatmap(data, name):
genes_names = ['v_call', 'j_call']
genes = []
for gene in genes_names:
genes.append(data[gene].apply(return_gene).to_list())
genes = np.array(genes)
v_map = {j:i for i, j in enumerate(np.unique(genes[0]))}
j_map = {j:i for i, j in enumerate(np.unique(genes[1]))}
heatmap = np.zeros((len(v_map), len(j_map)))
v_genes = {i:0 for i in np.unique(genes[0])}
    for v_gene, j_gene in genes.T:
        heatmap[v_map[v_gene]][j_map[j_gene]] += 1
        v_genes[v_gene] += 1
fig, ax = plt.subplots(figsize=(7,10))
ax = sns.heatmap(heatmap, xticklabels=j_map.keys(), yticklabels=v_map.keys(), cmap = sns.cm.rocket_r)
ax.set_title(name, fontsize=16)
plt.show()
return v_genes
datasets = {'memory':memory, 'naive':naive, 'plasma':plasma}
v_genes = {}
for k in datasets:
v_genes.update({k:draw_heatmap(datasets[k], k)})
v_genes.update({k:{k: v for k, v in sorted(v_genes[k].items(), key=lambda item: -item[1])}})
# -
memory['v_call']
# The heatmaps above show how the VJ combinations are represented in the different immune cell types (144 distinct VJ combinations in naive cells, 171 in memory cells and 32 in plasma cells). Plasma cells show few combinations (the most represented pair being IGHV3-7 with IGHJ5); this may be because these cells descend from a single B cell with one V+J combination (directed against a single antigen).
#
# In memory and naive cells the most represented J gene is IGHJ4, and memory cells carry more combinations than the other cell types (probably because these cells hold receptors responsive to a variety of antigens).
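# A short sketch to reproduce the pair counts quoted above (it assumes the `datasets` dict and the `return_gene` helper defined earlier):
for name, df in datasets.items():
    vj_pairs = set(zip(df['v_call'].apply(return_gene), df['j_call'].apply(return_gene)))
    print(name, len(vj_pairs))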
# ### Task 2
# Find the 10 most used V genes in the sample and analyze their mutability. For
# each gene, analyze the sequences aligned to it and compute the number of
# differences in each alignment. Mutability is the distribution of the number of
# differences. Visualize the mutability of the 10 most used V genes in any convenient
# form (e.g., using a boxplot).
# +
def draw_mutability(data, name):
def mutability(string1, string2):
mismatches = 0
for i, j in zip(string1, string2):
if i != j:
mismatches += 1
return mismatches/len(string1)
genes = v_genes[name]
i = 0
genes_count = {}
for g in genes:
i+=1
genes_count.update({f'{g}, amount = {genes[g]}':[]})
if i >= 10:
break
for row in data.iterrows():
cur_gene = return_gene(row[1]['v_call'])
if f'{cur_gene}, amount = {genes[cur_gene]}' in genes_count:
genes_count[f'{cur_gene}, amount = {genes[cur_gene]}'].append(mutability(row[1]['v_sequence_alignment'], row[1]['v_germline_alignment']))
plot_data = genes_count
# sort keys and values together
sorted_keys, sorted_vals = zip(*(plot_data.items()))
fig, ax = plt.subplots(figsize=(10,7))
ax.set_title(name, fontsize=16)
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
# almost verbatim from question
sns.set(context='notebook', style='whitegrid')
sns.utils.axlabel(xlabel="V gene", ylabel="Mutability", fontsize=16)
sns.boxplot(data=sorted_vals, width=.18)
# sns.swarmplot(data=sorted_vals, size=6, edgecolor="black", linewidth=.9)
# category labels
plt.xticks(plt.xticks()[0], sorted_keys)
plt.show()
datasets = {'memory':memory, 'naive':naive, 'plasma':plasma}
for k in datasets:
draw_mutability(datasets[k], k)
# -
# Naive cells show the lowest fraction of mutations. This makes sense, since they have not yet gone through specialization. Mutations appear in plasma and memory cells after the encounter with antigens, so these cells are more variable. Plasma cells also show a wider spread (possibly because they need to provide an immune response to a larger number of antigens).
# ### Task 3
# Visualize distributions of CDR3 lengths.
# +
def draw_cdr(data, name):
cdrs = []
for row in data.iterrows():
cdrs.append(len(row[1]['cdr3']))
fig, ax = plt.subplots(figsize=(10,7))
ax.set_title(name, fontsize=16)
sns.distplot(cdrs)
sns.utils.axlabel(ylabel = '', xlabel="CDR3 length distribution", fontsize=16)
plt.show()
datasets = {'memory':memory,
'naive':naive,
'plasma':plasma}
for k in datasets:
draw_cdr(datasets[k], k)
# -
# Naive and memory cells have a similar peak in the CDR3 length distribution, around 45. The CDR3 distribution of plasma cells differs from the other two (a single peak dominates, and almost nothing else). Most likely this follows from the low variability of V+J gene pairs in these cells (one pair stood out), which may indicate high specificity towards a particular antigen.
# ### Task 4
# Compute the fraction of non-productive sequences in the sample. Both IgBlast and DiversityAnalyzer report productiveness of input sequences as a part of the output.
# +
def count_non_productive(data, name):
non_productive_count = 0
sum_count = 0
for row in data.iterrows():
if row[1]['productive'] == 'F':
non_productive_count += 1
sum_count += 1
return non_productive_count / sum_count
datasets = {'memory':memory,
'naive':naive,
'plasma':plasma}
for k in datasets:
print(f'Fraction of non-productive sequences in the {k} sample: {count_non_productive(datasets[k], k)*100}%')
# -
# The fraction of non-productive sequences is fairly small in all three cell types; the highest fraction is observed in memory cells, which may be explained by weaker selection and higher mutability in these cells.
| 6,794 |
/module3-reshape-data/LS_DS_123_Reshape_data.ipynb
|
8bac6482429aa27ce4d79edcb0f1ce9630f01de3
|
[
"MIT"
] |
permissive
|
rowebyrowe/DS-Unit-1-Sprint-2-Data-Wrangling
|
https://github.com/rowebyrowe/DS-Unit-1-Sprint-2-Data-Wrangling
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 8,022,317 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="oeWq7mGFZm6L"
# _Lambda School Data Science_
#
# # Reshape data
#
# Objectives
# - understand tidy data formatting
# - melt and pivot data with pandas
#
# Links
# - [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
# - [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
# - Tidy Data
# - Reshaping Data
# - Python Data Science Handbook
# - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
# - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
#
# Reference
# - pandas documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
# - Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
# + [markdown] colab_type="text" id="u2-7QkU3eR_e"
# ## Why reshape data?
#
# #### Some libraries prefer data in different formats
#
# For example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always).
#
# > "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format ia alternately called “long-form” or “tidy” data and is described in detail by Hadley Wickham. The rules can be simply stated:
#
# > - Each variable is a column
# - Each observation is a row
#
# > A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
#
# #### Data science is often about putting square pegs in round holes
#
# Here's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling!
# + [markdown] colab_type="text" id="3av1dYbRZ4k2"
# ## Upgrade Seaborn
#
# Run the cell below which upgrades Seaborn and automatically restarts your Google Colab Runtime.
# + colab_type="code" id="AOLhnquFxao7" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="3374d198-506d-44d9-da0f-a907f32e91d7"
# !pip install seaborn --upgrade
import os
os.kill(os.getpid(), 9)
# + id="nmBBOJ18lnE5" colab_type="code" colab={}
import seaborn as sns
# + [markdown] colab_type="text" id="tE_BXOAjaWB_"
# ## Hadley Wickham's Examples
#
# From his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)
# + colab_type="code" id="PL6hzS3yYsNt" colab={}
# %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
table1 = pd.DataFrame(
[[np.nan, 2],
[16, 11],
[3, 1]],
index=['John Smith', 'Jane Doe', 'Mary Johnson'],
columns=['treatmenta', 'treatmentb'])
table2 = table1.T
# + [markdown] colab_type="text" id="YvfghLi3bu6S"
# "Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild.
#
# The table has two columns and three rows, and both rows and columns are labelled."
# + colab_type="code" id="5ZidjYdNikwF" outputId="b39840a6-6e73-45d6-ba54-05c8c236a4c0" colab={"base_uri": "https://localhost:8080/", "height": 142}
table1
# + [markdown] colab_type="text" id="wIfPYP4rcDbO"
# "There are many ways to structure the same underlying data.
#
# Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different."
# + colab_type="code" id="mYBLbVTVKR2h" outputId="159282a4-a86d-43c6-e824-9ab032c16c09" colab={"base_uri": "https://localhost:8080/", "height": 111}
table2
# + [markdown] colab_type="text" id="RaZuIwqNcRpr"
# "Table 3 reorganises Table 1 to make the values, variables and obserations more clear.
#
# Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable."
#
# | name | trt | result |
# |--------------|-----|--------|
# | John Smith | a | - |
# | Jane Doe | a | 16 |
# | Mary Johnson | a | 3 |
# | John Smith | b | 2 |
# | Jane Doe | b | 11 |
# | Mary Johnson | b | 1 |
# + [markdown] colab_type="text" id="8P88YyUvaxAV"
# ## Table 1 --> Tidy
#
# We can use the pandas `melt` function to reshape Table 1 into Tidy format.
# + colab_type="code" id="vOUzvON0t8El" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3c772fba-02fb-47d4-a179-a68c47733313"
table1.columns.tolist()
# + id="TGey3BI1l9az" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d06b376b-d854-4145-8a98-9fc925d2c0f0"
table1.index.tolist()
# + id="21idar_BmA3C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 266} outputId="7b076e70-4d38-4707-f34f-a869b15467b7"
tidy = table1.reset_index().melt(id_vars='index')
tidy = tidy.rename(columns={
'index':'name',
'variable':'trt',
'value':'result'
})
tidy['trt'] = tidy['trt'].str.replace('treatment', '')
tidy.set_index('name')
# + [markdown] colab_type="text" id="uYb2vG44az2m"
# ## Table 2 --> Tidy
# + colab_type="code" id="yP_oYbGsazdU" colab={"base_uri": "https://localhost:8080/", "height": 111} outputId="bf4534b7-95f0-4df9-a4de-859228ea02c1"
table2
# + id="L8lDbds57tIj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 266} outputId="ea994ff3-a555-468f-c768-01c0c1697c87"
tidy2 = table2.reset_index().melt(id_vars='index')
tidy2 = tidy2.rename(columns={
'index': 'treatment',
'variable': 'name',
'value': 'result'
})
tidy2['treatment'] = tidy2['treatment'].str.replace('treatment', '')
tidy2 = tidy2.sort_values(by='treatment',axis='index')
tidy2.set_index('treatment')
# + [markdown] id="pQmh9666DgWZ" colab_type="text"
# So tidy format and melt seem to be about consolidation.
# + [markdown] colab_type="text" id="kRwnCeDYa27n"
# ## Tidy --> Table 1
#
# The `pivot_table` function is the inverse of `melt`.
# + colab_type="code" id="BxcwXHS9H7RB" colab={"base_uri": "https://localhost:8080/", "height": 142} outputId="0560865a-31f9-4065-8f2f-75afc9dad87c"
table1
# + id="zzaRHnrum_i-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="5f56011e-c631-4b67-9b35-0d798a19eef9"
tidy
# + id="nwyF-UOgnIcS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="9fb46004-0a06-40f9-e90f-ec7f435e5fc4"
tidy.pivot_table(index='name', columns='trt', values='result')
# + [markdown] colab_type="text" id="nR4dlpFQa5Pw"
# ## Tidy --> Table 2
# + colab_type="code" id="flcwLnVdJ-TD" colab={"base_uri": "https://localhost:8080/", "height": 111} outputId="0dd66737-429a-4f22-aa31-4d0e5fb77864"
table2
# + id="YcOnRD3t_Whf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="4e75fc2b-00ef-4673-e83b-f758bc3b463c"
tidy2
# + id="Ahd-UgUs_wRY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 142} outputId="bf38e952-28db-4e56-983a-005efd7da530"
tidy2.pivot_table(index='treatment', columns = 'name', values = 'result')
# + [markdown] id="2FRgeCkprmdi" colab_type="text"
# # Seaborn example
# The rules can be simply stated:
#
# * Each variable is a column
# * Each observation is a row
#
# A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
# + id="6w6AfR2QrzWh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 163} outputId="8cf4fd23-8542-466d-dfca-f678423d3711"
sns.catplot(x='trt',y='result', col='name', kind='bar', data=tidy, height=2)
# + [markdown] colab_type="text" id="7OwdtbQqgG4j"
# ## Load Instacart data
#
# Let's return to the dataset of [3 Million Instacart Orders](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)
# + [markdown] colab_type="text" id="RdXhRmSbgbBc"
# If necessary, uncomment and run the cells below to re-download and extract the data
# + colab_type="code" id="SoX-00UugVZD" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="a79d6797-53eb-41cc-fc75-95b9166409e5"
# !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# + colab_type="code" id="tDGkv5vngXTw" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="62913158-0089-4010-eeae-ee125c760491"
# !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# + [markdown] colab_type="text" id="covQKAHggl80"
# Run these cells to load the data
# + colab_type="code" id="dsbev9Gi0JYo" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cf157980-99ec-42bb-cc1e-504ca121d69a"
# %cd instacart_2017_05_01
# + colab_type="code" id="1AHEpFPcMTn1" colab={}
products = pd.read_csv('products.csv')
order_products = pd.concat([pd.read_csv('order_products__prior.csv'),
pd.read_csv('order_products__train.csv')])
orders = pd.read_csv('orders.csv')
# + [markdown] colab_type="text" id="bmgW_DxohBV5"
# ## Goal: Reproduce part of this example
#
# Instead of a plot with 50 products, we'll just do two — the first products from each list
# - Half And Half Ultra Pasteurized
# - Half Baked Frozen Yogurt
# + colab_type="code" id="p4CdH8hkg5RJ" outputId="cb210b5c-8406-49c8-84ad-4d518ee5dd03" colab={"base_uri": "https://localhost:8080/", "height": 383}
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'
example = Image(url=url, width=600)
display(example)
# + [markdown] colab_type="text" id="VgXHJM-mhvuo"
# So, given a `product_name` we need to calculate its `order_hour_of_day` pattern.
# + [markdown] colab_type="text" id="PZxgqPU7h8cj"
# ## Subset and Merge
# + colab_type="code" id="6IymsG0BRYQY" colab={}
product_names = ['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized']
# + colab_type="code" id="LUoNA7_UTNkp" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f9780bf2-7dfc-45f4-8c98-ffe3e7d5e284"
products.columns.tolist()
# + id="JfXoEMZxsisT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="092a0e40-d84e-41d9-be31-f938bb06cc95"
orders.columns.tolist()
# + id="mfRhyqGmsnoh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="11dc368c-3508-495b-c095-1fca6bd84220"
order_products.columns.tolist()
# + id="PCRxLG3Zs9Vt" colab_type="code" colab={}
merged = (products[['product_id', 'product_name']]
.merge(order_products[['order_id','product_id']])
.merge(orders[['order_id','order_hour_of_day']]))
# + id="gcnAaEf3tjBc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5a924233-8a96-4b41-df67-104e1b74711b"
products.shape, order_products.shape, orders.shape, merged.shape
# + id="Cb2S-Al0tvZa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="004a247d-b8b7-40b6-f2f0-e6f55816aefc"
merged.head()
# + id="73Y7F7L4t2Tz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8ed0c83c-bd2b-482d-a249-1e4038ebdcec"
condition = merged['product_name'].isin(['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized'])
subset = merged[condition]
merged.shape, subset.shape
# + [markdown] colab_type="text" id="lOw6aZ3oiPLf"
# ## 4 ways to reshape and plot
# + [markdown] colab_type="text" id="5W-vHcWZiFKv"
# ### 1. value_counts
# + colab_type="code" id="QApT8TeRTsgh" colab={}
froyo = subset[subset['product_name']=='Half Baked Frozen Yogurt']
cream = subset[subset['product_name']=="Half And Half Ultra Pasteurized"]
# + id="1GP0vdZH4TQd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="bcaa4d10-963f-4600-b678-936f0f581f91"
(cream['order_hour_of_day']
.value_counts(normalize=True)
.sort_index()
.plot())
(froyo['order_hour_of_day']
.value_counts(normalize=True)
.sort_index()
.plot())
# + [markdown] colab_type="text" id="CiB9xmZ4iIqt"
# ### 2. crosstab
# + colab_type="code" id="aCzF5spQWd_f" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="f97715d1-e799-4ba3-bd3a-2122949d080c"
(pd.crosstab(subset['order_hour_of_day'],subset['product_name'],normalize='columns')* 100).plot()
# + [markdown] colab_type="text" id="wCp-qjbriUze"
# ### 3. pivot_table
# + colab_type="code" id="O8d6_TDKNsxB" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="f1d39132-dfb7-41b5-d44e-249c242fd2bf"
subset.pivot_table(index='order_hour_of_day',
columns='product_name',
values='order_id',
aggfunc=len).plot()
# + [markdown] colab_type="text" id="48wCoJowigCf"
# ### 4. melt
# + colab_type="code" id="VnslvFfvYSIk" colab={"base_uri": "https://localhost:8080/", "height": 231} outputId="05e74983-f73a-4c77-ef0d-56424016c61e"
table = pd.crosstab(subset['order_hour_of_day'],
subset['product_name'],
normalize = True)
melted = (table
.reset_index()
.melt(id_vars='order_hour_of_day')
.rename(columns={
'order_hour_of_day': 'Hour of Day Ordered',
'product_name': 'Product',
'value': 'Percent of Orders by Product'
}))
sns.relplot(x='Hour of Day Ordered',
y='Percent of Orders by Product',
hue='Product',
data=melted,
kind='line')
# + [markdown] id="5l3tyo6obm5l" colab_type="text"
# # ASSIGNMENT
# - Replicate the lesson code
# - Complete the code cells we skipped near the beginning of the notebook
# - Table 2 --> Tidy
# - Tidy --> Table 2
# + [markdown] id="76pc3DBwbm5m" colab_type="text"
# - Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.
# + id="nTL7HNUtbm5m" colab_type="code" colab={}
flights = sns.load_dataset('flights')
flights.head()
# + id="YP-7IFMjbm5s" colab_type="code" colab={}
flights.columns.tolist()
# + id="4sVrOvNbAWRY" colab_type="code" colab={}
flights.index.tolist()
# + id="4c7aJk5MAaxE" colab_type="code" colab={}
flights.pivot_table(index='year',columns='month',values='passengers')
# + [markdown] id="8wbkIANwbm5w" colab_type="text"
# # STRETCH OPTIONS
#
# _Try whatever sounds most interesting to you!_
#
# - Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"
# - Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases"
# - Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis) (a short sketch follows after this list)
# - Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
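# One possible sketch for the "most recent order per user" item above (it assumes the `orders` DataFrame loaded earlier and that `order_number` increases with time within each user):
most_recent = (orders
               .sort_values('order_number')
               .drop_duplicates('user_id', keep='last'))
most_recent.head()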
# + id="6Y9jTPL2BB2T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bb6f12d5-bf98-42ae-d431-29585ae099b3"
'''most recent order
I would get the order id,maybe the time of day, the products that were purchased'''
products.columns.tolist()
# + id="sDUqKEzJN3aT" colab_type="code" colab={}
order_products.columns.tolist()
# + id="YmVSW45HN7Xc" colab_type="code" colab={}
orders.columns.tolist()
# + id="EuXo5JXBdzSV" colab_type="code" colab={}
# to_csv()  # look that up; it is applied below as final.to_csv()
# + id="M_jYTlvQN98_" colab_type="code" colab={}
'''order day of week because most recent,
order hour of day so we can see the most recent hour in the recent day,
user id so we can see who ordered what,
product name so I can show what was ordered,
order_number because that show the sequence so I dont need the times'''
#product_name,order_number,user_id
#from orders:user_id, order_number, order_id
#from products:product_name, product_id
#from order_products:order_id, product_id
merged = (products[['product_id', 'product_name']]
.merge(order_products[['order_id', 'product_id']])
.merge(orders[['order_id','user_id','order_number']]))
# + id="uHxD0TuYWpZO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="db3447a3-4167-4385-84e3-69868a65806a"
subset = merged[['product_name','order_number','user_id']]
subset.head()
# + id="1hR5CXffd1yk" colab_type="code" colab={}
final = subset.groupby(['user_id'])[['order_number','product_name']].agg({'order_number':'max'})
# + id="yUDDC881XTyI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="d48a58dc-1731-4966-ae1f-5eae0b64f0f4"
final.to_csv()
#whew that's a lot of users
# + id="zDo2yZwKmRBF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2031} outputId="39a69c28-509c-4c4b-b7dd-0d3fea450985"
final
# + id="82cDhydXX0b3" colab_type="code" colab={}
'''tidy = subset.reset_index().melt()
tidy = tidy.rename(columns={
'user_id': 'User ID',
'order_number': 'Purchase Number',
'product_name': 'Product Name'
})
tidy.set_index('Purchase Number')
'''
# + id="EkB0zGykZw0w" colab_type="code" colab={}
| 18,029 |
/Car_price_prediction_linear_regression/price_prediction.ipynb
|
688c4cab708da838ce0491f22bbfe79be0ebe3b9
|
[] |
no_license
|
geetakrishna1994/upgrad
|
https://github.com/geetakrishna1994/upgrad
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,438,983 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set()
pd.set_option('display.max_columns',100)
# # Data Import
prices = pd.read_csv('CarPrice_Assignment.csv')
prices.info()
prices.shape
prices.head()
# The data set has 205 rows and 26 columns, with 11 categorical columns and 14 continuous columns. There are no null values in the data set, as seen above.
# ### Settings car_ID as index
prices.set_index('car_ID',inplace = True)
prices.head()
# # Data Cleaning
# ## Null value check
prices.isnull().sum()
# There are no null values present in the dataframe, as seen above.
# ## Duplicate value check
prices.duplicated().sum()
# ## Converting symboling from continuous to categorical
prices.symboling = prices.symboling.astype('category')
# ## Converting object columns into category
object_list = prices.select_dtypes(include = 'object').columns
prices[object_list] = prices[object_list].astype('category')
prices.info()
# ## New variable creation
# ### Company Name is extracted from CarName
prices.CarName.unique()
# We can see that CarName is actually composed of two words separated by a space, with the first word being the company name. We can create a new variable called Company and extract it from the CarName column.
prices['Company'] = prices.CarName.apply(lambda x: x.split(' ')[0])
prices.Company.unique()
# We can see that some of the company names are misspelled or differ in case, like `maxda`, `Nissan`, `porcshce`, `toyouta`, `vokswagen`, `vw`. Hence these are corrected below.
prices.loc[prices.Company == 'maxda','Company'] = 'mazda'
prices.loc[prices.Company == 'Nissan','Company'] = 'nissan'
prices.loc[prices.Company == 'porcshce','Company'] = 'porsche'
prices.loc[prices.Company == 'toyouta','Company'] = 'toyota'
prices.loc[prices.Company == 'vokswagen','Company'] = 'volkswagen'
prices.loc[prices.Company == 'vw','Company'] = 'volkswagen'
prices.Company.unique()
prices.Company = prices.Company.astype('category')
# ## Dropping columns
# `CarName` column is not necessary hence the column is dropped
prices.drop('CarName',axis = 1,inplace = True)
prices.info()
# ## EDA
# cat_cols has list of all columns of type category
cat_cols = prices.select_dtypes(include = 'category').columns.tolist()
prices[cat_cols].describe()
i= 1
total = len(cat_cols)
plt.figure(figsize=(20,15))
plt.suptitle('Price variations for all categorical variables',va = 'bottom',fontsize = 24)
#fig,axes = plt.subplots(nrows = 3,ncols = 4)
for feature in cat_cols:
plt.subplot(3,4,i)
sns.boxplot(x = feature,y='price',data = prices)
plt.xticks(rotation = 'vertical')
i = i+1
plt.tight_layout()
plt.subplots_adjust(wspace = 0.4,hspace = 0.4)
plt.show()
# From the above figure we can see that price has a strong correlation with almost all the variables except for `fueltype` and `doornumber`.
#cont_cols has list of all columns which are continuous variables except for price
cont_cols = prices.select_dtypes(exclude = 'category').columns.tolist()
cont_cols.pop(cont_cols.index('price'))
print(len(cont_cols))
i= 1
total = len(cont_cols)
plt.figure(figsize=(20,15))
plt.suptitle('Price variations for all continuous variables',va = 'bottom',fontsize = 24)
#fig,axes = plt.subplots(nrows = 3,ncols = 4)
for feature in cont_cols:
plt.subplot(4,4,i)
sns.scatterplot(x = feature,y='price',data = prices)
plt.xticks(rotation = 'vertical')
i = i+1
plt.tight_layout()
plt.subplots_adjust(wspace = 0.4,hspace = 0.4)
plt.show()
# We can see that most of the continuous variables are strongly correlated with the price of the car except for `stroke`, `peakrpm` and `compressionratio`.
plt.figure(figsize = (8,12))
plt.suptitle('Distribution of compression ratio')
plt.subplot(3,1,1)
sns.distplot(prices.compressionratio)
plt.xticks(rotation = 'vertical')
plt.subplot(3,1,2)
sns.countplot(x = 'compressionratio',data = prices)
plt.xticks(rotation = 'vertical')
plt.subplot(3,1,3)
sns.scatterplot(x = 'compressionratio',y= 'price',data = prices)
plt.xticks(rotation = 'vertical')
plt.tight_layout()
plt.show()
# For compression ratio we can see that two clusters form, around 10 and around 20. There is also no noticeable trend between price and compression ratio inside the clusters, hence we can convert compressionratio into a categorical variable: assign "low" to all values around 10 and "high" to values around 20.
prices.loc[prices.compressionratio < 20, 'CR'] = 'low'
prices.loc[prices.compressionratio >= 20, 'CR'] = 'high'
prices.CR = prices.CR.astype('category')
prices.head()
sns.boxplot(x = 'CR',y = 'price',data = prices)
plt.show()
# We can see that cars with a higher compression ratio have a higher median price.
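# A quick check of that claim (a sketch; it assumes the `prices` DataFrame with the `CR` column created above):
prices.groupby('CR')['price'].median()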
# Plots showing scatter plots for all possible pairs of continuous variables
sns.pairplot(prices[cont_cols])
plt.show()
# We can spot a lot of correlated variables; we will consider all of them for now and remove them one by one after checking the VIF.
plt.figure(figsize = (16, 10))
sns.heatmap(prices.corr(), annot = True, cmap="YlGnBu")
plt.show()
# We can see that certain features like enginelocation, drivewheel, cylindernumber and symboling have a high correlation with price.
# We can also see that the company of the car is correlated with its price.
prices_df = prices.copy()
# # Data Preparation
# ## Creating dummies for categorical variables
prices[cat_cols].describe()
symboling_dummies = pd.get_dummies(prices.symboling,prefix = 'sym',drop_first = True)
fueltype_dummies = pd.get_dummies(prices.fueltype,prefix = 'ft',drop_first = True)
aspiration_dummies = pd.get_dummies(prices.aspiration,prefix = 'asp',drop_first = True)
doornumber_dummies = pd.get_dummies(prices.doornumber,prefix = 'dn',drop_first = True)
carbody_dummies = pd.get_dummies(prices.carbody,prefix = 'cb',drop_first = True)
drivewheel_dummies = pd.get_dummies(prices.drivewheel,prefix = 'dw',drop_first = True)
enginelocation_dummies = pd.get_dummies(prices.enginelocation,prefix = 'el',drop_first = True)
enginetype_dummies = pd.get_dummies(prices.enginetype,prefix = 'et',drop_first = True)
cylindernumber_dummies = pd.get_dummies(prices.cylindernumber,prefix = 'cn',drop_first = True)
fuelsystem_dummies = pd.get_dummies(prices.fuelsystem,prefix = 'fs',drop_first = True)
Company_dummies = pd.get_dummies(prices.Company,prefix = 'company',drop_first = True)
CR_dummies = pd.get_dummies(prices.CR,prefix = 'CR',drop_first = True)
prices = pd.concat([prices,symboling_dummies,fueltype_dummies,aspiration_dummies,doornumber_dummies,carbody_dummies,drivewheel_dummies,enginelocation_dummies,enginetype_dummies,cylindernumber_dummies,fuelsystem_dummies,Company_dummies,CR_dummies],axis = 1)
prices.drop(cat_cols,axis=1,inplace = True)
prices.drop(['compressionratio','CR'],axis = 1,inplace = True)
prices.head()
# ## Splitting into train and test sets
from sklearn.model_selection import train_test_split
prices_train,prices_test = train_test_split(prices,test_size = 0.3,random_state = 8)
print(prices_train.shape)
print(prices_test.shape)
# ## Standardization of continous values
# All continuous values are scaled using MinMaxScaler from sklearn.
from sklearn.preprocessing import MinMaxScaler
minmax_scaler = MinMaxScaler()
prices.head()
cont_cols.append('price')
cont_cols.pop(cont_cols.index('compressionratio'))
cont_cols
prices_train[cont_cols] = minmax_scaler.fit_transform(prices_train[cont_cols])
prices_train.head()
prices_train.describe()
# # Modelling
plt.figure(figsize = (16, 10))
sns.heatmap(prices_train.corr(), annot = True, cmap="YlGnBu")
plt.show()
# We can see that horsepower is highly correlated with price, and it is also a variable that is generally looked at for assessing the performance of a car, so it is a good place to start modelling.
# ## Dividing into Feature and Output sets
y_train = prices_train.pop('price')
X_train = prices_train
# ## Model1 : horsepower
import statsmodels.api as sm
X_train_lm1 = sm.add_constant(X_train['horsepower'])
lr1 = sm.OLS(y_train,X_train_lm1).fit()
lr1.params
# The positive coefficient here for horsepower makes sense: as engine size increases the power also increases, which makes the car priced higher.
# Let's visualise the data with a scatter plot and the fitted regression line
plt.scatter(X_train_lm1.iloc[:, 1], y_train)
plt.plot(X_train_lm1.iloc[:, 1], 0.127 + 0.462*X_train_lm1.iloc[:, 1], 'r')
plt.show()
print(lr1.summary())
# From the model statistics we can see that a linear model with horsepower alone has an R2 of 0.666 and an adj. R2 of 0.663, and the coefficients are also significant.
# We can see that the other variables correlated with price are also highly correlated with each other. Let's see if we can achieve a better model by taking all the variables.
# ## Model2: All features
X_train_lm2 = sm.add_constant(X_train)
lr2 = sm.OLS(y_train,X_train_lm2).fit()
print(lr2.params)
print(lr2.summary())
# We can see that the R2 of the model is pretty high, about 97%, with an adjusted R2 of about 95%. But the F-statistic is pretty low and the p-values of the majority of coefficients are far greater than the usual 5% threshold, hence these coefficients cannot be considered significant for drawing conclusions.
# Let us check what are the VIF values to see what are strongly dependent variables
# ### VIF CHECK
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train.columns
vif_df['VIF'] = [variance_inflation_factor(X_train.values, i) for i in range(X_train.shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
# We can see very high multicollinearity: only 3 of the 68 features have a VIF near 5.
# Let's try another model now using RFE, selecting the top 30 features.
# ## Model3: RFE (Top 30)
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
lr_rfe30 = LinearRegression()
lr_rfe30.fit(X_train,y_train)
rfe30 = RFE(lr_rfe30,30)
rfe30 = rfe30.fit(X_train,y_train)
X_train.columns[rfe30.support_]
col30 = X_train.columns[rfe30.support_]
X_train_lm3 = sm.add_constant(X_train[col30])
lr3 = sm.OLS(y_train,X_train_lm3).fit()
print(lr3.params)
print(lr3.summary())
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train[col30].columns
vif_df['VIF'] = [variance_inflation_factor(X_train[col30].values, i) for i in range(X_train[col30].shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
# Let's drop the features which have a high p-value (i.e., are not significant). Hence `carwidth` is dropped first.
col30 = col30.tolist()
col30.remove('carwidth')
X_train_lm4 = sm.add_constant(X_train[col30])
lr4 = sm.OLS(y_train,X_train_lm4).fit()
print(lr4.summary())
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train[col30].columns
vif_df['VIF'] = [variance_inflation_factor(X_train[col30].values, i) for i in range(X_train[col30].shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
# ### Dropping `carlength`
# +
col30.remove('carlength')
X_train_lm5 = sm.add_constant(X_train[col30])
lr5 = sm.OLS(y_train,X_train_lm5).fit()
print(lr5.summary())
# -
# ### Dropping `fs_spfi`
# +
col30.remove('fs_spfi')
X_train_lm6 = sm.add_constant(X_train[col30])
lr6 = sm.OLS(y_train,X_train_lm6).fit()
print(lr6.summary())
# -
# ### Dropping `el_rear`
# +
col30.remove('el_rear')
X_train_lm7 = sm.add_constant(X_train[col30])
lr7 = sm.OLS(y_train,X_train_lm7).fit()
print(lr7.summary())
# -
# ### Dropping `carheight`
# +
col30.remove('carheight')
X_train_lm8 = sm.add_constant(X_train[col30])
lr8 = sm.OLS(y_train,X_train_lm8).fit()
print(lr8.summary())
# -
# ### Dropping `cb_hardtop`
# +
col30.remove('cb_hardtop')
X_train_lm9 = sm.add_constant(X_train[col30])
lr9 = sm.OLS(y_train,X_train_lm9).fit()
print(lr9.summary())
# -
# ### Dropping `cb_sedan`
# +
col30.remove('cb_sedan')
X_train_lm10 = sm.add_constant(X_train[col30])
lr10 = sm.OLS(y_train,X_train_lm10).fit()
print(lr10.summary())
# -
# ### Dropping `cb_wagon`
# +
col30.remove('cb_wagon')
X_train_lm11 = sm.add_constant(X_train[col30])
lr11 = sm.OLS(y_train,X_train_lm11).fit()
print(lr11.summary())
# -
# ### Dropping `cb_hatchback`
# +
col30.remove('cb_hatchback')
X_train_lm12 = sm.add_constant(X_train[col30])
lr12 = sm.OLS(y_train,X_train_lm12).fit()
print(lr12.summary())
# -
# ### Dropping `fs_2bbl`
# +
col30.remove('fs_2bbl')
X_train_lm13 = sm.add_constant(X_train[col30])
lr13 = sm.OLS(y_train,X_train_lm13).fit()
print(lr13.summary())
# -
# ### Dropping `company_mitsubishi`
# +
col30.remove('company_mitsubishi')
X_train_lm14 = sm.add_constant(X_train[col30])
lr14 = sm.OLS(y_train,X_train_lm14).fit()
print(lr14.summary())
# -
# All features are significant, check for VIF.
# ### VIF CHECK
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train[col30].columns
vif_df['VIF'] = [variance_inflation_factor(X_train[col30].values, i) for i in range(X_train[col30].shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
# ### Dropping `et_rotor`
# +
col30.remove('et_rotor')
X_train_lm15 = sm.add_constant(X_train[col30])
lr15 = sm.OLS(y_train,X_train_lm15).fit()
print(lr15.summary())
# -
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train[col30].columns
vif_df['VIF'] = [variance_inflation_factor(X_train[col30].values, i) for i in range(X_train[col30].shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
# ### Dropping `enginesize`
# +
col30.remove('enginesize')
X_train_lm16 = sm.add_constant(X_train[col30])
lr16 = sm.OLS(y_train,X_train_lm16).fit()
print(lr16.summary())
# -
# After removing `enginesize`, some features have become insignificant. Hence those are removed next.
# ### Dropping `cn_six`
# +
col30.remove('cn_six')
X_train_lm17 = sm.add_constant(X_train[col30])
lr17 = sm.OLS(y_train,X_train_lm17).fit()
print(lr17.summary())
# -
# ### Dropping `cn_twelve`
# +
col30.remove('cn_twelve')
X_train_lm18 = sm.add_constant(X_train[col30])
lr18 = sm.OLS(y_train,X_train_lm18).fit()
print(lr18.summary())
# -
# ### Dropping `cn_three`
# +
col30.remove('cn_three')
X_train_lm19 = sm.add_constant(X_train[col30])
lr19 = sm.OLS(y_train,X_train_lm19).fit()
print(lr19.summary())
# -
# ### Dropping `cn_two`
# +
col30.remove('cn_two')
X_train_lm20 = sm.add_constant(X_train[col30])
lr20 = sm.OLS(y_train,X_train_lm20).fit()
print(lr20.summary())
# -
# ### Dropping `et_ohcf`
# +
col30.remove('et_ohcf')
X_train_lm21 = sm.add_constant(X_train[col30])
lr21 = sm.OLS(y_train,X_train_lm21).fit()
print(lr21.summary())
# -
# ### Dropping `company_saab`
# +
col30.remove('company_saab')
X_train_lm22 = sm.add_constant(X_train[col30])
lr22 = sm.OLS(y_train,X_train_lm22).fit()
print(lr22.summary())
# -
# ### Dropping `company_volvo`
# +
col30.remove('company_volvo')
X_train_lm23 = sm.add_constant(X_train[col30])
lr23 = sm.OLS(y_train,X_train_lm23).fit()
print(lr23.summary())
# -
# ### Dropping `ft_gas`
# +
col30.remove('ft_gas')
X_train_lm23 = sm.add_constant(X_train[col30])
lr23 = sm.OLS(y_train,X_train_lm23).fit()
print(lr23.summary())
# -
# ### Checking VIF
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train[col30].columns
vif_df['VIF'] = [variance_inflation_factor(X_train[col30].values, i) for i in range(X_train[col30].shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
# ### Dropping `curbweight`
# +
col30.remove('curbweight')
X_train_lm24 = sm.add_constant(X_train[col30])
lr24 = sm.OLS(y_train,X_train_lm24).fit()
print(lr24.summary())
# -
# ### Dropping `boreratio`
# +
col30.remove('boreratio')
X_train_lm25 = sm.add_constant(X_train[col30])
lr25 = sm.OLS(y_train,X_train_lm25).fit()
print(lr25.summary())
# -
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif_df = pd.DataFrame()
vif_df['Features'] = X_train[col30].columns
vif_df['VIF'] = [variance_inflation_factor(X_train[col30].values, i) for i in range(X_train[col30].shape[1])]
vif_df['VIF'] = round(vif_df['VIF'], 2)
vif_df = vif_df.sort_values(by = "VIF", ascending = False)
vif_df
prices_test[cont_cols] = minmax_scaler.transform(prices_test[cont_cols])
prices_test.head()
# ## Residual Analysis of the train data
# +
y_train_pred = lr25.predict(X_train_lm25)
# Plot the histogram of the error terms
fig = plt.figure()
sns.distplot((y_train - y_train_pred), bins = 20)
fig.suptitle('Error Terms', fontsize = 20) # Plot heading
plt.xlabel('Errors', fontsize = 18) # X-label
# +
y_train_pred = lr25.predict(X_train_lm25)
# Plot the histogram of the error terms
fig = plt.figure()
sns.scatterplot(y_train,(y_train - y_train_pred))
fig.suptitle('Error Terms', fontsize = 20) # Plot heading
plt.xlabel('Errors', fontsize = 18) # X-label
# -
# We can see that the error terms are distributed normally with mean zero, and that the errors are randomly spread across the price range.
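# A quick numeric check of the zero-mean claim (a sketch reusing the residuals computed above):
residuals = y_train - y_train_pred
print(residuals.mean(), residuals.std())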
# ### Evaluating model on test set
y_test = prices_test.pop('price')
X_test = prices_test
X_test_lm1 = sm.add_constant(X_test[col30])
y_test_pred = lr25.predict(X_test_lm1)
from sklearn.metrics import r2_score
r2_score(y_test, y_test_pred)
# The R2 on the test set is 0.83, which indicates a pretty good model.
# +
# Plotting y_test and y_pred to understand the spread
fig = plt.figure()
plt.scatter(y_test, y_test_pred)
fig.suptitle('y_test vs y_test_pred', fontsize = 20) # Plot heading
plt.xlabel('y_test', fontsize = 18) # X-label
plt.ylabel('y_test_pred', fontsize = 16)
# -
fig = plt.figure()
c = [i for i in range(1,len(y_test)+1,1)]
plt.plot(c,y_test, color="blue", linewidth=2.5, linestyle="-")
plt.plot(c,y_test_pred, color="red", linewidth=2.5, linestyle="-")
fig.suptitle('Actual and Predicted', fontsize=20)
# ### Model Summary:
# 1. The linear model for price has 7 features
# 2. The R2 and the adjusted R2 for the model are 0.921 and 0.917 respectively.
# 3. The R2 on the test set is 0.83
# 4. Error terms are normally distributed with mean zero
# 5. There is no observed pattern in the error terms when plotted against the predicted values
# ## Linear equation and Business Insights
# $price = 0.2795 \times wheelbase+0.5876 \times horsepower-0.2629 \times et\_dohcv+0.2239 \times company\_bmw+0.3701 \times company\_buick+0.2293 \times company\_jaguar+0.2979 \times company\_porsche. $
# - Horsepower has the largest coefficient, which is reasonable because higher-horsepower cars are generally expensive: they tend to be sports cars or have large engines, which are costlier.
# - The company of the car has a positive effect on its price if it is one of bmw, buick, jaguar or porsche.
# - A car with a longer wheelbase has a higher price. (A quick numeric check of the equation follows below.)
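# A small sanity-check sketch of the equation above (the input values below are hypothetical MinMax-scaled features in [0, 1]; the intercept reported by statsmodels is ignored here):
coefs = {'wheelbase': 0.2795, 'horsepower': 0.5876, 'et_dohcv': -0.2629,
         'company_bmw': 0.2239, 'company_buick': 0.3701,
         'company_jaguar': 0.2293, 'company_porsche': 0.2979}
example = {'wheelbase': 0.5, 'horsepower': 0.7, 'et_dohcv': 0, 'company_bmw': 1,
           'company_buick': 0, 'company_jaguar': 0, 'company_porsche': 0}
scaled_price = sum(coefs[k] * example[k] for k in coefs)
print(scaled_price)  # still on the scaled price axis; invert the MinMaxScaler to get an actual price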
# ## Conclusion:
# A linear model has been fitted for the price of the car based on the given features. To build the model, the top 30 features were selected with RFE (recursive feature elimination), and from there features were eliminated one by one by checking p-values and VIF values until we arrived at a satisfactory model.
#
# Horsepower has a VIF of 5.04; although this is slightly higher than 5, it was not removed because of its business importance. Higher horsepower means a larger engine, a larger chassis and better aerodynamics to withstand the higher acceleration and speeds. Therefore it makes a lot of sense for horsepower to be in the equation and to be a major contributor to the price.
| 20,842 |
/Sample_Skeleton.ipynb
|
1005bee9003d38567ca698a76ebe41e3ae25490a
|
[] |
no_license
|
Shivan118/Churn-Prediction
|
https://github.com/Shivan118/Churn-Prediction
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 6,436 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Imports
import os
import h5py
from math import sqrt
import numpy as np
import torch
import torchani
from torchani.units import HARTREE_TO_KCALMOL
# +
#Build TorchANI Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torchani.models.ANI2x(periodic_table_index=False).to(device).double() # Set Model with double precision
species_to_tensor = torchani.utils.ChemicalSymbolsToInts(['H','C','N','O','S','F', 'Cl']) #Species to tensor function
# +
#Functions
def singlepoint_energy_calc(xyz, typ):
"""
    Function that takes coordinates and species and performs a single point energy calculation using
a torchANI model
Parameters:
-----------
xyz: coordinates with shape (1, Na, 3), where Na is number of atoms in molecule
    typ: list of atom types in the molecule with shape (1, Na)
return energy value as tensor
"""
coordinates = torch.tensor(xyz,requires_grad=True,device=device)
species=species_to_tensor(typ).unsqueeze(0).to(device)
_, energy = model((species, coordinates))
return energy
def abs_dif(x,y):
"""
    Function that calculates the absolute difference.
"""
delta = np.subtract(x,y)
return abs(delta)
def interaction_type(dictionary):
"""
Function that takes a dictionary of interaction type and calculates and prints the MAE and RMSE
"""
ani = []
dft = []
for key in dictionary:
ani.append(dictionary[key]['ani'])
dft.append(dictionary[key]['dft'])
ani = np.array(ani)
dft = np.array(dft)
mae = np.average(abs_dif(ani, dft))
rmse = sqrt(np.average(abs_dif(ani,dft)**2))
print('MAE')
print(mae)
print('RMSE')
print(rmse)
print('Dictionary Length: ', len(dictionary))
# +
''' ANI data loader class
Class for loading data stored with the datapacker class.
'''
class anidataloader(object):
''' Contructor '''
def __init__(self, store_file):
if not os.path.exists(store_file):
raise FileNotFoundError('file ' + store_file + 'not found.')
self.store = h5py.File(store_file,'r')
''' Group recursive iterator (iterate through all groups in all branches and return datasets in dicts) '''
def h5py_dataset_iterator(self,g, prefix=''):
for key in g.keys():
item = g[key]
path = '{}/{}'.format(prefix, key)
keys = [i for i in item.keys()]
if isinstance(item[keys[0]], h5py.Dataset): # test for dataset
data = {'path':path}
for k in keys:
if not isinstance(item[k], h5py.Group):
dataset = np.array(item[k].value)
if type(dataset) is np.ndarray:
if dataset.size != 0:
if type(dataset[0]) is np.bytes_:
dataset = [a.decode('ascii') for a in dataset]
data.update({k:dataset})
yield data
else: # test for group (go down)
yield from self.h5py_dataset_iterator(item, path)
''' Default class iterator (iterate through all data) '''
def __iter__(self):
for data in self.h5py_dataset_iterator(self.store):
yield data
''' Returns a list of all groups in the file '''
def get_group_list(self):
return [g for g in self.store.values()]
''' Allows interation through the data in a given group '''
def iter_group(self,g):
for data in self.h5py_dataset_iterator(g):
yield data
''' Returns the requested dataset '''
def get_data(self, path, prefix=''):
item = self.store[path]
path = '{}/{}'.format(prefix, path)
keys = [i for i in item.keys()]
data = {'path': path}
for k in keys:
if not isinstance(item[k], h5py.Group):
dataset = np.array(item[k].value)
if type(dataset) is np.ndarray:
if dataset.size != 0:
if type(dataset[0]) is np.bytes_:
dataset = [a.decode('ascii') for a in dataset]
data.update({k: dataset})
return data
''' Returns the number of groups '''
def group_size(self):
return len(self.get_group_list())
''' Returns the number of items in the entire file '''
def size(self):
count = 0
for g in self.store.values():
count = count + len(g.items())
return count
''' Close the HDF5 file '''
def cleanup(self):
self.store.close()
# -
# # Calculating Interaction Energies
# +
#CCSD(T)/CBS Interaction Energies (kcal/mol) from literature
# ONLY NEEDED IF YOU ARE LOOKING AT X40 HALOGENS!!!!
#Řezáč, J.; Riley, K. E.; Hobza, P. Benchmark Calculations of Noncovalent Interactions of Halogenated Molecules.
#J. Chem. Theory Comput. 2012, 8, 4285–4292. https://doi.org/10.1021/ct300647k.
#In order of sorted molecules. Important to keep in this order.
ccsd = [-0.49,
-1.08,
-0.75,
-0.98,
-0.69,
-1.15,
-1.65,
-1.34,
-4.4,
-6.12,
-1.17,
-2.25,
-1.49,
-2.11,
-9.67,
-10.41,
-9.59,
-6.3,
-14.32,
-11.42,
-3.89,
-3.78
]
# +
# Data files:
data_in = 'h5_files/X40.h5' #Path to H5 File
adl = anidataloader(data_in) #Load H5 file using the AniDataLoader
# -
#Navigate through h5 file as if it were a dictionary
for dat in adl:
for key in dat:
print(key)
break
# ## Interaction Energy with No Deformation Energy
# +
systems = [] # List of system names
ani_eAB = [] # List of ANI Dimer energies (kcal/mol)
ani_eA = [] # List of ANI Monomer A energies (kcal/mol)
ani_eB = [] # List of ANI Monomer B energies (kcal/mol)
dft_eAB = [] # List of DFT Dimer energies (kcal/mol)
dft_eA = [] # List of DFT Monomer A energies (kcal/mol)
dft_eB = [] # List of DFT Monomer B energies (kcal/mol)
for dat in adl:
if '/ani/dimers/' in dat['path']:
systems.append(dat['path'][12:])
energy = singlepoint_energy_calc(dat['coordinates'], dat['species']) #Perform single point calculation
ani_eAB.append(energy.item()*HARTREE_TO_KCALMOL)
if '/ani/monA/' in dat['path']:
energy = singlepoint_energy_calc(dat['coordinates'], dat['species']) #Perform single point calculation
ani_eA.append(energy.item()*HARTREE_TO_KCALMOL)
if '/ani/monB/' in dat['path']:
energy = singlepoint_energy_calc(dat['coordinates'], dat['species']) #Perform single point calculation
ani_eB.append(energy.item()*HARTREE_TO_KCALMOL)
if '/dft/dimers/' in dat['path']:
dft_eAB.append(dat['energy'][0]) #Extract DFT energy from H5
if '/dft/monA/' in dat['path']:
dft_eA.append(dat['energy'][0]) #Extract DFT energy from H5
if '/dft/monB/' in dat['path']:
dft_eB.append(dat['energy'][0]) #Extract DFT energy from H5
# -
for i in range(len(systems)):
print(systems[i])
print('ANI AB: ', ani_eAB[i] )
print('ANI A: \t', ani_eA[i] )
print('ANI B: \t', ani_eB[i] )
print('DFT AB: ', dft_eAB[i] )
print('DFT A: \t', dft_eA[i] )
print('DFT B: \t', dft_eB[i] )
# +
#Calculate the Interaction energies and save them in lists
# IE = E_AB - (E_A+E_B)
ani_int_e = [] #List of ANI interaction energies
dft_int_e = [] #List of DFT Interaction energies
for i in range(len(systems)):
a_i_e = ani_eAB[i]-(ani_eA[i]+ani_eB[i])
ani_int_e.append(a_i_e)
d_i_e = dft_eAB[i]-(dft_eA[i]+dft_eB[i])
dft_int_e.append(d_i_e)
# -
#!!!Only for X40 Dataset
# Separates the data into different dictionaries dependent on interaction type
dispersion={}
induction = {}
dipole_dipole = {}
stacking = {}
halogen_bonds={}
hydrogen_bonds={}
for i in range(len(systems)):
if '01_' in systems[i] or '02_' in systems[i]:
dispersion[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '05_' in systems[i] or '06_' in systems[i] or '07_' in systems[i] or'08_' in systems[i]:
induction[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '09_' in systems[i] or '10_' in systems[i]:
dipole_dipole[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '11_' in systems[i] or '12_' in systems[i]:
stacking[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '13_' in systems[i] or '16_' in systems[i] or '19_' in systems[i] or'22_' in systems[i]:
halogen_bonds[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '31_' in systems[i] or '32_' in systems[i] or '33_' in systems[i] or'34_' in systems[i] or '37_' in systems[i] or '38_' in systems[i] or '39_' in systems[i] or '40_' in systems[i]:
hydrogen_bonds[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
#ANI vs DFT
ani_int_e = np.array(ani_int_e)
dft_int_e = np.array(dft_int_e)
print('ANI vs DFT')
print('MAE')
print(np.average(abs_dif(ani_int_e, dft_int_e)))
print('RMSE')
print (sqrt(np.average(abs_dif(ani_int_e,dft_int_e)**2)))
#ANI vs. CCSD(T)/CBS (only X40 Dataset)
ani_int_e = np.array(ani_int_e)
ccsd_int_e = np.array(ccsd)
print('ANI vs CCSD(T)/CBS')
print('MAE')
print (np.average(abs_dif(ani_int_e,ccsd_int_e)))
print('RMSE')
print (sqrt(np.average(abs_dif(ani_int_e,ccsd_int_e)**2)))
#DFT vs. CCSD(T)/CBS (only X40 Dataset)
ccsd_int_e = np.array(ccsd)
dft_int_e = np.array(dft_int_e)
print('DFT vs CCSD(T)/CBS')
print('MAE')
print (np.average(abs_dif(ccsd_int_e,dft_int_e)))
print('RMSE')
print (sqrt(np.average(abs_dif(ccsd_int_e,dft_int_e)**2)))
#!!Only for X40 Dataset
print('London Dispersion')
interaction_type(dispersion)
print()
print('Induction')
interaction_type(induction)
print()
print('Dipole-dipole Interaction')
interaction_type(dipole_dipole)
print()
print('Stacking')
interaction_type(stacking)
print()
print('Halogen Bonds')
interaction_type(halogen_bonds)
print()
print('Hydrogen Bonds')
interaction_type(hydrogen_bonds)
print()
# ## Interaction Energy with Deformation Energy
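# (Note, assuming the usual convention behind the `optmonA`/`optmonB` groups used below: here the monomer references are the separately optimized monomer geometries, so the computed value $E_{AB} - (E_A^{opt} + E_B^{opt})$ also absorbs the deformation (strain) energy of each monomer, whereas the previous section used the monomer energies at their geometries taken from the dimer.)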
# +
systems = [] # List of system names
ani_eAB = [] # List of ANI Dimer energies (kcal/mol)
ani_eA = [] # List of ANI Monomer A energies (kcal/mol)
ani_eB = [] # List of ANI Monomer B energies (kcal/mol)
dft_eAB = [] # List of DFT Dimer energies (kcal/mol)
dft_eA = [] # List of DFT Monomer A energies (kcal/mol)
dft_eB = [] # List of DFT Monomer B energies (kcal/mol)
for dat in adl:
if '/ani/dimers/' in dat['path']:
systems.append(dat['path'][12:])
energy = singlepoint_energy_calc(dat['coordinates'], dat['species']) #Perform single point calculation
ani_eAB.append(energy.item()*HARTREE_TO_KCALMOL)
if '/ani/optmonA/' in dat['path']:
energy = singlepoint_energy_calc(dat['coordinates'], dat['species']) #Perform single point calculation
ani_eA.append(energy.item()*HARTREE_TO_KCALMOL)
if '/ani/optmonB/' in dat['path']:
energy = singlepoint_energy_calc(dat['coordinates'], dat['species']) #Perform single point calculation
ani_eB.append(energy.item()*HARTREE_TO_KCALMOL)
if '/dft/dimers/' in dat['path']:
dft_eAB.append(dat['energy'][0]) #Extract DFT energy from H5
if '/dft/optmonA/' in dat['path']:
dft_eA.append(dat['energy'][0]) #Extract DFT energy from H5
if '/dft/optmonB/' in dat['path']:
dft_eB.append(dat['energy'][0]) #Extract DFT energy from H5
# -
for i in range(len(systems)):
print(systems[i])
print('ANI AB: ', ani_eAB[i] )
print('ANI A: \t', ani_eA[i] )
print('ANI B: \t', ani_eB[i] )
print('DFT AB: ', dft_eAB[i] )
print('DFT A: \t', dft_eA[i] )
print('DFT B: \t', dft_eB[i] )
# +
#Calculate the Interaction energies and save them in lists
# IE = E_AB - (E_A+E_B)
ani_int_e = [] #List of ANI interaction energies
dft_int_e = [] #List of DFT Interaction energies
for i in range(len(systems)):
a_i_e = ani_eAB[i]-(ani_eA[i]+ani_eB[i])
ani_int_e.append(a_i_e)
d_i_e = dft_eAB[i]-(dft_eA[i]+dft_eB[i])
dft_int_e.append(d_i_e)
# -
#!!!Only for X40 Dataset
# Separates the data into different dictionaries dependent on interaction type
dispersion={}
induction = {}
dipole_dipole = {}
stacking = {}
halogen_bonds={}
hydrogen_bonds={}
for i in range(len(systems)):
if '01_' in systems[i] or '02_' in systems[i]:
dispersion[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '05_' in systems[i] or '06_' in systems[i] or '07_' in systems[i] or'08_' in systems[i]:
induction[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '09_' in systems[i] or '10_' in systems[i]:
dipole_dipole[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '11_' in systems[i] or '12_' in systems[i]:
stacking[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '13_' in systems[i] or '16_' in systems[i] or '19_' in systems[i] or'22_' in systems[i]:
halogen_bonds[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
if '31_' in systems[i] or '32_' in systems[i] or '33_' in systems[i] or'34_' in systems[i] or '37_' in systems[i] or '38_' in systems[i] or '39_' in systems[i] or '40_' in systems[i]:
hydrogen_bonds[systems[i]]={'ani':ani_int_e[i], 'dft': dft_int_e[i]}
#ANI vs DFT
ani_int_e = np.array(ani_int_e)
dft_int_e = np.array(dft_int_e)
print('ANI vs DFT')
print('MAE')
print(np.average(abs_dif(ani_int_e, dft_int_e)))
print('RMSE')
print (sqrt(np.average(abs_dif(ani_int_e,dft_int_e)**2)))
#ANI vs. CCSD(T)/CBS (only X40 Dataset)
ani_int_e = np.array(ani_int_e)
ccsd_int_e = np.array(ccsd)
print('ANI vs CCSD(T)/CBS')
print('MAE')
print (np.average(abs_dif(ani_int_e,ccsd_int_e)))
print('RMSE')
print (sqrt(np.average(abs_dif(ani_int_e,ccsd_int_e)**2)))
#DFT vs. CCSD(T)/CBS (only X40 Dataset)
ccsd_int_e = np.array(ccsd)
dft_int_e = np.array(dft_int_e)
print('DFT vs CCSD(T)/CBS')
print('MAE')
print (np.average(abs_dif(ccsd_int_e,dft_int_e)))
print('RMSE')
print (sqrt(np.average(abs_dif(ccsd_int_e,dft_int_e)**2)))
#!!Only for X40 Dataset
print('London Dispersion')
interaction_type(dispersion)
print()
print('Induction')
interaction_type(induction)
print()
print('Dipole-dipole Interaction')
interaction_type(dipole_dipole)
print()
print('Stacking')
interaction_type(stacking)
print()
print('Halogen Bonds')
interaction_type(halogen_bonds)
print()
print('Hydrogen Bonds')
interaction_type(hydrogen_bonds)
print()
| 15,388 |
/labs/module_2/Matplotlib-Seaborn/main.ipynb
|
75d940c58541fd94f64b7a095519ef5c6f5005e0
|
[] |
no_license
|
paoloironhack/dataptams2020
|
https://github.com/paoloironhack/dataptams2020
| 2 | 3 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 18,163 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# # Lab | Matplotlib & Seaborn
# + [markdown] Collapsed="false"
# #### Import all the libraries that are necessary.
# + Collapsed="false"
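# A minimal set of imports for this lab (assuming numpy, pandas, matplotlib and seaborn cover all exercises below):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns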
# + [markdown] Collapsed="false"
# # Challenge 1
# + [markdown] Collapsed="false"
# #### Data
# + Collapsed="false"
x = np.arange(0,100)
y = x*2
z = x**2
# + [markdown] Collapsed="false"
# #### Plot (x,y) and (x,z) on the axes.
# There are 2 ways of doing this. Do it both ways.
# Hint: Check out the nrows, ncols, and index arguments of subplots.
#
# Also, play around with the linewidth and style. Use the ones you're most happy with.
# + Collapsed="false"
# OPTION 1
# Create a figure of a fixed size
# First plot
# Second plot
# Show the plot
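# One possible solution sketch for OPTION 1 (illustrative only; figure size, line widths and styles are arbitrary choices):
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(x, y, linewidth=2, linestyle='-')
plt.subplot(1, 2, 2)
plt.plot(x, z, linewidth=2, linestyle='--')
plt.show()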
# + [markdown] Collapsed="false"
# #### Use plt.subplots(nrows=1, ncols=2) to create the plot below
# + Collapsed="false"
# OPTION 2
# Create a figure of a fixed size and axes
# Plots
# Show the plot
# + [markdown] Collapsed="false"
# #### Augmenting your previous code, resize your previous plot.
# Hint: Add the figsize argument in plt.subplots()
# + Collapsed="false"
# ENHANCED VERSION to see more matplotlib features
# Variables
fs = 16 # fontsize
fw = 700 # fontweight
lw = 3 # lineweight
y_rot = 0 # ylabel rotation
# Create a figure of a fixed size and axes
# First plot adding a title and x and y labels
# Second plot adding a title and x and y labels
# Show the plot
# + [markdown] Collapsed="false"
# #### Plot both y=x^2 and y=exp(x) in the same plot using normal and logarithmic scale.
# Hint: Use set_xscale and set_yscale
# + Collapsed="false"
# Create a figure of a fixed size and axes
# Normal plot
# Logarithmic scale
# Show plot
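# Illustrative sketch: x**2 and exp(x) on a normal scale (left) and a logarithmic scale (right).
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
ax[0].plot(x, x**2, label='x**2')
ax[0].plot(x, np.exp(x), label='exp(x)')
ax[0].legend()
ax[1].plot(x, x**2, label='x**2')
ax[1].plot(x, np.exp(x), label='exp(x)')
ax[1].set_xscale('log')
ax[1].set_yscale('log')
ax[1].legend()
plt.show()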
# + Collapsed="false"
# ALTERNATIVE WITH LEGEND
# Create a figure of a fixed size and axes
# Normal plot
# Logarithmic scale
# Show plot
# + [markdown] Collapsed="false"
# # Challenge 2
# + [markdown] Collapsed="false"
# Import the Fitbit2.csv file and name your dataset fitbit. Download the data from [here](https://drive.google.com/file/d/17TW-w-izKuR7bLoa0Mnyp9YN9zEQNXq-/view?usp=sharing) and place it in the data folder.
# + Collapsed="false"
# Read the data
# + [markdown] Collapsed="false"
# #### From the Fitbit data, we want to visually understand:
#
# How the average number of steps changes by month. Use the appropriate visualization to show the median steps by month.
# Is the Fitbit user more active on weekends or on workdays?
# All plots must be in the same jupyter notebook cell.
#
# Hints:
#
# - Use the Months_encoded and Work_or_Weekend columns.
# - Use the matplotlib.pyplot object-oriented API.
# - Set your figure size to (12, 4)
# - Explore plt.sca
# - Explore plt.xticks
# - Save your figures
# + Collapsed="false"
# MEDIAN STEPS BY MONTH_ENCODED
# Find the median steps for each month
# Create a figure of a fixed size and axes
# Set the current axes instance to ax[0] - Now plt methods will affect ax[0]
# Plot the weekday steps in the current axes
# Add labels, title, etc
# Save the figure
# MEDIAN STEPS BY WORK_OR_WEEKEND
# Set the current axes instance to ax[1] - Now plt methods will affect ax[1]
# Find the median steps for workdays and weekdays
# Plot the workday and weekend steps in the current axes
# Add labels, title, etc
# Save the figure
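# Hypothetical sketch; `fitbit` is the DataFrame the instructions ask you to create above, and the exact
# column names ('Months_encoded', 'Work_or_Weekend', 'Steps') are assumptions about Fitbit2.csv.
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
monthly = fitbit.groupby('Months_encoded')['Steps'].median()
ax[0].bar(monthly.index, monthly.values)
ax[0].set_title('Median steps by month')
weekly = fitbit.groupby('Work_or_Weekend')['Steps'].median()
ax[1].bar(weekly.index.astype(str), weekly.values)
ax[1].set_title('Median steps: workdays vs weekends')
fig.savefig('median_steps.png')
plt.show()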
# + [markdown] Collapsed="false"
# #### Write a loop to plot 3 scatter plots of the following features:
#
# Minutes Lightly Active vs Steps
# Minutes Very Active vs Steps
# Minutes Sedentary vs Steps
# + Collapsed="false"
# ALTERNATIVE 1
# Create a df with the columns we are interested in
# Create a figure of a fixed size and axes
# Iterate to draw each scatter plot
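# Hypothetical sketch for ALTERNATIVE 1; the activity column names are assumptions about Fitbit2.csv.
cols = ['Minutes Lightly Active', 'Minutes Very Active', 'Minutes Sedentary']
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
for ax, col in zip(axes, cols):
    ax.scatter(fitbit[col], fitbit['Steps'], s=10)
    ax.set_xlabel(col)
    ax.set_ylabel('Steps')
plt.show()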
# + Collapsed="false"
# ALTERNATIVE 2
# Create a df with the columns we are interested in
# Create a figure of a fixed size and axes
# Iterate to draw each scatter plot
# + [markdown] Collapsed="false"
# # Challenge 3
# + [markdown] Collapsed="false"
# #### Open the titanic file. Name your dataset titanic.
# + [markdown] Collapsed="false"
# Download the dataset from [here](https://drive.google.com/file/d/1jkkOcWm9aEF8gb0r2SsarF5Qdoaaw1do/view?usp=sharing) and place it in the data folder
# + Collapsed="false"
# Read the data
# + [markdown] Collapsed="false"
# #### Explore the titanic dataset using Pandas dtypes.
# + Collapsed="false"
# + [markdown] Collapsed="false"
# #### What are your numerical variables? What are your categorical variables?
# Hint: Use Pandas select_dtypes
# + Collapsed="false"
# Numerical variables
# + Collapsed="false"
# Categorical variables
# + [markdown] Collapsed="false"
# #### Set the plot style to classic and the figure size to (12,6).
# Hint: To set the style you can use matplotlib.pyplot functions or seaborn
# + Collapsed="false"
# Plot style change
# Figure size change
# + [markdown] Collapsed="false"
# #### Use the right visualization to show the distribution of the column Age.
# + Collapsed="false"
# Pandas plots are based on matplotlib
# Plot the histogram of column Age
# Set titles, labels, etc
# Show the plot
# + [markdown] Collapsed="false"
# #### Use subplots and plot the distribution of the Age variable with bins equal to 10, 20 and 50.
# + Collapsed="false"
# Variables
# Create a figure of a fixed size and axes
# Plot the histogram using a different number of bins
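# Illustrative sketch: the Age histogram with 10, 20 and 50 bins (assumes the `titanic` DataFrame and its 'Age' column from the cells above).
bins_list = [10, 20, 50]
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, b in zip(axes, bins_list):
    ax.hist(titanic['Age'].dropna(), bins=b)
    ax.set_title(f'bins = {b}')
plt.show()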
# + [markdown] Collapsed="false"
# #### How does the bin size affect your plot? Comment.
# + [markdown] Collapsed="false"
# ~~~~
# Using a low number of bins (wider bins) reduces noise on the distribution estimation while using a
# high number of bins (narrower bins) gives greater precision to the distribution estimation (and more noise).
# ~~~~
# + [markdown] Collapsed="false"
# #### Use seaborn to show the distribution of the column Age.
# + Collapsed="false"
# Seaborn is a Python library based on matplotlib
# Plot of the age histogram using seaborn
# Show the plot
# + [markdown] Collapsed="false"
# #### Use the right plot to visualize the column Gender. There are 2 ways of doing it. Do it both ways.
# Hint: Use matplotlib and seaborn
# + Collapsed="false"
# OPTION 1 - Matplotlib
# Bar plot of the gender
# Add labels to the plot and change xticks rotation
# Show the plot
# + Collapsed="false"
# OPTION 2 - Seaborn
# Seaborn Countplot
# Show the plot
# + [markdown] Collapsed="false"
# #### Use the right plot to visualize the column Pclass.
# + Collapsed="false"
# Bar plot of Pclass
# Add labels to the plot and change xticks rotation
# Show the plot
# + [markdown] Collapsed="false"
# #### We would like to see the summary statistics of the feature Age in a single plot. What kind of plot would you use?
# + Collapsed="false"
# Box plot
# Show the plot
# + [markdown] Collapsed="false"
# #### What does the last plot tell you about the feature Age? Comment.
# + [markdown] Collapsed="false"
# ~~~~
# The statistics that you can get from the boxplot are the minimum, first quartile, median,
# third quartile, and maximum.
#
# The red line shows us the median of Age.
# The blue box shows us the interquartile range (from Q1 to Q3).
# It also shows us the outliers, which are out of the maximum and minimum range (Q1 - 1.5*IQR, Q3 + 1.5*IQR).
# ~~~~
# + [markdown] Collapsed="false"
# #### Now, in addition to the summary statistics, we want to show the distribution of Age in the SAME plot. What kind of plot would you use?
# + Collapsed="false"
# Set figure size
# Violin plot
# Show the plot
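# Illustrative sketch of a violin plot of Age with seaborn (the 'Age' column name is assumed).
plt.figure(figsize=(12, 6))
sns.violinplot(x=titanic['Age'].dropna())
plt.show()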
# + [markdown] Collapsed="false"
# #### What additional information does the last plot provide about the feature Age? Comment.
# + [markdown] Collapsed="false"
# ~~~~
# This plot is a combination of a boxplot and a density plot. The violin plot features a kernel density estimation
# of the underlying distribution of the data.
#
# The black central part of the plot is the same as a boxplot and the white dot is the median.
# The blue part is the distribution of the data.
# ~~~~
# + [markdown] Collapsed="false"
# #### We suspect that there is a linear relationship between Fare and Age. Use the right plot to show the relationship between these 2 features. There are 2 ways, please do it both ways.
# Hint: One of the ways involves using Seaborn.
# + Collapsed="false"
# OPTION 1
# Scatter plot
# Add labels
# Show the plot
# + Collapsed="false"
# OPTION 2
# Jointplot
# Show the plot
# + [markdown] Collapsed="false"
# #### Using Seaborn, plot the correlation matrix.
# + Collapsed="false"
# Set figure size
# Correlation matrix
# Add title
# Show plot
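# Illustrative sketch of the correlation heatmap, restricted to the numeric columns.
plt.figure(figsize=(12, 6))
sns.heatmap(titanic.select_dtypes(include=np.number).corr(), annot=True, cmap='coolwarm')
plt.title('Titanic correlation matrix')
plt.show()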
# + [markdown] Collapsed="false"
# #### What are the most correlated features? Comment.
# + [markdown] Collapsed="false"
# ~~~~~
# The most correlated features are Parch and SibSp.
# ~~~~~
# + [markdown] Collapsed="false"
# #### Use the right plot to display the summary statistics of Age as a function of Pclass.
# + Collapsed="false"
# Boxplot
# Show the plot
# + [markdown] Collapsed="false"
# #### Use seaborn to plot the distribution of Age based on Gender.
# Hint: Use Facetgrid
# + Collapsed="false"
# Create a grid with FacetGrid
# Draw a plot on each facet
# Show the plot
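# Illustrative sketch with FacetGrid (the 'Gender' and 'Age' column names are assumptions about this titanic file).
g = sns.FacetGrid(titanic, col='Gender')
g.map(plt.hist, 'Age')
plt.show()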
| 9,464 |
/Donde estan los ojos.ipynb
|
e9fe29fe85ce50e24feca5b9b5b098e86e47011e
|
[] |
no_license
|
gsarmientod/MLP_OJOS
|
https://github.com/gsarmientod/MLP_OJOS
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,380,275 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Regression I: Where are the eyes?
# Authors: German Sarmiento, Camila Lozano
# ### Introduction
# Many image-based person-recognition systems rely on locating key facial landmarks in order to find the position of the face. Important landmarks include the eyes, the nose and the mouth; from them it is possible to define the coordinates that bound a person's face. Detecting these points is essential for face-recognition algorithms.
from IPython.display import Image
Image("FacialDetectionImage.png")
# Identifying landmarks such as the eyes is also very important in fields like road-accident prevention, since they can help detect fatigue or drooping eyes (Sudhakara, 2012).
# The goal of this notebook is to build a system that locates the eyes in images. To do so, we implement an MLP neural network using a set of 1236 images previously labelled by the "GI4E - Gaze Interaction for Everybody" project of the Public University of Navarre, available at the following link: https://www.unavarra.es/gi4e/databases/gi4e/.
#
# This database contains 1236 png images of 103 people at a resolution of 800×600 pixels, with 12 images per person. Each image comes with its eye labels: every image has an associated .txt file, grouped by person, with the following information: x1 y1 x2 y2 x3 y3 x4 y4 x5 y5 x6 y6, where:
#
# * (x1,y1) are the coordinates of the outer edge of the cornea of the left eye
# * (x2,y2) are the coordinates of the centre of the iris of the left eye
# * (x3,y3) are the coordinates of the inner edge of the cornea of the left eye
# * (x4,y4) are the coordinates of the outer edge of the cornea of the right eye
# * (x5,y5) are the coordinates of the centre of the iris of the right eye
# * (x6,y6) are the coordinates of the inner edge of the cornea of the right eye
#
# The goal of this exercise is therefore to predict the values of x1 y1 x2 y2 x3 y3 x4 y4 x5 y5 x6 y6. The images show different people, with and without glasses, looking in different directions.
#
#
# These images were rescaled and used to train a Multi-Layer Perceptron neural network in order to obtain the eye coordinates. After training, the model's performance was evaluated.
#
# ### Methods
# In recent years, given the importance of this topic, several methodological approaches for detecting eye coordinates have been explored. Some of them capture the physiological properties of the eyes using eye-appearance models to represent eye patterns; in these models the centre and radius of the iris are detected by searching for the circle that separates the dark iris from the bright sclera. However, such methods need a large number of training images to identify those physiological properties. Other approaches include principal component analysis, fuzzy logic and Markov models, among others. Neural networks, in turn, have proven effective at detecting facial features (Sudhakara, 2012).
#
# Therefore, to solve this visual pattern-recognition problem we implement a Multi-Layer Perceptron neural network. The task is a regression problem in which the values to predict are the coordinates of the points where the person's eyes are located. The 1236 images, after rescaling, were used to train the model, which was then evaluated.
#
# The multilayer perceptron was trained with a single hidden layer. The number of features corresponds to the total number of pixels of the colour image: the network receives 50 * 50 * 3 values and outputs a vector of 4 values giving the x, y coordinates of the centre of the left iris and of the right iris. To improve generalisation, a `Dropout` layer is used, which zeroes 15% of the activations of the previous layer. The cost function is ordinary least squares.
#
#
#
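# A minimal sketch of the architecture just described (illustrative only; the model actually trained is defined in the Solution section below):
from torch import nn
mlp_sketch = nn.Sequential(
    nn.Linear(50*50*3, 200),  # flattened 50x50 RGB image -> 200 hidden units
    nn.Dropout(p=0.15),       # zero out 15% of the activations
    nn.ReLU(),
    nn.Linear(200, 4)         # x,y of the left iris centre and x,y of the right iris centre
)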
# ### Solution
# We start by importing the libraries. For this exercise we will use torch and torchvision.
# +
#Imports
import numpy as np
from torch import nn, optim
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
from torchvision import transforms
import os
import glob
from torchvision.io import read_image
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# First we download and load into the project the set of 1236 .png images.
# +
train_dir = "DataIMG/images/"
images = glob.glob(os.path.join(train_dir,'*.png'))
# -
# Each person has an associated .txt file that groups, for the 12 samples per person, the points x1 y1 x2 y2 x3 y3 x4 y4 x5 y5 x6 y6 that we want to estimate. We therefore associate each image with the coordinates that indicate the position of the eyes.
#
#
print(images[0])
anot0_file = "DataIMG/labels/" + images[0][15:19] + 'image_labels.txt'
print(anot0_file)
# The 12 values contained in the file "001_image_labels.txt" correspond to x1 y1 x2 y2 x3 y3 x4 y4 x5 y5 x6 y6, respectively.
anot0 = np.genfromtxt(anot0_file)
anot0 = np.array(anot0[0:1,1:]).reshape(12)
print(anot0.shape)
print(anot0)
# We check that the resolution of one of the images is 800×600 pixels, with each pixel carrying its colour information as RGB.
person = read_image(images[0])
person.shape
# We draw an image with its 12 training points (x1 y1 x2 y2 x3 y3 x4 y4 x5 y5 x6 y6). The cornea points are drawn in red and the iris centres in yellow.
# +
from IPython.display import clear_output
from time import sleep
for i in range(12):
clear_output(wait=True)
person = read_image(images[i])
anot0_file = "DataIMG/labels/" + images[i][15:19] + 'image_labels.txt'
anot0 = np.genfromtxt(anot0_file)
anot0 = np.array(anot0[i:i + 1,1:]).reshape(12)
plt.figure(figsize=(8,8))
plt.imshow(person.permute(1,2,0))
plt.plot(anot0[0],anot0[1],'+r')
    plt.plot(anot0[2],anot0[3],'+y') # Left pupil
plt.plot(anot0[4],anot0[5],'+r')
plt.plot(anot0[6],anot0[7],'+r')
    plt.plot(anot0[8],anot0[9],'+y') # Right pupil
plt.plot(anot0[10],anot0[11],'+r')
plt.axis('off')
plt.title(images[i][15:])
plt.show()
sleep(0.3)
# -
# We rescale all images from 800×600 pixels to 50×50 pixels, and rescale the eye-location points accordingly.
# +
person = read_image(images[0])
anot0_file = "DataIMG/labels/" + images[0][15:19] + 'image_labels.txt'
anot0 = np.genfromtxt(anot0_file)
anot0 = np.array(anot0[0:1,1:]).reshape(12)
resize = transforms.Resize(size=(50,50))
plt.imshow(resize(person).permute(1,2,0))
plt.axis('off')
plt.title(images[0][15:])
# -
print(person.shape)
# Rescaling the eye points.
#
# ---
print(person.shape[1])
print(person.shape[2])
dy, dx = person.shape[1]/50, person.shape[2]/50
print(dy, dx)
print(anot0)
for i in range(12):
if i == 0 or i % 2 ==0:
anot0[i] = np.round(anot0[i]/dx)
else:
anot0[i] = np.round(anot0[i]/dy)
print(anot0)
# Image rescaled to 50x50 with its eye points.
# +
plt.imshow(resize(person).permute(1,2,0))
plt.plot(anot0[0],anot0[1],'+r')
plt.plot(anot0[2],anot0[3],'+y') # Left pupil
plt.plot(anot0[4],anot0[5],'+r')
plt.plot(anot0[6],anot0[7],'+r')
plt.plot(anot0[8],anot0[9],'+y') # Right pupil
plt.plot(anot0[10],anot0[11],'+r')
plt.axis('off')
plt.title(images[0][15:])
# -
# The following class ties together the whole process, from reading the files to rescaling.
#
# In this case, given the size of the images and of the dataset, we will only work with the main iris points (the coordinates of the eye centres).
#
# The implementation therefore only uses 4 values: one x,y pair for one eye and another x,y pair for the other eye.
class CustomImageDataset(Dataset):
def __init__(self,img_dir,resizes=None):
self.img_dir = img_dir
        self.resizes = resizes # Tuple (height, width) with the target size after rescaling the image
        self.list_img = glob.glob(os.path.join(img_dir,'*.png')) # list of the image files only
def __len__(self):
return len(self.list_img)
def transform(self, image, y):
        # Rescale the point coordinates according to the image rescaling
dy, dx = image.shape[1]/self.resizes[0], image.shape[2]/self.resizes[1]
for i in range(4):
if i == 0 or i % 2 ==0:
y[i] = np.round(y[i]/dx)
else:
y[i] = np.round(y[i]/dy)
        resize = transforms.Resize(size=self.resizes) # instantiate the resize transform
        image = resize(image) # apply the resize
        return image/255, torch.tensor(y).float() # y also becomes a torch.tensor; image/255 normalises the tensor to [0,1] (and makes it float)
    def __getitem__(self,idx):
        image = read_image(self.list_img[idx]) # read_image loads the image directly as a tensor
        anot_file = "DataIMG/labels/image_labels.txt" # this file holds the coordinates for all the images
anot = np.genfromtxt(anot_file)
anot = np.array(anot[idx :idx + 1,1:]).reshape(12)
        y = np.array([anot[2], anot[3], anot[8], anot[9]]) # keep only the left- and right-pupil coordinates
if self.resizes:
image, y = self.transform(image, y)
return image, y
# We create a function to plot an image with matplotlib together with its eye coordinates. The function draws the centres of the left and right irises.
def plot_person(img,eyes,s=None):
    if s is not None:
        plt.figure(figsize=(s,s))
    plt.imshow(img.permute(1,2,0)) # needed because of the channel-first image format used by pytorch
plt.plot(eyes[0],eyes[1],'+r')
plt.plot(eyes[2],eyes[3],'+r')
plt.axis('off')
# Creating the training and validation datasets (and the corresponding dataloaders)
# +
##########################
### CONFIGURATION
##########################
RANDOM_SEED = 123
NUM_EPOCHS = 100
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
lr = 0.01
RS = 50 # rescale all images to RS x RS
BS = 300
torch.manual_seed(RANDOM_SEED);
# -
torch.cuda.is_available()
# Instantiating the training dataset.
train_ds = CustomImageDataset(train_dir, resizes=(RS,RS))
img,y = train_ds[34] # a specific person
# We test our plotting function.
plot_person(img,y)
# Iterating through the DataLoader. The `DataLoader` is built from the training set with a given batch size, shuffling the samples randomly.
train_dl = DataLoader(train_ds, batch_size=BS, shuffle=True, )
img, y = next(iter(train_dl))
print(f"image batch size: {img.size()}")
print(f"mouth batch size: {y.size()}")
# Vemos que en este caso, la primera imagen (posición 0), no concuerda con el orden de las imágenes en la librería de imágenes, de igual forma, cada vez que ejecutemos de nuevo las líneas anteriores, siempre obtendremos una imagen distinta en el índice 0.
#
# Por otro lado, debido al re escalamiento, observamos como tiende a “deformarse” la ubicación de los iris.
plot_person(img[0],y[0])
# Construyendo el Dataset y DataLoader para el conjunto de validación
valid_ds = CustomImageDataset(train_dir, resizes=(RS,RS))
valid_dl = DataLoader(valid_ds, batch_size=BS*2)
# Training an artificial neural network of the multilayer perceptron (MLP) type with a single hidden layer. The number of features is the total number of pixels of the colour image: the network receives 50 * 50 * 3 values and its output layer produces a vector of 4 values with the x, y coordinates of the centres of the left and right irises.
#
# To improve generalisation, a `Dropout` layer is used, which zeroes 15% of the activations of the previous layer.
#
model = nn.Sequential(
nn.Linear(RS*RS*3,200),
nn.Dropout(p=0.15),
nn.ReLU(),
nn.Linear(200, 4)
)
# We define the cost function as ordinary least squares, `mse_loss`. When there are multiple response variables, `mse_loss` computes the *MSE* for each output variable and then averages them.
# +
loss_func = F.mse_loss
model = model.to(DEVICE)
opt = optim.RMSprop(model.parameters(),lr = lr, momentum=0.9)
scheduler = optim.lr_scheduler.OneCycleLR(
    opt, max_lr=0.01, steps_per_epoch=len(train_dl), epochs=NUM_EPOCHS) # The scheduler adjusts the learning rate during training to speed up learning, reach a better model, avoid getting stuck in poor local minima, etc.
# -
# The loss is computed on a minibatch, flattening the image first.
def loss_batch(xb,yb):
    yp = model(xb.view(-1, RS*RS*3).to(DEVICE)) # Reshape the image into a flat vector
yt = yb.to(DEVICE)
loss = loss_func(yp, yt)
out = [loss, len(xb)]
return out
# And we write the usual train loop.
def train():
train_losses = []
valid_losses = []
for epoch in range(NUM_EPOCHS):
model.train()
losses = 0
nums = 0
for xb, yb in train_dl:
loss, l = loss_batch(xb,yb)
loss.backward()
opt.step()
opt.zero_grad()
losses += loss.item() * l
nums += l
scheduler.step()
train_loss = losses / nums
train_losses.append(train_loss)
model.eval()
with torch.no_grad():
losses, nums = zip(*[loss_batch(xb, yb) for xb, yb in valid_dl])
losses = [l.item() for l in losses]
valid_loss = np.sum(np.multiply(losses,nums)) / np.sum(nums)
valid_losses.append(valid_loss)
if epoch % 5 == 0:
print(f"epoch: {epoch}, train_loss: {train_loss:.4f} \
valid_loss: {valid_loss:.4f}")
plt.plot(range(NUM_EPOCHS), train_losses, 'r', label='train')
plt.plot(range(NUM_EPOCHS), valid_losses, 'b', label = 'valid')
plt.legend()
train()
# Training the network takes on average more than 3 hours on an i5 machine with an NVIDIA GTX 1660 graphics card which, incidentally, we could not force PyTorch to use.
# 
# 
ypred = model(img.view(-1, RS*RS*3).to(DEVICE))
ypred
# We now compare the original coordinates against the coordinates predicted by the model.
i = 5
plot_person(img[i],y[i])
plot_person(img[i],ypred[i].detach().cpu().numpy())
# The prediction is quite close, although of course not exactly the same value.
resize = transforms.Resize(size=(50,50))
# Exporting the model for inference (prediction)
torch.save(model.state_dict(), "model_MLP_eyes.pth")
# ### Model implementation
# Now we run inference: we rebuild the model, load its weights and move it to the CPU (inference is better done on the CPU).
#
# We build the function `where_eyes`, which uses the trained model to try to predict the position of the eyes (irises) in an image. The function takes the path of the image as its parameter.
def where_eyes(im_path):
modelp = nn.Sequential(
nn.Linear(RS*RS*3,200),
nn.Dropout(p=0.15),
nn.ReLU(),
nn.Linear(200, 4)
)
modelp.load_state_dict( torch.load("model_MLP_eyes.pth", map_location=torch.device('cpu')))
modelp.eval()
im = read_image(im_path)
    im_r = transforms.Resize(size=(RS,RS))(im)/255 # rescale and normalise (float values between 0 and 1)
with torch.no_grad():
ypred = modelp(im_r.view(-1, RS*RS*3))
plt.figure(figsize=(8,8))
plt.imshow(im.permute(1,2,0))
plt.axis('off')
plt.show()
    # The predicted coordinates refer to the 50*50 image, so they must be rescaled back
    dy, dx = 12, 16 # 600/50 and 800/50, i.e. the original 800×600 resolution divided by RS
for i in range(4):
if i == 0 or i % 2 ==0:
ypred[0][i] = np.round(ypred[0][i]*dx)
else:
ypred[0][i] = np.round(ypred[0][i]*dy)
plot_person(im,ypred[0], 8)
# Reading an image from the same dataset and making a prediction:
im_path = "DataIMG/images/002_05.png"
where_eyes(im_path)
# Now, if we pick a face image that was not part of the training set, the model still gets fairly close to the eye region. However, because the inter-eye distance in the training images is smaller, the predicted position of the second eye falls very close to the nose: the model keeps a spacing close to the average inter-eye distance seen in the training set.
im_path = "img_test.jpg"
where_eyes(im_path)
#
# We now try another image, in which the person wears a patch over one eye. In this case the model is not close at all; part of the explanation may be the lack of training samples with only one visible eye.
#
im_path = "img_test2.jpg"
where_eyes(im_path)
# ### Discussion
# Many human characteristics and sources of variability can affect the algorithm, for example different types of glasses, gaze direction, photo lighting, photo background, face alignment, facial expressions and eye rotation, among others.
# Among the changes that could have been explored to improve the algorithm's performance are:
# * Converting the images to gray scale.
# * Rotating the images.
# * Cropping the images, zooming in on the eye area for greater precision.
# * Dimensionality reduction.
# ### Conclusions
# The model implemented in this notebook predicts 4 values corresponding to the iris coordinates of a person's left and right eyes, and was built from 1236 images. The evaluation shows that, although the network does not predict the exact coordinate values, its predictions are very close to them.
#
# A model with more neurons and more hidden layers might reach higher precision.
#
# On the other hand, the model performs poorly for people with only one visible eye; of course, the training data contained no examples of people with a single exposed eye.
# ### Bibliography
# Sudhakara Rao, P., & Sreehari, S. (2012). Neural Network Approach for Eye Detection. arXiv e-prints, arXiv-1205.
# ### Questions
# 1. What can you conclude about the behaviour of the network and the data provided?
# Although the network does not predict the exact coordinate values, it predicts values very close to them.
#
# Training consumes considerable computing resources and takes a long time, even though the images were resized so that the model works with a smaller number of features.
# 2. What criterion was used to select the network architecture?
#
# The multilayer perceptron was trained with a single hidden layer. The number of features corresponds to the total number of pixels of the colour image: the network receives 50 * 50 * 3 values and outputs a vector of 4 values with the x, y coordinates of the centres of the left and right irises. The number of neurons in the hidden layer was chosen according to the available computing capacity.
#
# The reason for not increasing the network's complexity is the high resource consumption already observed with a single hidden layer of 200 neurons.
# 3. Does the network behave as expected? If not, what do you think is the reason?
# Yes, the network predicts the 4 values corresponding to the iris coordinates of the right and left eyes.
#
# Although the predictions are not highly precise, they land reasonably close to the eye area. Problems appear when predicting on face images that were not part of the training set and that differ from the training images, for example images with a closer crop of the face: the prediction still falls near the eye area, but the distance between the eyes is not predicted as expected, since most of the training images had a small inter-eye distance, which hurts the model on images with different size characteristics.
# 4. What would you improve in the model you designed, and how would you implement those improvements?
#
# Among the changes that could be explored to improve the algorithm's performance are:
# * Converting the images to gray scale
# * Rotating the images
# * Cropping the images to get better precision around the eye area
# * Dimensionality reduction
| 22,506 |
/notebooks/02-ml_preprocessing.ipynb
|
a3fa57b581a17b92f7d08cc22f3eb4607807c145
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
UBC-MDS/525_group10
|
https://github.com/UBC-MDS/525_group10
| 0 | 0 |
MIT
| 2021-04-26T22:04:39 | 2021-04-26T21:12:59 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 564,376 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MRS88/AI_course/blob/master/%D0%BD%D0%B5%D0%B4%D0%B5%D0%BB%D1%8F_06_%D0%A4%D1%83%D0%BD%D0%BA%D1%86%D0%B8%D0%B8_%D0%B8_%D0%BC%D0%BE%D0%B4%D1%83%D0%BB%D0%B8_Python/%D0%A0%D0%B0%D0%B7%D0%B1%D0%BE%D1%80_%D0%94%D0%97_Ultra_Pro_%D0%A4%D1%83%D0%BD%D0%BA%D1%86%D0%B8%D0%B8_%D0%B8_%D0%BC%D0%BE%D0%B4%D1%83%D0%BB%D0%B8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="NRUICBBzlLh2"
# ## Homework review: "Functions and modules"
# ## Ultra-Pro
# + [markdown] id="j0nWwQpIaPch"
# **Task 1**
#
# Spring is here and vacation is coming. You need to write a function that computes the cost of your trip.
#
# Define a function named ```hotel_cost``` that takes the number of nights as input. The hotel costs 140 c.u. (conventional units) per night, so the function must return the cost of all the nights.
#
# Define a function named ```plane_ride_cost``` that takes a city name (a string) as input. It must return the flight price depending on the destination: Crimea costs 120 c.u.,
# Sri Lanka - 800 c.u.,
# Cairo - 400 c.u.,
# Sochi - 120 c.u.
#
# Write a function ```rental_car_cost``` that takes the number of days. It must compute the cost of renting a car if for every day you pay the rent itself (40 c.u.) plus an insurance fee of +1% of the accumulated cost for each following day, i.e. the first day costs 40 * 1.01,
# the second day 40 * 1.01 * 1.01, the third day 40 * 1.01 * 1.01 * 1.01, and so on. If you rent for 7 or more days the discount is 50 c.u.; for 3 to 6 days inclusive it is 20 c.u. You cannot get both discounts at once.
# The goal is to make these functions (```hotel_cost, plane_ride_cost, rental_car_cost```) local functions inside one global function trip_cost that returns the sum of these local functions.
# + [markdown] id="eaY2kmYSusXk"
# **Solution:**
# + id="pngcgEEkaQbG"
def trip_cost(nNights, city, nDays):
    '''
    trip_cost - computes the total cost of the trip
    Input parameters: nNights - number of nights
    city - city
    nDays - number of days
    '''
def hotel_cost(nNights):
return nNights*140
def plane_ride_cost(city):
        if city.lower()=='crimea':
            cost = 120
        elif city.lower()=='sri lanka':
            cost = 800
        elif city.lower()=='cairo':
            cost = 400
        elif city.lower()=='sochi':
            cost = 120
        else:
            print('Unknown city')
cost = 0
return cost
def rental_car_cost(nDays):
if nDays>=7:
discount=50
elif (nDays>=3 and nDays<=6):
discount=20
else:
discount=0
car_rent = 0
for i in range(nDays):
car_rent = car_rent + 40 * 1.01**(i+1)
if car_rent == 0:
cost = 0
cost = car_rent - discount
return cost
return hotel_cost(nNights)+plane_ride_cost(city)+rental_car_cost(nDays)
# + id="P0iiTXLna80Q" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="fe92d654-edb1-4e5e-adfd-a1372f3a1823"
nN = int(input('Number of nights '))
st = input('Which city? ')
nD = int(input('Number of car rental days '))
print(round(trip_cost(nN, st, nD), 2), '$')
# + [markdown] id="vx8FJjcGrx4b"
# **Task 2**
#
# Write a function that computes the factorial of a given number.
# + [markdown] id="CV8UOQ7UuvqM"
# **Solution:**
# + id="rwkN-nciXC41"
def factorial(ar):
a = 1
for i in range(1, ar+1):
a *= i
return a
# + id="jet4V0IEtqAl" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="045bf8f3-fba0-4379-dbbf-fb807676e17f"
factorial(3)
# + [markdown] id="T8a_TGxGt7Yy"
# **Task 3**
#
# Write a **lambda** function that computes the total surface area of a cone:
# S = πR(l + R).
# + [markdown] id="S3Osx-rEuwWc"
# **Solution:**
# + id="c2M4DJJxtsTT"
from math import pi, sqrt
square = lambda h, r: pi*r*(sqrt(r**2 + h**2) + r)
# + id="M3a4FRVrw9ch" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f479501c-f5f2-4dfa-f73f-790a12bdec06"
square(3, 3)
# + [markdown] id="6fXQoIiVysbl"
# **Task 4**
#
# Write two local functions that convert rubles to dollars and rubles to euros, inside a global function that takes a number (rubles) as input and prints both converted currencies (dollars and euros).
# + [markdown] id="Q3lVc9lBuw46"
# **Solution:**
# + id="N7uDMsRoxCvC"
def conv(ar):
def usd(x):
return x/74.09
def eur(y):
return y/88.21
    return print('dollars', round(usd(ar), 2), '\neuros', round(eur(ar), 2))
# + id="wQHdypXe6Bkv" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="b01eac2b-bbbb-4c3f-8e95-d49426b74f50"
conv(100)
# + [markdown] id="TtWCCVfN6nCc"
# **Task 5**
#
# Write a generator function that yields Fibonacci numbers.
# + [markdown] id="bY6RWAf7uxiA"
# **Solution:**
# + id="z8KBWxdz6EGv" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1edf27a1-ff76-4719-fcd2-dcd89598d0b8"
def fib(a=0, b=1):
"""Генератор чисел Фибоначчи. `a` и `b` - начальные значения"""
while True:
yield a
a, b = b, a + b
f = fib()
print(', '.join(str(next(f)) for _ in range(10)))
# + id="WR1nBMt6sjbB"
| 5,801 |
/Chapter4_3_J.ipynb
|
fbed6ff28bc7c853045dd191a19196f75d9708e6
|
[] |
no_license
|
minzwon/ISMIR-2017
|
https://github.com/minzwon/ISMIR-2017
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,737,623 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 12: Gaussian Mixture Models (GMMs)
#
# In lecture, we learned that the Gaussian Mixture Model (GMM) is a more sophisticated unsupervised clustering method than
# $k$-means. The GMM models a dataset $(\mathbf{x}^{(1)},\ldots,\mathbf{x}^{(m)})$ as an i.i.d. sample from the following
# generative model for each sample
# $\mathbf{x}^{(i)}$:
#
# 1. Sample $z^{(i)}$ from a multinomial distribution over clusters $1..k$ according to probabilities $(\phi_1,\ldots,\phi_k)$.
# 2. Sample $\mathbf{x}^{(i)}$ from ${\cal N}(\mathbf{\mu}_{z^{(i)}},\Sigma_{z^{(i)}})$.
#
# The parameters are estimated using the Expectation Maximization (EM) algorithm, which begins with a guess for parameters
# $\phi_1,\ldots,\phi_k,\mu_1,\ldots,\mu_k,\Sigma_1,\ldots,\Sigma_k$ then iteratively alternates between computing a soft assignment
# of data to clusters then updating the parameters according to that soft assignment.
#
# First, we'll build a GMM model for a dataset then use the model for anomaly detection.
#
# ## Example 1: Anomaly detection
#
# Let's generate synthetic data from a mixture of Gaussians, use EM to recover as best possible the ground truth parameters, and
# then use the model to find "anomalies" (unusually unlikely points according to the model). First, we set up the ground truth
# parameters and generate a dataset from those ground truth parameters:
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
# Ground truth means and covariances for the data we'll generate
means_gt = [ [1,10], [10,1], [10,10] ]
sigmas_gt = [ np.matrix([[1, 0],[0, 1]]), np.matrix([[4,0],[0,1]]),
np.matrix([[1,0],[0,4]]) ]
# Ground truth Prior probability (phi_j) for each cluster
phi_gt = [ 0.2, 0.2, 0.6 ]
# For more interesting covariances, you can also try, for example,
# [[11.31371, -0.70711],[11.31371, 0.70711]] or
# [[11.31371, 0.70711],[-11.31371, 0.70711]].
# Size of dataset
m = 500
# number of variables
n = len(means_gt[0])
# k number of clusters/outcomes
k = len(phi_gt)
# Ground truth indices of cluster identities
Z = [0]*m
# Generate a new k-means dataset
def gen_dataset():
X = np.zeros((m,n))
# Generate m samples from multinomial distribution using phi_gt
z_vectors = np.random.multinomial(1, phi_gt, size=m) # Result: binary matrix of size (m x k)
for i in range(m):
# Convert one-hot representation z_vectors[i,:] to an index
Z[i] = np.where(z_vectors[i,:] == 1)[0][0]
# Grab ground truth mean mu_{z^i}
mu = means_gt[Z[i]]
# Grab ground truth covariance Sigma_{z^i}
sigma = sigmas_gt[Z[i]]
# Sample a 2D point from mu, sigma
X[i,:] = np.random.multivariate_normal(mu,sigma,1)
return X
X = gen_dataset()
# -
# Next, the EM algorithm itself. We have an initialization step and an iterative step.
# +
def init_gmm(X, k):
m = X.shape[0]
n = X.shape[1]
Mu = np.zeros((n,k))
Sigma = np.zeros((k,n,n))
Phi = np.zeros(k)
order = np.random.permutation(m)
for j in range(k):
# Initially assign equal probability to each cluster/outcome
Phi[j] = 1/k
# Ramdomly assign mean to one of the data points
Mu[:,j] = X[order[j],:].T
# Initial covariance is identity matrix
Sigma[j,:,:] = np.eye(n)
# print('Sigma',Sigma)
return Phi, Mu, Sigma
def Gaussian(X, mean, covariance):
k = len(mean)
X = X - mean.T
p = 1/((2*np.pi)**(k/2)*(np.linalg.det(covariance)**0.5)) * np.exp(-0.5 * np.sum(X @ np.linalg.pinv(covariance) * X, axis=1))
return p
# Run one iteration of EM
def iterate_em_gmm(X, threshold, Phi, Mu, Sigma):
m = X.shape[0]
n = X.shape[1]
k = len(Phi)
threshold = np.reshape(np.repeat(threshold, n*k), (n,k))
pj_arr = np.zeros((m,k))
# E-step: calculate w_j^i
W = np.zeros((m, k))
for j in range(k):
pj = Gaussian(X, Mu[:,j], Sigma[j,:,:])
pj_arr[0:m,j] = pj
W[:,j] = Phi[j] * pj
# W tells us what is the relative weight of each cluster for each data point
W[:,:] = W * np.tile(1/np.sum(W,1),(k,1)).T
# M-step: adjust mean and sigma
Phi[:] = sum(W) / m
Mu_previous = Mu.copy()
for j in range(k):
# Split cluster specific W for each dimension
Wj = np.tile(W[:,j],(n,1)).T
# Compute Mu for each variable for each cluster
Mu[:,j] = sum(X * Wj)/sum(Wj)
Muj = np.tile(Mu[:,j],(m,1))
Sigma[j,:,:] = np.matmul((X - Muj).T, (X - Muj) * Wj) / sum(W[:,j])
if (abs(Mu-Mu_previous) <= threshold).all():
converged = True
else:
converged = False
labels = np.argmax(pj_arr, axis = 1) # find most probable cluster
pj = pj_arr[np.arange(0,m,1),labels]
X_label = np.insert(X, 2, labels, axis=1) # add label at column index 2
return converged, pj, X_label
# -
# Let's run the model to convergence:
# +
threshold = np.matrix(.01)
Phi, Mu, Sigma = init_gmm(X, k)
# print('Phi: ', Phi)
# print('Mu: ',Mu)
# print('Sigma: ', Sigma)
converged = False
while not converged:
converged, pj, X_label = iterate_em_gmm(X, threshold, Phi, Mu, Sigma)
# print(converged)
# print(pj.shape)
# print(pj)
# print(X_label.shape)
# print(X_label)
# print(X)
# -
# ### In-class exercise
#
# Determine how close the estimated parameters Phi, Mu, and Sigma are to the ground truth values set up at the beginning
# of the experiment. Report your results and briefly discuss them in your lab report.
#
# Next, we continue to find outliers:
print('Phi: ', Phi)
print('init Phi', phi_gt)
print('Mu: ',Mu)
print('init mean', means_gt)
print('Sigma: ', Sigma)
print('init sigma', sigmas_gt)
# #### Report:
# As shown above, the estimated values are very close to the ground truth and, here, come out in the same order. However, the GMM algorithm does not always return the components in the same order as our inputs.
# For example, if the ground-truth Phi is (0.2, 0.2, 0.6), the algorithm may sometimes return (0.2, 0.6, 0.2). A simple way to align the components is sketched below.
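# Since EM can recover the mixture components in any order, one simple (illustrative) way to line them up before comparing parameters is to match each estimated mean to its nearest ground-truth mean:
for j in range(k):
    dists = [np.linalg.norm(Mu[:, j] - np.array(mu)) for mu in means_gt]
    j_gt = int(np.argmin(dists))
    print(f"estimated cluster {j} ~ ground-truth cluster {j_gt}: phi {Phi[j]:.2f} vs {phi_gt[j_gt]:.2f}")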
outlier_prob = .01
outliers = np.nonzero(pj < outlier_prob)[0]
outliers
# +
plt.figure(figsize=(10,6))
plt.scatter(X[:,0],X[:,1],marker=".",c=pj,cmap='viridis');
plt.scatter(X[outliers,0],X[outliers,1],marker="o",facecolor="none",edgecolor="r",s=70);
plt.plot(Mu[0,0], Mu[1,0],'bx',Mu[0,1], Mu[1,1],'bx', Mu[0,2], Mu[1,2],'bx')
for c in np.arange(0,k):
X_class = X[np.where(X_label[:,2] == c)[0],:]
xlist = np.linspace(min(X_class[:,0]), max(X_class[:,0]), 30)
ylist = np.linspace(min(X_class[:,1]), max(X_class[:,1]), 30)
XX, YY = np.meshgrid(xlist, ylist)
Z = np.zeros(XX.shape)
i = 0
while i < XX.shape[0]:
j = 0
while j < XX.shape[0]:
pt = np.array([[XX[i,j], YY[i,j]]])
Z[i,j] = Gaussian(pt, Mu[:,c], Sigma[c])[0]
j = j + 1
i = i + 1
cp = plt.contour(XX, YY, Z)
plt.title('Inliers and outliers according to GMM model')
plt.show()
# -
# ### In-class exercise
#
# Notice that using a hard threshold for each cluster gives us more outliers for a broad cluster than a
# tight cluster. First, understand why, and explain in your report. Second, read about Mahalanobis distance
# of a point to the mean of a multivariate Gaussian distribution and see if you can use Mahalanobis distance
# to get a better notion of outliers in this dataset.
# +
def Mahalanobis(X,mean,covariance):
n= len(mean)
X= X-mean.T
    p = np.sqrt(np.sum(X @ np.linalg.pinv(covariance) * X, axis=1))
return p
def mahalanobis_distance(X_labeled,Mu, Sigma):
m = X_labeled.shape[0]
n = X_labeled.shape[1] - 1
X = X_labeled[:,0:-1]
labels = X_labeled[:,-1].astype(int)
k = max(labels) + 1
md_array = np.zeros((m,k))
for j in range(k):
md_array[:,j] = Mahalanobis(X, Mu[:,j],Sigma[j,:,:])
return md_array
mahala_thresh = 2.5
md = mahalanobis_distance(X_label, Mu, Sigma)
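# One possible way to use these distances for outlier detection (an illustrative sketch): compare each point's
# Mahalanobis distance to its own cluster against mahala_thresh.
md_own = md[np.arange(md.shape[0]), X_label[:, 2].astype(int)]
outliers_md = np.nonzero(md_own > mahala_thresh)[0]
plt.figure(figsize=(10, 6))
plt.scatter(X[:, 0], X[:, 1], marker=".", c=md_own, cmap='viridis')
plt.scatter(X[outliers_md, 0], X[outliers_md, 1], marker="o", facecolor="none", edgecolor="r", s=70)
plt.title('Outliers according to per-cluster Mahalanobis distance')
plt.show()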
# Shuffle the data first
data, labels = sklearn.utils.shuffle(data, labels, random_state=42)
# -
# ## Have a look at the data
#
# We will not go through all steps in the Recipe, nor in depth.
#
# But here's a peek
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" deletable=false editable=false id="0zuY0AOl_K1R" nbgrader={"cell_type": "code", "checksum": "8c4131ec53f7308cedbfacf445f24806", "grade": false, "grade_id": "cell-90cee4495de96935", "locked": true, "schema_version": 3, "solution": false} outputId="8e2ff048-a6b6-4919-e40d-8381aee59af7"
# Visualize the data samples
helper.showData(data[:25], labels[:25])
# -
# ## Eliminate the color dimension
#
# As a simplification, we will convert the image from color (RGB, with 3 "color" dimensions referred to as Red, Green and Blue) to gray scale.
#
# +
print("Original shape of data: ", data.shape)
w = (.299, .587, .114)   # standard luminance weights for the R, G, B channels
data_bw = np.sum(data * w, axis=3)
print("New shape of data: ", data_bw.shape)
data_orig = data.copy()
# -
# Visualize the data samples
helper.showData(data_bw[:25], labels[:25], cmap="gray")
# + [markdown] colab_type="text" deletable=false editable=false id="6uCIcmcDmlkw" nbgrader={"cell_type": "markdown", "checksum": "69971bd52dd89d315a4178ec2b1a6875", "grade": false, "grade_id": "cell-cdda65487ad48f16", "locked": true, "schema_version": 3, "solution": false}
# ## Have a look at the data: Examine the image/label pairs
#
# Rather than viewing the examples in random order, let's group them by label.
#
# Perhaps we will learn something about the characteristics of images that contain ships.
#
# We have loaded and shuffled our dataset, now we will take a look at image/label pairs.
#
# Feel free to explore the data using your own ideas and techniques.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" deletable=false editable=false id="2k9YAbAZDbqE" nbgrader={"cell_type": "code", "checksum": "974d33fdf5602fb253893a018a76e45f", "grade": false, "grade_id": "cell-4f5e0d3c18f2d4b8", "locked": true, "schema_version": 3, "solution": false} outputId="c24286e7-6991-4b78-9716-bad69e27ccfd"
# Inspect some data (images)
num_each_label = 10
for lab in np.unique(labels):
# Fetch images with different labels
X_lab, y_lab = data_bw[ labels == lab ], labels[ labels == lab]
# Display images
fig = helper.showData( X_lab[:num_each_label], [ str(label) for label in y_lab[:num_each_label] ], cmap="gray")
_ = fig.suptitle("Label: "+ str(lab), fontsize=14)
print("\n\n")
# + [markdown] colab_type="text" deletable=false editable=false id="i_418VKqmvwy" nbgrader={"cell_type": "markdown", "checksum": "e9756d5625bcf177e6f4f3c85c18b7b4", "grade": false, "grade_id": "cell-34eb095f07300d27", "locked": true, "schema_version": 3, "solution": false}
# # Make sure the features are in the range [0,1]
#
# **Warm up exercise:** When we want to train image data, the first thing we usually need to do is scaling.
#
# Since the feature values in our image data are between 0 and 255, to make them between 0 and 1, we need to divide them by 255.
#
# We also need to consider how to represent our target values
# - If there are more than 2 possible target values, One Hot Encoding may be appropriate
# - **Hint**: Lookup `tf.keras.utils.to_categorical`
# - If there are only 2 possible targets with values 0 and 1 we can use these targets without further encoding
#
# **Question**
# - Set variable `X` to be our gray-scale examples (`data_bw`), but with values in the range [0,1]
# - Set variable `y` to be the representation of our target values
#
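# For reference, a tiny illustration of the `to_categorical` hint above (not needed here, since our 0/1 targets can be used directly; the example labels below are made up):
from tensorflow.keras.utils import to_categorical
print(to_categorical([0, 2, 1, 2], num_classes=3))  # one row per label, one column per class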
# + colab={} colab_type="code" deletable=false id="EBJaZ3qyDq65" nbgrader={"cell_type": "code", "checksum": "a6d9e4b93a2f2c74eb059819631d00af", "grade": false, "grade_id": "Scale_the_data", "locked": false, "schema_version": 3, "solution": true, "task": false}
# Scale the data
# Assign values for X, y
# X: the array of features
# y: the array of labels
# The length of X and y should be identical and equal to the length of data.
from tensorflow.keras.utils import to_categorical
X, y = np.array([]), np.array([])
# YOUR CODE HERE
X = data_bw/255
y = labels
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "a7641afc3e2aaa2cb7898ceeb4e28e5f", "grade": false, "grade_id": "cell-a80cad4b10d52d33", "locked": true, "schema_version": 3, "solution": false}
# Check if your solution is right
assert X.shape == (4000, 80, 80)
assert y.shape == (4000,)
# + [markdown] colab_type="text" deletable=false editable=false id="aHnlcZ4WNN1T" nbgrader={"cell_type": "markdown", "checksum": "7292329e03fd9354da9f6e490e710040", "grade": false, "grade_id": "cell-96db16f7139d0dc5", "locked": true, "schema_version": 3, "solution": false}
# # Split data into training data and testing data
# To train and evaluate a model, we need to split the original dataset into
# a training subset (in-sample) and a test subset (out of sample).
#
# We will do this for you in the cell below.
#
# **DO NOT** shuffle the data until after we have performed the split into train/test sets
# - We want everyone to have the **identical** test set for grading
# - Do not change this cell
#
# + colab={} colab_type="code" deletable=false editable=false id="OhmoI5erNf7I" nbgrader={"cell_type": "code", "checksum": "64712b41f1274fc8f3fae0c384aa0744", "grade": false, "grade_id": "cell-833fa27d89a1170e", "locked": true, "schema_version": 3, "solution": false}
# Split data into train and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
# Save X_train, X_test, y_train, y_test for final testing
if not os.path.exists('./data'):
os.mkdir('./data')
np.savez_compressed('./data/train_test_data.npz', X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
# + [markdown] colab_type="text" deletable=false editable=false id="7JTJltPLJYp3" nbgrader={"cell_type": "markdown", "checksum": "b1a7457d3ff74b17d20660b16b0ef3c5", "grade": false, "grade_id": "cell-c3845037b6f7611c", "locked": true, "schema_version": 3, "solution": false}
# # Create a model using only Classification, no data transformation (other than reshaping)
#
# **Question:** You need to build a 1-layer (head layer only) network model with `tensorflow.keras`. Please name the head layer "dense_head".
#
# Set variable `model0` to be a Keras `Sequential` model object that implements your model.
#
# **Hints:**
# 1. Since the dataset is 2-dimensional, you may want to use `Flatten()` in `tensorflow.keras.layers` to make your input data 1 dimensional.
# - The `input shape` argument of the `Flatten()` layer should be the shape of a single example
# 2. The number of units in your head layer
# - Depends on how you represented the target
# - It should be equal to the final dimension of `y`
#
# 3. Activation function for the head layer: Since this is a classification problem
# - Use `sigmoid` if your target's final dimension equals 1
# - Use `softmax` if your target's final dimension is greater than 1
#
# - A Dropout layer maybe helpful to prevent overfitting and accelerate your training process.
# - If you want to use a Dropout layer, you can use `Dropout()`, which is in `tensorflow.keras.layers`.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 775} colab_type="code" deletable=false id="wFF00mA7PUYD" nbgrader={"cell_type": "code", "checksum": "496b7c7c9683570db9b7566a07c1600e", "grade": false, "grade_id": "build_model_0", "locked": false, "schema_version": 3, "solution": true, "task": false} outputId="56c302a5-efba-4c2e-bb3e-08f58c6e4dd9"
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import layers
from tensorflow.keras.layers import Flatten
# Get the number of unique labels
num_cases = np.unique(labels).shape[0]
if num_cases > 2:
activation = "softmax"
loss = 'categorical_crossentropy'
else:
activation = "sigmoid"
num_cases = 1
loss = 'binary_crossentropy'
# Set model0 equal to a Keras Sequential model
model0 = None
# YOUR CODE HERE
model0 = Sequential()
model0.add(Flatten(input_shape=(80,80)))
model0.add(layers.Dense(units=num_cases,activation = activation, name = 'dense_head'))
model0.summary()
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "ba001e3561de54b3ed7f79255eebf999", "grade": false, "grade_id": "cell-6a9c8bd396c77106", "locked": true, "schema_version": 3, "solution": false}
# We can plot our model here using plot_model()
plot_model(model0)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "46234c84d9ea666dc464a87822330172", "grade": false, "grade_id": "cell-30b12281f8b3ff4b", "locked": true, "schema_version": 3, "solution": false}
# ## Train model
#
# **Question:**
#
# Now that you have built your first model, you will compile and train it. The requirements are as follows:
#
# - Split the **training** examples `X_train, y_train` again !
# - 80% will be used for training the model
# - 20% will be used as validation (out of sample) examples
# - Use `train_test_split()` from `sklearn` to perform this split
# - Set the `random_state` parameter of `train_test_split()` to be 42
#
# - Loss function:
# - `binary_crossentropy` if your target is one-dimensional
# - `categorical_crossentropy`if your target is One Hot Encoded
# - Metric: "accuracy"
# - Use exactly 15 epochs for training
# - Save your training results in a variable named `history0`
# - Plot your training results using the`plotTrain` method described in the Student API above.
#
# + deletable=false nbgrader={"cell_type": "code", "checksum": "0c09cf3d37a6b41970765ea57a924ad1", "grade": false, "grade_id": "train_model_0", "locked": false, "schema_version": 3, "solution": true, "task": false}
model_name0 = "Head only"
n_epochs = 15
# YOUR CODE HERE
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
model0.compile(loss = 'binary_crossentropy', metrics = ['accuracy'])
history0 = model0.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history0,model_name0)
# + [markdown] colab_type="text" deletable=false editable=false id="BHsceFTbpnvr" nbgrader={"cell_type": "markdown", "checksum": "5ba97ae5213806fb2515143575b5820a", "grade": false, "grade_id": "cell-e1886c4e415f795e", "locked": true, "schema_version": 3, "solution": false}
# ## How many weights in the model ?
#
# **Question:**
#
# Calculate the number of parameters in your model.
#
# Set variable `num_parameters0` to be equal to the number of parameters in your model.
#
# **Hint:** The model object may have a method to help you ! Remember that Jupyter can help you find the methods that an object implements.
#
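# As a quick sanity check, the count for the flatten-plus-dense head above can also be
# worked out by hand: each output unit has one weight per input pixel plus one bias.
# +
# Hand calculation (sketch): Flatten(80x80) -> Dense(num_cases)
expected_params = 80 * 80 * num_cases + num_cases
print("Expected parameter count:", expected_params)
# -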
# + colab={} colab_type="code" deletable=false id="dgDetvgupt58" nbgrader={"cell_type": "code", "checksum": "e57b9d8715dccf58ebe95c909ae50366", "grade": false, "grade_id": "num_of_parameters", "locked": false, "schema_version": 3, "solution": true, "task": false}
# Set num_parameters0 equal to the number of weights in the model
num_parameters0 = None
# YOUR CODE HERE
num_parameters0 = model0.count_params()
print("Parameters number in model0: ", num_parameters0)
# + [markdown] colab_type="text" deletable=false editable=false id="3hR7QB27Jhx8" nbgrader={"cell_type": "markdown", "checksum": "7b8d9891180d2506ce86f4dca27c36ec", "grade": false, "grade_id": "cell-89fa3731299113d7", "locked": true, "schema_version": 3, "solution": false}
# ## Evaluate the model
#
# **Question:**
#
# We have trained our model. We now need to evaluate the model using the test dataset created in an earlier cell.
#
# Please store the model score in a variable named `score0`.
#
# **Hint:** The model object has a method `evaluate`. Use that to compute the score.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" deletable=false id="_y1rTpduJhkE" nbgrader={"cell_type": "code", "checksum": "725a32e9ed3c93ca3e75e07434b9ca0b", "grade": false, "grade_id": "evaluate_model_0", "locked": false, "schema_version": 3, "solution": true, "task": false} outputId="8e8c0c99-2c80-4ec3-bab9-e08de97c3807"
score0 = []
# YOUR CODE HERE
score0 = model0.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name0, l=score0[0], a=score0[1]))
# + [markdown] colab_type="text" deletable=false editable=false id="pNkyZ8VRd-d3" nbgrader={"cell_type": "markdown", "checksum": "e892deef94afaba92aac77619938fee1", "grade": false, "grade_id": "cell-6aed77f4dd85c016", "locked": true, "schema_version": 3, "solution": false}
# ## Save the trained model0 and history0 for submission
#
# Your fitted model can be saved for later use
# - In general: so you can resume training at a later time
# - In particular: to allow us to grade it !
#
# Execute the following cell to save your model, which you will submit to us for grading.
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" deletable=false editable=false id="OesE2_-gd-d4" nbgrader={"cell_type": "code", "checksum": "d606c0cf160b9933361a145e1ec69f44", "grade": false, "grade_id": "cell-c9cde4b2b69397c4", "locked": true, "schema_version": 3, "solution": false} outputId="c4cf36e4-00bc-4d73-8aa2-7cae8724f4a8"
helper.saveModel(model0, model_name0)
helper.saveHistory(history0, model_name0)
# -
# **Question:**
#
# Make sure that the saved model can be successfully restored.
# - Set variable `model_loss` to the value of the loss parameter you used in the `compile` statement for your model
# - Set variable `model_metrics` to the value of the metrics parameter you used in the `compile` statement for your model
# + colab={} colab_type="code" deletable=false id="A_E8q6JBd-d7" nbgrader={"cell_type": "code", "checksum": "ffcdcbf036218311cc69e9805b51f25e", "grade": false, "grade_id": "cell-a006ec4f99fbefdb", "locked": false, "schema_version": 3, "solution": true, "task": false}
## Restore the model (make sure that it works)
model_loss=None
model_metrics=None
# YOUR CODE HERE
model_loss = 'binary_crossentropy'
model_metrics = ['accuracy']
model_loaded = helper.loadModel(model_name0, loss=model_loss, metrics=model_metrics)
score_loaded = model_loaded.evaluate(X_test, y_test, verbose=0)
assert score_loaded[0] == score0[0] and score_loaded[1] == score0[1]
# + [markdown] colab_type="text" deletable=false editable=false id="JXX-_3_SKsla" nbgrader={"cell_type": "markdown", "checksum": "c0ebe7ceb5eba607c2ab623c2aeb2186", "grade": false, "grade_id": "cell-67c1b5674378d4f6", "locked": true, "schema_version": 3, "solution": false}
# # Create a new model with an additional Dense layer
#
# **Question:**
#
# We will add more layers to the original model0.
#
# - You need to add **AT LEAST ONE** Dense layer followed by an activation function (for example, ReLU)
# - You can add more layers if you like
#
# - The number of units in your very **FIRST** Dense layer should be equal to the value of variable `num_features_1`, as set below.
# - Please name this Dense layer "dense_1" and the head layer "dense_head".
#
# **Hints:**
# - Don't forget to flatten your input data!
# - A Dropout layer may be helpful to prevent overfitting during training.
# - If you want to use a Dropout layer, you can use `Dropout()`, which is in `tensorflow.keras.layers`.
#
# Hopefully your new model performs **better** than your first.
# + colab={} colab_type="code" deletable=false id="1Ga3U-syPaCz" nbgrader={"cell_type": "code", "checksum": "f0cc8dcc75e6ae7ed167182ac58a72d8", "grade": false, "grade_id": "build_model_1", "locked": false, "schema_version": 3, "solution": true, "task": false}
# Set model1 equal to a Keras Sequential model
model1 = None
num_features_1 = 32
# YOUR CODE HERE
model1 = Sequential()
model1.add(Flatten(input_shape=(80,80)))
model1.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_1'))
model1.add(layers.Dropout(.2))
model1.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_2'))
model1.add(layers.Dropout(.2))
model1.add(layers.Dense(units=2,activation = "softmax", name = 'dense_head'))
model1.summary()
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "132f85abb517e70b229f05e66d7aea77", "grade": false, "grade_id": "cell-49f7ed68ba2f5834", "locked": true, "schema_version": 3, "solution": false}
# Plot your model
plot_model(model1)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "003dcf2909584528dfaa2dcef36d0771", "grade": false, "grade_id": "cell-ad82889fc971e9fd", "locked": true, "schema_version": 3, "solution": false}
# ## Train your new model
#
# **Question:**
#
# Now that you have built your new model1, you will compile and train model1. The requirements are as follows:
#
# - Split the **training** examples `X_train, y_train` again !
# - 80% will be used for training the model
# - 20% will be used as validation (out of sample) examples
# - Use `train_test_split()` from `sklearn` to perform this split
# - Set the `random_state` parameter of `train_test_split()` to be 42
#
# - Loss function: `categorical_crossentropy`; Metric: "accuracy"
# - Use exactly 15 epochs for training
# - Save your training results in a variable named `history1`
# - Plot your training results using the `plotTrain` method described in the Student API above.
#
# + deletable=false nbgrader={"cell_type": "code", "checksum": "64771c7f5f17b552d3e49ebcc79c64e6", "grade": false, "grade_id": "train_model_1", "locked": false, "schema_version": 3, "solution": true, "task": false}
from sklearn.preprocessing import OneHotEncoder
# Train the model using the API
model_name1 = "Dense + Head"
n_epochs = 15
# YOUR CODE HERE
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
# Transform y using one-hot encoder
enc = OneHotEncoder(handle_unknown='ignore',sparse = False)
y_train = enc.fit_transform(y_train.reshape(-1,1))
y_valid = enc.transform(y_valid.reshape(-1,1))
y_test = enc.transform(y_test.reshape(-1,1))
model1.compile(loss = 'categorical_crossentropy', metrics = ['accuracy'])
history1 = model1.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history1,model_name1)
# + [markdown] colab_type="text" deletable=false editable=false id="XzU2xKWcsEAo" nbgrader={"cell_type": "markdown", "checksum": "3e56c0a547971ddbc967abdb1c51d733", "grade": false, "grade_id": "cell-a048e8f185c7dafe", "locked": true, "schema_version": 3, "solution": false}
# ## How many weights in this model ?
#
# **Question:** Calculate the number of parameters in your new model.
#
# Set variable `num_parameters1` to be equal to the number of parameters in your model.
# + colab={} colab_type="code" deletable=false id="gXs0EZZVsHqB" nbgrader={"cell_type": "code", "checksum": "ec19e2f52d39525cd286d7a54c170f74", "grade": false, "grade_id": "num_parameters_model_1", "locked": false, "schema_version": 3, "solution": true, "task": false}
# Set num_parameters1 equal to the number of weights in the model
num_parameters1 = None
# YOUR CODE HERE
num_parameters1 = model1.count_params()
print('Parameters number in model1:', num_parameters1)
# + [markdown] colab_type="text" deletable=false editable=false id="gfBjJLU7J7L4" nbgrader={"cell_type": "markdown", "checksum": "5347a35355e5f9d5c36eb95521ba4e95", "grade": false, "grade_id": "cell-848f6eee66efac66", "locked": true, "schema_version": 3, "solution": false}
# ## Evaluate the model
#
# **Question:**
#
# Evaluate the new model using the test dataset. Please store the model score in a variable named `score1`.
# + colab={} colab_type="code" deletable=false id="A5wFSFvwJ68n" nbgrader={"cell_type": "code", "checksum": "fab09086bc48ea9626c354bacd9cff13", "grade": false, "grade_id": "evaluate_model_1", "locked": false, "schema_version": 3, "solution": true, "task": false}
score1 = []
# YOUR CODE HERE
score1 = model1.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name1, l=score1[0], a=score1[1]))
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "aa58ff9a9fe6f218c5b2beec42123be0", "grade": false, "grade_id": "cell-f967fa336e8b8b82", "locked": true, "schema_version": 3, "solution": false}
# Your new test score should be at least 0.8
# -
# ## Use Binary Cross Entropy
# +
model2 = Sequential()
model2.add(Flatten(input_shape=(80,80)))
model2.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_1'))
model2.add(layers.Dropout(.2))
model2.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_2'))
model2.add(layers.Dropout(.2))
model2.add(layers.Dense(units=1,activation = "sigmoid", name = 'dense_head'))
model_name2 = "Dense + Head + BinaryCrossEntropy"
n_epochs = 15
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
model2.compile(loss = 'binary_crossentropy', metrics = ['accuracy'])
history2 = model2.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history2,model_name2)
# +
score2 = model2.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name2, l=score2[0], a=score2[1]))
# -
# In this run, the model trained with binary cross-entropy loss scored higher than the one trained with categorical cross-entropy loss
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "6b8cfe312cca136aeceb3a54a7a80361", "grade": false, "grade_id": "cell-32ebfba806e4cfd6", "locked": true, "schema_version": 3, "solution": false}
# # Save your trained model1 and history1
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "82f11493c01709c6689f0af2fc10f1cd", "grade": false, "grade_id": "cell-f7a3c4ef4265b014", "locked": true, "schema_version": 3, "solution": false}
helper.saveModel(model1, model_name1)
helper.saveHistory(history1, model_name1)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "80e0c6eebda69480ca4f800fbc2b3360", "grade": false, "grade_id": "cell-4a0132560885e887", "locked": true, "schema_version": 3, "solution": false}
# ## Your own model (Optional)
# Now you can build your own model using what you have learned from the course. Some ideas to try:
# - Try more than one additional `Dense` layer
# - Change the number of units (features) of your `Dense` layers.
# - Add `Dropout` layers; vary the parameter
# - Change the activation function
# - Change the classifier
# - ...
#
# Observe the effect of each change on the Loss and Accuracy.
#
# - You may want to use early stopping in training (a minimal sketch follows this list)
#   - In order to stop training when model metrics *worsen* rather than *improve*
#
# -
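# A minimal early-stopping sketch (assuming the same compile/fit pattern used above;
# the patience value is an arbitrary choice):
# +
from tensorflow.keras.callbacks import EarlyStopping

# Stop when the validation loss stops improving and restore the best weights seen
early_stop = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
# Usage (sketch): pass the callback to fit()
# history = model.fit(X_train, y_train, epochs=50,
#                     validation_data=(X_valid, y_valid),
#                     callbacks=[early_stop])
# -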
# ## Try adding additional layers
# +
model3 = Sequential()
model3.add(Flatten(input_shape=(80,80)))
model3.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_1'))
model3.add(layers.Dropout(.2))
model3.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_2'))
model3.add(layers.Dropout(.2))
model3.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_3'))
model3.add(layers.Dropout(.2))
model3.add(layers.Dense(units=num_features_1, activation = tf.nn.relu, name = 'dense_4'))
model3.add(layers.Dropout(.2))
model3.add(layers.Dense(units=1,activation = "sigmoid", name = 'dense_head'))
model3.summary()
# +
model_name3 = "Add additional layers"
n_epochs = 15
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
model3.compile(loss = 'binary_crossentropy', metrics = ['accuracy'])
history3 = model3.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history3,model_name3)
# +
score3 = model3.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name3, l=score3[0], a=score3[1]))
# -
# From the results above, adding additional layers does not improve the accuracy in this run
# ## Change the number of units
# +
model4 = Sequential()
model4.add(Flatten(input_shape=(80,80)))
model4.add(layers.Dense(units=128, activation = tf.nn.relu, name = 'dense_1'))
model4.add(layers.Dropout(.2))
model4.add(layers.Dense(units=64, activation = tf.nn.relu, name = 'dense_2'))
model4.add(layers.Dropout(.2))
model4.add(layers.Dense(units=32, activation = tf.nn.relu, name = 'dense_3'))
model4.add(layers.Dropout(.2))
model4.add(layers.Dense(units=16, activation = tf.nn.relu, name = 'dense_4'))
model4.add(layers.Dropout(.2))
model4.add(layers.Dense(units=1,activation = "sigmoid", name = 'dense_head'))
model_name4 = "Changing number of units"
n_epochs = 15
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
model4.compile(loss = 'binary_crossentropy', metrics = ['accuracy'])
history4 = model4.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history4,model_name4)
#print score
score4 = model4.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name4, l=score4[0], a=score4[1]))
# -
# Changing the number of units does not significantly improve the results
# ## Vary the parameter of dropout layers
for i in [0.1, 0.2, 0.3]:
model5 = Sequential()
model5.add(Flatten(input_shape=(80,80)))
model5.add(layers.Dense(units=32, activation = tf.nn.relu, name = 'dense_1'))
model5.add(layers.Dropout(i))
model5.add(layers.Dense(units=32, activation = tf.nn.relu, name = 'dense_2'))
model5.add(layers.Dropout(i))
model5.add(layers.Dense(units=1,activation = "sigmoid", name = 'dense_head'))
model_name5 = "Changing dropout rate"
n_epochs = 15
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
model5.compile(loss = 'binary_crossentropy', metrics = ['accuracy'])
history5 = model5.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history5,model_name5)
#print score
score5 = model5.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name5, l=score5[0], a=score5[1]))
# As the dropout rate increases, the accuracy decreases.
# ## Change the activation function
for activation_function in ['softmax', 'sigmoid', 'relu']:
model6 = Sequential()
model6.add(Flatten(input_shape=(80,80)))
model6.add(layers.Dense(units=32, activation = activation_function, name = 'dense_1'))
    model6.add(layers.Dropout(0.2))  # use a fixed dropout rate here (0.2)
    model6.add(layers.Dense(units=32, activation = activation_function, name = 'dense_2'))
    model6.add(layers.Dropout(0.2))  # use a fixed dropout rate here (0.2)
model6.add(layers.Dense(units=1,activation = "sigmoid", name = 'dense_head'))
model_name6 = "Changing activation function"
n_epochs = 15
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
model6.compile(loss = 'binary_crossentropy', metrics = ['accuracy'])
history6 = model6.fit(X_train, y_train, epochs = n_epochs, validation_data = (X_valid, y_valid))
helper.plotTrain(history6,model_name6)
#print score
score6 = model6.evaluate(X_test, y_test)
print("{n:s}: Test loss: {l:3.2f} / Test accuracy: {a:3.2f}".format(n=model_name6, l=score6[0], a=score6[1]))
# From the printed scores above, the sigmoid activation function performs best among the three in this comparison
| 36,753 |
/core/train_SVR.ipynb
|
49d6b56ac52595979ad9bfb8204072780aae2c25
|
[
"MIT"
] |
permissive
|
m-wessler/snow-liquid-ratio
|
https://github.com/m-wessler/snow-liquid-ratio
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 55,602 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="7b073cc577c4d8466b3cd192215c96714776b87a"
# # Predicting spot prices for AWS EC2 Instances
#
# 
# + [markdown] _uuid="7bad6db2cda184ce3cd4365e3c29e9b940c7cf99"
# # Table of Contents
#
# * Introduction
# * Background
# * Import libraries
# * EDA (Exploratory Data Analysis)
# * Cleaning
# * Implement Model
# * Conclusion on results
# + [markdown] _uuid="b1784ff7fe303a9161ddd9e820a331c1de9fd809"
# # Introduction
#
# The purpose of this experiment is to train a deep learning model to predict an outcome on time series data. I will be using the Fast.ai library for the model. More specifically, we will be predicting the Spot prices for specific regions.
# + [markdown] _uuid="15face38841009cf72374099634e1c0af702c32f"
# # Background
#
# Amazon Web Services [(AWS)](https://aws.amazon.com) provides virtual computing environments via their EC2 service. You can launch instances with your favourite operating system, select pre-configured instance images or create your own. This is relevant to data scientists because running deep learning models generally requires a machine with a good GPU; EC2 P2 and P3 instances can be configured with up to 8 and 16 GPUs respectively!
#
# However, you can request Spot Instance pricing, which charges you the Spot price in effect for the duration of your instance's running time. Spot prices are adjusted based on long-term trends in supply and demand for Spot Instance capacity, and Spot Instances can be discounted by up to 90% compared to On-Demand pricing.
#
#
# Our goal will be to predict Spot pricing for the different global regions on offer:
#
# * US East
# * US West
# * South America (East)
# * EU (European Union) West
# * EU Central
# * Canada
# * Asia Pacific North East 1
# * Asia Pacific North East 2
# * Asia Pacific South
# * Asia Pacific Southeast 1
# * Asia Pacific Southeast 2
#
#
#
#
# + [markdown] _uuid="14ca6867f2d46f9e68eb1ec285cb0005e60b3dd2"
# # Import Libraries
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import seaborn as sns
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from IPython.display import HTML, display
from fastai.structured import *
from fastai.column_data import *
np.set_printoptions(threshold=50, edgeitems=20)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../input"))
# + [markdown] _uuid="201278f8f2c44812147c22a9a915fdc22c26e595"
# Let's import all the tables
# + _uuid="15696f1e4a1081f3a5a783d4a65759d6f0a4e4e9"
PATH = "../input/"
PATH_WRITE = "/kaggle/working/"
# + _uuid="1333ffc434045d439ccdf75599713b01524fb9d4"
# ls {PATH}
# + _uuid="d2073317a9e4d38d920b48a4b04aed4f47d9af98"
table_names = ['ap-southeast-2', 'ap-northeast-2', 'ca-central-1', 'us-east-1',
'ap-northeast-1', 'ap-south-1', 'sa-east-1', 'eu-west-1',
'ap-southeast-1', 'us-west-1', 'eu-central-1']
# + _uuid="6aa4b36a89e87d6b4e1c4535166ea8befb31a52a"
tables = [pd.read_csv(f'{PATH}{fname}.csv', low_memory=False) for fname in table_names]
# + [markdown] _uuid="5ad73f558fa28a266760e9d72fbe60b47afe89d9"
#
# + [markdown] _uuid="be2d115a8d3c888150ad7a65e0439126caf9aaf0"
#
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# # EDA
# + [markdown] _uuid="8182ec4199ee1d9a2cffb9c77a018b7b5b9b0709"
# Let's call `head` and take a look at what our data looks like.
# + _uuid="99cfee5cb7ae0f1821e1ebd2b484c3c280cc1d05"
for t in tables: display(t.head())
# + [markdown] _uuid="f1994a0ed99160540a45175bbd2f894b7794f15d"
# Let's call `summary` on each table
# + _uuid="6f11d0dce90e036093f0b43717d877e84a83a9af"
for t in tables: display(DataFrameSummary(t).summary())
# + [markdown] _uuid="6e0269dd1faadb7b9c8845c68050ef6d805b7a0d"
# I think we need to change some of the column names
# + _uuid="3d04b074c36921d29a32ed584e5150d7fcf9f11d"
new_labels = ['Date', 'Instance Type', 'OS', 'Region', 'Price ($)']
# + _uuid="5c4c71fe145020dfabd7664fdfad9957d9580467"
for t in tables:
t.columns = new_labels
# + _uuid="4edb090bfcb6f7da8731cb19b82b8b969a8f2a65"
for t in tables: display(t.head())
# + _uuid="0e6c6f2f9560a1ae5174e4a109e553865d7c2ce0"
for t in tables:
plt.figure(figsize=(12,8))
sns.countplot(t['Instance Type'], order=t['Instance Type'].value_counts().iloc[:20].index)
plt.xticks(rotation=90);
# + [markdown] _uuid="0927bba0acb6e575c8314a81e1ddf1b37d39593d"
# List of questions to ask:
#
# * Average price for certain instances in each region (a pandas sketch follows this list)
# * Most frequent instance type
# * Seasonality of instances
# * Determine if there are any stationary variables
# * Which instance type is most frequently linked with which OS?
# * Need to plot instances in time intervals, e.g. between 21:00 - 22:00
#
# Also need to figure out how to give each region a table name for the graphs.
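#
# A quick pandas sketch for the first question above, taking the first region's table as
# an example (column names are the renamed labels from above):
# +
# Average spot price per (Region, Instance Type) for one table
avg_price = (tables[0]
             .groupby(['Region', 'Instance Type'])['Price ($)']
             .mean()
             .sort_values(ascending=False))
avg_price.head(10)
# -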
# + [markdown] _uuid="cfb5327d9a5eabb4ab81c465e907612a29e60665"
# Let's look at the tables separately:
# + [markdown] _uuid="c97c7783e5f10166d31470fd77d4f15412d7f81c"
# # US East
# + _uuid="8f0080bd992cc111fa212f68725e7d4143d42bd7"
us_east = pd.read_csv("../input/us-east-1.csv")
PATH_USEAST = "../input/us-east-1.csv"
# + _uuid="771b9559727208d9cc4a92908ed2299783502221"
us_east.columns = new_labels
us_east.head()
# + _uuid="3a5295d73b5cd33f781389af425279e368615dff"
us_east['Date'].head()
# + [markdown] _uuid="67249332ca94a3ddef869438a140851faf220649"
# We need to parse the dates, otherwise they will not appear on the axis. The format string needs to match the format in the column EXACTLY! For more info look [here](http://strftime.org/) and [here](https://codeburst.io/dealing-with-datetimes-like-a-pro-in-pandas-b80d3d808a7f)
# + _uuid="8050f3757339a1f7d4ac6a5cf53742a822e7eb1f"
us_east['Date'] = pd.to_datetime(us_east['Date'], format='%Y-%m-%d %H:%M:%S+00:00', utc=False)
# + _uuid="3cadbe39219948fb1d426c64ffdeeb220aee53e8"
us_east.info()
# + _uuid="b669c11ac7ce0720272f1c58e726c601dbe770d6"
us_east['Date'].head(500)
# + [markdown] _uuid="890cccd166a16535c780c8fe9bfaf3f2c5970569"
# ## Instance: d2.xlarge
# + _uuid="702bd81a9aeefb67b322ecde7cb3c762c9cfb70a"
d2 = us_east[us_east['Instance Type'] == "d2.xlarge"].set_index('Date')
# + _uuid="e51c7233771be424504993b79eca75de49281a5d"
d2_Unix = us_east[(us_east['Instance Type'] == "d2.xlarge") & (us_east['OS'] == "Linux/UNIX")].set_index('Date')
# + _uuid="29d7ce1d5fa3568c6e5903cd1718b6a57901ec83"
d2_Suse = us_east[(us_east['Instance Type'] == "d2.xlarge") & (us_east['OS'] == "SUSE Linux")].set_index('Date')
# + _uuid="f804f552f9bad2a1f69d2812dd8292eef527a5d0"
d2_Win = us_east[(us_east['Instance Type'] == "d2.xlarge") & (us_east['OS'] == "Windows")].set_index('Date')
# + _uuid="31e4448882cec143bb652f0b990e4ad1437d67a8"
d2.head()
# + _uuid="d95860367c735bcfdb77d03961a989892ea59eb8"
d2.head(100).plot(title="d2.xlarge Instances", figsize=(15,10))
# + _uuid="056b6b2d3ea1ed5e3555db825cd5b32f0cc262a2"
d2_Suse.head(100).plot(title="d2.xlarge Instances OS:SUSE Linux", figsize=(15,10))
# + _uuid="e15d4358545a3b1e25dd06e08844f87078f511e0"
d2_Unix.head(100).plot(title="d2.xlarge Instances OS:Linux/UNIX", figsize=(15,10))
# + _uuid="1679d1a1ef9ae4ba38c61f26a564909c329029fd"
d2_Win.head(100).plot(title="d2.xlarge Instances OS:Windows", figsize=(15,10))
# + [markdown] _uuid="9e94558673e39d3f9dae356a4eff7884780b6b39"
# Looks like Windows instances can get quite pricey, with highs of roughly `$`7 - `$`29! 🤷♀️
# + _uuid="c14e29285bc45faa5dee702b9aeb63e45ec0c528"
# + [markdown] _uuid="b9b68b8e92d857d477f2c2f1b3232dd010dfa608"
# # Cleaning
# + [markdown] _uuid="8597eae5ce3f38d3639ac8d1f7f179f93107056c"
# Let's go over the steps to check whether we need to clean our dataset.
# + _uuid="7ed00f89ebaa4aca707565f3026da6845b723224"
us_east['Instance Type'].value_counts(dropna=False)
# + _uuid="dbf35b53819101f70fd2e4a7314bc6b74d769451"
us_east['OS'].value_counts(dropna=False)
# + _uuid="498cfb62d44d7ec0a8b8f3990a21a1d00771e58b"
us_east.isnull().sum().sum()  # total number of missing values across all columns
# + [markdown] _uuid="d32ba041302daf33b57c5cc8dcde6de4211c2c85"
# Out of the 3,721,999 entries, none have null values
# + [markdown] _uuid="26cbbc7c79e74b5138f74485f49010703ff1aeff"
# Let's train on another dataset
# + _uuid="dba41d247ed841e90f79fbf64988dd971d235576"
eu_west = pd.read_csv("../input/eu-west-1.csv")
PATH_euwest = "../input/"
# + _uuid="07068b76e9f552c529c12df4f2d9e9a897208faa"
eu_west.columns = new_labels
# + _uuid="d88b3507756b7897788f2b23d68e6da4172b17b0"
eu_west.info()
# + _uuid="8ce6ee52d87688e416ecb20d3e5f920c4563955e"
eu_west['Instance Type'].value_counts(dropna=False)
# + _uuid="208c5223a6888af8b55bf46c424cfbdab8973d38"
eu_west.isnull().sum().sum()  # total number of missing values across all columns
# + [markdown] _uuid="3f4f9f80d4da3fc49243b1b64664923adda826b0"
# # Implement Model
# + [markdown] _uuid="2752a3185ed1f147c4b74640ddd890933c57e6c7"
# Things to do:
#
# * Len the Instance type [done]
# * Add date part [done]
#
# * Create categorical & continuous vars [done] - `Elapsed` is the only continuous var we have
# * Process datasets [done]
# * Split dataset - via datetime [done]
# * Create RMSPE metric
# * Create model data object
# * Calculate embeddings (a hedged sketch follows the `cat_sz` cell below)
# * Train model (see the same sketch)
# + _uuid="562f468726ea67e65218898d00f1e6ca791ed282"
add_datepart(eu_west, 'Date', drop=False)
# + _uuid="493504dfffc43d7c81e477014083c3b52068322b"
eu_west.reset_index(inplace=True)
eu_west.to_feather(f'{PATH_WRITE}eu_west')
eu_west.shape
# + _uuid="f600ad3efe637a2fb80d3b4bed29f137dbb949a1"
eu_west=pd.read_feather(f'{PATH_WRITE}eu_west')
# + _uuid="a08768c6633df34ed24bc9f102331821924929ed"
eu_west.columns
# + _uuid="8a1a3381d944815b63b87d65af47c4eef29a64b9"
joined = eu_west
joined_test = eu_west
# + _uuid="295750f97cafbeec20373223840d2bd3f4a5357b"
joined.to_feather(f'{PATH_WRITE}joined')
joined_test.to_feather(f'{PATH_WRITE}joined_test')
# + _uuid="acefae1dc369cbd1929d0840ddc6f69c3b9b7fd8"
joined = pd.read_feather(f'{PATH_WRITE}joined')
joined_test = pd.read_feather(f'{PATH_WRITE}joined_test')
# + _uuid="8d38ae66a7c1e66e55b24aac927de6c2876367d1"
joined.head()
# + _uuid="bb4a47301e8500c903ce4d24afdcb3fba3016c47"
cat_vars = [
'Instance Type',
'OS',
'Region',
'Year' ,
'Month' ,
'Week' ,
'Day',
'Dayofweek',
'Dayofyear'
]
contin_vars = ['Elapsed']
n = len(joined); n
# + _uuid="469771cb58be289167cc9677433bffa0ac7f34c3"
dep = 'Price ($)'
joined = joined[cat_vars+contin_vars+[dep,'Date']].copy()
# + _uuid="f0a1728b4e65795cb4b3cbc7df25a32b88b62194"
joined_test[dep] = 0
joined_test = joined_test[cat_vars+contin_vars+[dep,'Date',]].copy()
# + _uuid="16d7b8502a81be38017484591c2c6cf3d1930290"
for cat in cat_vars: joined[cat] = joined[cat].astype('category').cat.as_ordered()
eu_west['Price ($)'] = eu_west['Price ($)'].astype('float32')
# + _uuid="815f7b25e38ad2378bb32cf56d4885741611c5ce"
for contin in contin_vars:
joined[contin] = joined[contin].astype('float32')
joined_test[contin] = joined_test[contin].astype('float32')
# + _uuid="35e9e09bb6ecde69698ac21485e8bce8dd4ac451"
# + _uuid="f73e956e68eff54f02a1e35bc2ae5a4d199582a4"
idxs = get_cv_idxs(n, val_pct=150000/n)
joined_sample = joined.iloc[idxs].set_index("Date")
samp_size = len(joined_sample); samp_size
# + _uuid="c569c0680c96fe69c344a93705605e90b6158799"
samp_size = n
# + _uuid="bd49c1f9c1604fc90cc1645e57e084506e621a78"
joined_sample.head()
# + _uuid="ad1b07bad0f5daf6e88ca8be2b1ec7ad9d0b923d"
df_train, y, nas, mapper = proc_df(joined_sample,'Price ($)', do_scale=True)
yl = np.log(y)
# + _uuid="525f0158369d7decd8eefe1a7d06b59a52530956"
joined_test = joined_test.set_index("Date")
# + _uuid="177a699a1e1c35184a72e7a2bbb5ac54c365c278"
df_test, _, nas, mapper = proc_df(joined_test,'Price ($)', do_scale=True,mapper=mapper, na_dict=nas )
# + _uuid="750f4759b0c2df8992016d75fb028637cf540c95"
# %debug
# + _uuid="552594f719cb631cb50278d6ba792e5d5e87a49d"
df_train.info()
# + _uuid="b514e2e36c702b09bf730de2340123c296b37f9e"
# + _uuid="5e48c68defd113f99015422f789bb3d97ce1f2ef"
# + _uuid="4e0302562587c70d38a412eb2a923361f5ecdfe2"
train_val_split = 0.80
train_size = int(2383999 * train_val_split);train_size
val_idx = list(range(train_size, len(df_train)))
# + _uuid="e4e6c15c973aebd60da7eebf957893b6878a7b5b"
val_idx = np.flatnonzero(
(df_train.index<=datetime.datetime(2017,4,12)) & (df_train.index>=datetime.datetime(2017,4,12)))
# + _uuid="11d28330c430fa3cb4e5883741bee811306e3ea3"
val_idx=[0]
# + _uuid="1d6d86b449864ced33399722e294ebdd4154de7e"
len(val_idx)
# + [markdown] _uuid="e904d533e6602f21a76e2df8b448d6b983e071f3"
# We can now put together our model.
# + _uuid="3c688932429fcc053032c0cee82dac3f9b791382"
def inv_y(a): return np.exp(a)
def exp_rmspe(y_pred, targ):
targ = inv_y(targ)
pct_var = (targ - inv_y(y_pred))/targ
return math.sqrt((pct_var**2).mean())
max_log_y = np.max(yl)
y_range = (0, max_log_y*1.2)
# + _uuid="d81d6e0c364ca98d50524a4198da3aa26eb195c5"
md = ColumnarModelData.from_data_frame(PATH_euwest, val_idx, df_train, yl.astype(np.float32),
cat_flds=cat_vars, bs=128, test_df=df_test)
# + _uuid="f9ea1ccef026627f1f4e03319dbfc8b3a6b011ac"
cat_sz = [(c, len(df_train[c].cat.categories)+1) for c in cat_vars]
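# +
# The remaining to-do items (embedding sizes, learner creation, training) are not shown
# above. Below is a hedged sketch in the fastai 0.7 style used in this notebook; the layer
# sizes, dropouts and learning rate are arbitrary choices, and the exact get_learner
# signature may differ between versions, so those lines are left commented.
emb_szs = [(c, min(50, (c + 1) // 2)) for _, c in cat_sz]  # rule-of-thumb embedding sizes
# m = md.get_learner(emb_szs, len(contin_vars), 0.04, 1, [1000, 500], [0.001, 0.01],
#                    y_range=y_range)
# m.fit(1e-3, 3, metrics=[exp_rmspe])
# -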
# + _uuid="e6590bbd697a09851a263e2e5b241bf0e3c03679"
# + [markdown] _uuid="7730bf002329986d5ac5575ac0e118114c45789d"
# # Conclusion on Results
# + [markdown] _uuid="f85254622ca7a4aaac0bf5ad18771d82231a9c85"
#
# lasso_keys = X_train_norm.keys()[ranksort]
# mask = lasso_coefs != 0
# ax.axvline(0, color='k', linewidth=3, zorder=11)
# ax.barh(lasso_keys[mask], lasso_coefs[mask], color='green', zorder=10, height=0.97)
# for i, k in enumerate(lasso_keys[mask]):
# if lasso_coefs[i] != 0:
# ax.text(lasso_coefs[i]/2, k, k, zorder=20)
# ax.invert_yaxis()
# ax.axes.get_yaxis().set_visible(False)
# ax.set_title('Lasso Regression Coefs')
# ax.set_xlabel('Lasso Coef Value')
# ax.grid(zorder=-1)
# plt.show()
# +
test_predictions = lr.predict(X_test_norm)
lasso_mae = np.nanmean(abs(test_predictions - y_test))
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
fig, axs = plt.subplots(1, 3, figsize=(30, 8), facecolor='w')
ax = axs[0]
maxslr = y_test.max() if y_test.max() > y_train.max() else y_train.max()
ax.hist(y_train, bins=np.arange(0, maxslr, 2), color='g', edgecolor='k', alpha=1.0, label='Train SLR\nn=%d'%len(y_train))
ax.hist(y_test, bins=np.arange(0, maxslr, 2), color='C0', edgecolor='k', alpha=1.0, label='Test SLR\nn=%d'%len(y_test))
ax.legend()
ax.set_xticks(np.arange(0, maxslr+1, 5))
ax.set_xticklabels(np.arange(0, maxslr+1, 5).astype(int))
ax.grid()
ax = axs[1]
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
ax.scatter(y_test, test_predictions, c='k', s=50, marker='+', linewidth=0.75)
ax.set_xlabel('Observed SLR')
ax.set_ylabel('Predicted SLR')
ax.plot([0, maxslr], [0, maxslr])
ax.set_xlim([0, maxslr])
ax.set_ylim([0, maxslr])
ax.set_aspect('equal')
ax.grid()
axs[1].set_title('Lasso Model\nR2: %.3f\nMAE: %.3f'%(lasso_test_score, lasso_mae))
ax = axs[2]
error = test_predictions - y_test
ax.hist(error, bins=np.arange(-30, 30, 2), edgecolor='k')
ax.set_xlabel('Prediction Error')
ax.set_ylabel('Count')
ax.grid()
# fig.suptitle('MultiLayer Perceptron (Simple ANN) [%d Hidden Layer Sizes]'%layer_sizes)
# +
from sklearn.linear_model import ElasticNet
# Grid-search for best parameters? (alpha and l1_ratio)
elastic = ElasticNet(alpha=0.009, l1_ratio=0.1, max_iter=10e5)
elastic.fit(X_train_norm, y_train)
elastic_train_score = elastic.score(X_train_norm, y_train)
elastic_test_score = elastic.score(X_test_norm, y_test)
elastic_coeff_used = np.sum(elastic.coef_ != 0)
print('Elastic Train Score: %.3f'%elastic_train_score)
print('Elastic Test Score: %.3f'%elastic_test_score)
# +
# fig, ax = plt.subplots(1, 1, facecolor='w', figsize=(8, 14))
# ranksort = np.argsort(abs(elastic.coef_))[::-1]
# elastic_coefs = elastic.coef_[ranksort]
# elastic_keys = X_train_norm.keys()[ranksort]
# mask = elastic_coefs != 0
# ax.axvline(0, color='k', linewidth=3, zorder=11)
# ax.barh(elastic_keys[mask], elastic_coefs[mask], color='green', zorder=10, height=0.97)
# for i, k in enumerate(elastic_keys[mask]):
# if elastic_coefs[i] != 0:
# ax.text(elastic_coefs[i]/2, k, k, zorder=20)
# ax.invert_yaxis()
# ax.axes.get_yaxis().set_visible(False)
# ax.set_title('Elastic Regression Coefs')
# ax.set_xlabel('Elastic Coef Value')
# ax.grid(zorder=-1)
# plt.show()
# +
test_predictions = elastic.predict(X_test_norm)
elastic_mae = np.nanmean(abs(test_predictions - y_test))
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
fig, axs = plt.subplots(1, 3, figsize=(30, 8), facecolor='w')
ax = axs[0]
maxslr = y_test.max() if y_test.max() > y_train.max() else y_train.max()
ax.hist(y_train, bins=np.arange(0, maxslr, 2), color='g', edgecolor='k', alpha=1.0, label='Train SLR\nn=%d'%len(y_train))
ax.hist(y_test, bins=np.arange(0, maxslr, 2), color='C0', edgecolor='k', alpha=1.0, label='Test SLR\nn=%d'%len(y_test))
ax.legend()
ax.set_xticks(np.arange(0, maxslr+1, 5))
ax.set_xticklabels(np.arange(0, maxslr+1, 5).astype(int))
ax.grid()
ax = axs[1]
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
ax.scatter(y_test, test_predictions, c='k', s=50, marker='+', linewidth=0.75)
ax.set_xlabel('Observed SLR')
ax.set_ylabel('Predicted SLR')
ax.plot([0, maxslr], [0, maxslr])
ax.set_xlim([0, maxslr])
ax.set_ylim([0, maxslr])
ax.set_aspect('equal')
ax.grid()
axs[1].set_title('Elastic Model\nR2: %.3f\nMAE: %.3f'%(elastic_test_score, elastic_mae))
ax = axs[2]
error = test_predictions - y_test
ax.hist(error, bins=np.arange(-30, 30, 2), edgecolor='k')
ax.set_xlabel('Prediction Error')
ax.set_ylabel('Count')
ax.grid()
# fig.suptitle('MultiLayer Perceptron (Simple ANN) [%d Hidden Layer Sizes]'%layer_sizes)
# -
# #### If we want to use feature selection, we can do so here.
# Can choose the model from which to take features, do so manually, or not at all (set equal to X_train.keys())
# +
# print('Top Predictor: %s'%elastic_keys[0])
# fig, axs = plt.subplots(3, 3, figsize=(18, 18), facecolor='w')
# axs = axs.flatten()
# for i, ax in enumerate(axs):
# ik, ic = elastic_keys[i], elastic_coefs[i]
# ax.scatter(X_train_norm[ik], y_train, c='k', marker='+', s=65, linewidth=0.5)
# ax.set_title('(%d: %.2f) %s'%(i+1, ic, ik))
# ax.grid()
# plt.show()
# +
# drop_low_import = False
# if drop_low_import:
# feature_selection = elastic_keys[mask]
# feature_selection
# pre_select = X_train_norm.keys()
# X_train_norm = X_train_norm[feature_selection]
# X_test_norm = X_test_norm[feature_selection]
# post_select = X_train_norm.keys()
# print('dropped:', [k for k in pre_select if k not in post_select])
# print(X_train_norm.shape)
# print(X_test_norm.shape)
# +
from sklearn.neural_network import MLPRegressor
for layer_sizes in [1000]:
mlp = MLPRegressor(
hidden_layer_sizes=(layer_sizes,),
activation='relu',
solver='sgd',
alpha=0.00001,
batch_size='auto',
learning_rate='adaptive',
learning_rate_init=0.01,
power_t=0.5,
max_iter=50000,
shuffle=True,
random_state=random_state,
tol=0.0001,
verbose=False,
warm_start=False,
momentum=0.7,
nesterovs_momentum=True,
early_stopping=True,
validation_fraction=0.3,
beta_1=0.5,
beta_2=0.999,
epsilon=1e-8)
mlp.fit(X_train_norm, y_train)
mlp_train_score = mlp.score(X_train_norm, y_train)
mlp_test_score = mlp.score(X_test_norm, y_test)
# mlp_coeff_used = np.sum(mlp.coef_ != 0)
print('Layer Sizes: %d'%layer_sizes)
print('MLP Train Score: %.3f'%mlp_train_score)
print('MLP Test Score: %.3f'%mlp_test_score)
print()
# +
test_predictions = mlp.predict(X_test_norm)
mlp_mae = np.nanmean(abs(test_predictions - y_test))
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
fig, axs = plt.subplots(1, 3, figsize=(30, 8), facecolor='w')
ax = axs[0]
maxslr = y_test.max() if y_test.max() > y_train.max() else y_train.max()
ax.hist(y_train, bins=np.arange(0, maxslr, 2), color='g', edgecolor='k', alpha=1.0, label='Train SLR\nn=%d'%len(y_train))
ax.hist(y_test, bins=np.arange(0, maxslr, 2), color='C0', edgecolor='k', alpha=1.0, label='Test SLR\nn=%d'%len(y_test))
ax.legend()
ax.set_xticks(np.arange(0, maxslr+1, 5))
ax.set_xticklabels(np.arange(0, maxslr+1, 5).astype(int))
ax.grid()
ax = axs[1]
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
ax.scatter(y_test, test_predictions, c='k', s=50, marker='+', linewidth=0.75)
ax.set_xlabel('Observed SLR')
ax.set_ylabel('Predicted SLR')
ax.plot([0, maxslr], [0, maxslr])
ax.set_xlim([0, maxslr])
ax.set_ylim([0, maxslr])
ax.set_aspect('equal')
ax.grid()
axs[1].set_title('R2: %.3f\nMAE: %.3f'%(mlp_test_score, mlp_mae))
ax = axs[2]
error = test_predictions - y_test
ax.hist(error, bins=np.arange(-30, 30, 2), edgecolor='k')
ax.set_xlabel('Prediction Error')
ax.set_ylabel('Count')
ax.grid()
fig.suptitle('MultiLayer Perceptron (Simple ANN) [%d Hidden Layer Sizes]'%layer_sizes)
# -
# <hr><hr>
# ### Implement Support Vector Regression Model
# (Other ML models like ANN from keras, tensorflow, pytorch may be used in this block)
# Implement a K-Fold Cross Validation to assess model performance (Optional, recommended; a minimal sketch follows below)
# Make the SVR model multiprocess friendly
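# A minimal K-fold sketch (assuming `X_train_norm`/`y_train` from the split above; the C
# and epsilon values here are placeholders, not tuned values):
# +
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVR

cv_model = SVR(C=10, epsilon=1.0, kernel='rbf')
cv_scores = cross_val_score(cv_model, X_train_norm, y_train, cv=5, scoring='r2')
print('5-fold R2: %.3f +/- %.3f' % (cv_scores.mean(), cv_scores.std()))
# -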
# +
def MARE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true))
def SVR_mp(_params):
print('.', end='')
_i, _j, _C, _e = _params
_model = SVR(
C=_C, #Ridge regularization parameter for (L2)^2 penalty
epsilon=_e, #Specifies the epsilon-tube within which no penalty is associated in the training loss function
        kernel='rbf', # 'linear', 'poly', 'rbf'
        degree=3, # pass an integer degree for the 'poly' kernel, ignored otherwise
tol=0.001, #stopping tolerance
shrinking=False,
cache_size=200,
verbose=False)
_model.fit(X_train_norm, y_train)
test_predictions = _model.predict(X_test_norm).flatten()
_r2 = _model.score(X_test_norm, y_test) #sklearn.metrics.r2_score(y_test.values.flatten(), test_predictions)
_mse = sklearn.metrics.mean_squared_error(y_test.values.flatten(), test_predictions)
_mae = sklearn.metrics.mean_absolute_error(y_test.values.flatten(), test_predictions)
_mare = MARE(y_test.values.flatten(), test_predictions)
return (_i, _j, _C, _e, _r2, _mae, _mse, _mare, _model)
# +
import multiprocessing as mp
from sklearn.svm import SVR
import sklearn
# Best test
crange = np.arange(1, 100, 1)
erange = np.arange(1.0, 5.1, .25)
# crange = np.arange(1, 25, 1)
# erange = np.arange(0.1, 3.1, .1)
params = {}
params['r2'] = np.zeros((len(crange), len(erange)))
params['mae'] = np.zeros((len(crange), len(erange)))
params['mse'] = np.zeros((len(crange), len(erange)))
params['mare'] = np.zeros((len(crange), len(erange)))
params['model'] = np.empty((len(crange), len(erange)), dtype='object')
params['epsilon'] = np.zeros((len(crange), len(erange)))
params['C'] = np.zeros((len(crange), len(erange)))
mp_params = np.array([[(i, j, C, e) for j, e in enumerate(erange)]
for i, C in enumerate(crange)]).reshape(-1, 4)
print('Iterations to attempt: %d'%len(mp_params))
# -
# Run SVR, collect output iterated over hyperparameters (C, epsilon)<br>
# Specify the cost function/loss function used, tolerances, kernel, error metric, etc
# +
# Parallelize here
# mp_returns = [SVR_mp(_param) for _param in mp_params[:10]]
with mp.get_context('fork').Pool(64) as p:
mp_returns = p.map(SVR_mp, mp_params, chunksize=1)
p.close()
p.join()
for item in mp_returns:
i, j, C, e, r2, mae, mse, mare, model = item
i, j = int(i), int(j)
params['r2'][i, j] = r2
params['mse'][i, j] = mse
params['mae'][i, j] = mae
params['mare'][i, j] = mare
params['model'][i, j] = model
params['epsilon'][i, j] = e
params['C'][i, j] = C
# -
# Plot model performance over time, cost/loss function evolution and skill
X_train_norm
# Plot SVR output with hyperparameters (C, epsilon)<br>
# Apply a grid-search method to select best performing parameters
# +
min_on, indexer, _ = 'R2', np.where(params['r2'] == params['r2'].max()), params['r2'].max()
min_on, indexer, _ = 'MAE', np.where(params['mae'] == params['mae'].min()), params['mae'].min()
min_on, indexer, _ = 'MSE', np.where(params['mse'] == params['mse'].min()), params['mse'].min()
min_on, indexer, _ = 'MARE', np.where(params['mare'] == params['mare'].min()), params['mare'].min()
for min_on in ['r2']:
if min_on in ['mse', 'mae', 'mare']:
min_max = 'Minimized'
indexer = np.where(params[min_on] == params[min_on].min())
elif min_on in ['r2']:
min_max = 'Maximized'
indexer = np.where(params[min_on] == params[min_on].max())
r, c = indexer
r, c = r[0], c[0]
r, c, _
model = params['model'][r, c]
test_predictions = model.predict(X_test_norm)
y_true = y_test
y_pred = test_predictions
print('MARE ', MARE(y_true, y_pred))
fig, axs = plt.subplots(2, 3, facecolor='w', figsize=(24, 14))
axs = axs.flatten()
ax = axs[0]
cbar = ax.pcolormesh(erange, crange, params['mae'])
plt.colorbar(cbar, label='mae', ax=ax)
ax.set_title('Min MAE: %.3f'%params['mae'][r, c])
ax.scatter(params['epsilon'][r, c], params['C'][r, c], s=500, c='w', marker='+')
ax = axs[1]
cbar = ax.pcolormesh(erange, crange, params['mse'])
plt.colorbar(cbar, label='mse', ax=ax)
ax.set_title('Min MSE: %.3f'%params['mse'][r, c])
ax.scatter(params['epsilon'][r, c], params['C'][r, c], s=500, c='w', marker='+')
ax = axs[2]
cbar = ax.pcolormesh(erange, crange, params['r2'])
plt.colorbar(cbar, label='r2', ax=ax)
ax.set_title('Max R^2: %.3f'%params['r2'][r, c])
ax.scatter(params['epsilon'][r, c], params['C'][r, c], s=500, c='k', marker='+')
for ax in axs[:3]:
ax.set_xlabel('epsilon')
ax.set_ylabel('C_val')
ax.set_ylim([crange.min(), crange.max()])
ax.set_xlim([erange.min(), erange.max()])
ax = axs[3]
maxslr = y_test.max() if y_test.max() > y_train.max() else y_train.max()
ax.hist(y_train, bins=np.arange(0, maxslr, 2), color='g', edgecolor='k', alpha=1.0, label='Train SLR\nn=%d'%len(y_train))
ax.hist(y_test, bins=np.arange(0, maxslr, 2), color='C0', edgecolor='k', alpha=1.0, label='Test SLR\nn=%d'%len(y_test))
ax.legend()
ax.set_xticks(np.arange(0, maxslr+1, 5))
ax.set_xticklabels(np.arange(0, maxslr+1, 5).astype(int))
ax.grid()
ax = axs[4]
maxslr = test_predictions.max() if test_predictions.max() > y_test.max() else y_test.max()
maxslr += 5
ax.scatter(y_test, test_predictions, c='k', s=50, marker='+', linewidth=0.75)
ax.set_xlabel('Observed SLR')
ax.set_ylabel('Predicted SLR')
ax.plot([0, maxslr], [0, maxslr])
ax.set_xlim([0, maxslr])
ax.set_ylim([0, maxslr])
ax.set_aspect('equal')
ax.grid()
ax = axs[5]
error = test_predictions - y_test
ax.hist(error, bins=np.arange(-30, 30, 2), edgecolor='k')
ax.set_xlabel('Prediction Error')
ax.set_ylabel('Count')
ax.grid()
plt.suptitle('Support Vector Regression Model\n%s\n%s on: %s\nepsilon %.3f\nc_val: %.3f'%(site_list, min_max, min_on.upper(), params['epsilon'][r, c], params['C'][r, c]))
plt.show()
# -
# ### Describe final trained SVR model
# Descriptive plots of predictor coefficient rank/influence<br>
# Print out model parameters, error metrics, skill scores
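# A short sketch of the textual summary suggested above (coefficient plots do not apply to
# the RBF kernel, which is why the block further below is commented out):
# +
# Selected hyperparameters and error metrics for the best SVR from the grid search above
print('Best C: %.2f, epsilon: %.2f' % (params['C'][r, c], params['epsilon'][r, c]))
print('R2:   %.3f' % params['r2'][r, c])
print('MAE:  %.3f' % params['mae'][r, c])
print('MSE:  %.3f' % params['mse'][r, c])
print('MARE: %.3f' % params['mare'][r, c])
# -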
# +
# ranksort = np.argsort(abs(model.coef_))[::-1]
# svr_coefs = model.coef_[0][ranksort][0]
# svr_keys = X_train_norm.keys()[ranksort][0]
# mask = svr_coefs != 0
# fig, ax = plt.subplots(1, 1, facecolor='w', figsize=(8, 14))
# ax.axvline(0, color='k', linewidth=3, zorder=11)
# ax.barh(svr_keys[mask], svr_coefs[mask], color='green', zorder=10, height=0.97)
# for i, k in enumerate(svr_keys):
# if svr_coefs[i] != 0:
# ax.text(svr_coefs[i]/2, k, k, zorder=20)
# # ax.invert_yaxis()
# ax.axes.get_yaxis().set_visible(False)
# ax.set_title('SVR Coefs')
# ax.set_xlabel('SVR Coef Value')
# ax.grid(zorder=-1)
# plt.show()
# -
# ### Save out a usable model along with the input parameters for application
# Save with a descriptive filename and a metadata text file!<br>
# This will make swapping out models for evaluation much simpler
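# A hedged sketch of one way to do this with joblib (the file names and metadata fields
# below are assumptions, not part of the original workflow):
# +
import joblib
from datetime import datetime as dt

# Persist the selected SVR with a descriptive filename
model_file = 'svr_slr_C%.0f_eps%.2f.joblib' % (params['C'][r, c], params['epsilon'][r, c])
joblib.dump(model, model_file)

# Write a small metadata text file alongside it
with open(model_file.replace('.joblib', '_meta.txt'), 'w') as f:
    f.write('Trained: %s\n' % dt.now().isoformat())
    f.write('Predictors: %s\n' % list(X_train_norm.columns))
    f.write('C=%.3f epsilon=%.3f kernel=rbf\n' % (params['C'][r, c], params['epsilon'][r, c]))
    f.write('R2=%.3f MAE=%.3f MARE=%.3f\n' % (params['r2'][r, c], params['mae'][r, c], params['mare'][r, c]))
# -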
| 29,572 |
/Deep Learning/CNN.ipynb
|
f43b47f9ed650c0dafcdce06b625b9f2b1154e23
|
[] |
no_license
|
devangm2000/Machine-Learning
|
https://github.com/devangm2000/Machine-Learning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 22,441 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="4G-xQIODTer5"
# ## Sentence/Word Embeddings Extraction for Downstream Activities
#
# + id="uNxjqFRFmGVg"
import torch
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# + id="ezCdYws_mUAW"
# !pip install transformers
# + id="K7mf0xZ1szvb"
from google.colab import drive
drive.mount('/content/drive')
# + id="JBW4m7nybiVs"
import pandas as pd
df = pd.read_csv("drive/MyDrive/Colab Notebooks/dataset/all_tweets_text.csv")
print(df.shape)
print(df.columns)
df['text']= df['text'].astype('str')
#df = df[8242:]
#df = df[:37]
sentences = df.text.values
print(sentences[8])
print(sentences.shape)
'''
import pandas as pd
# Reading Data into dataFrame
text = pd.read_csv("drive/MyDrive/Colab Notebooks/dataset/statuses_unicode.txt", header=None, names=['sentence'])
big5 = pd.read_csv("drive/MyDrive/Colab Notebooks/dataset/big5labels.txt", delimiter=" ", header=None, names=['O', 'C', 'E', 'A', 'N'])
df = pd.concat([text, big5], axis=1, sort=False)
#df = df[:32]
print(df.shape)
print(df.sample(5))
df['sentence']= df['sentence'].astype('str')
sentences = df.sentence.values
print(sentences[8])
print(sentences.shape)
'''
# + id="Kzze6DSKoYAi"
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained(
'bert-base-multilingual-cased',
do_lower_case=False)
# + id="96DPOgivpYuZ"
# Print the original sentence.
print(' Original: ', sentences[0])
# Print the sentence split into tokens.
print('Tokenized: ', tokenizer.tokenize(sentences[0]))
# Print the sentence mapped to token ids.
print('Token IDs: ', tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentences[0])))
# + id="0RmZIJ9-pvzU"
max_len = 0
# For every sentence...
for sent in sentences:
# Tokenize the text and add `[CLS]` and `[SEP]` tokens.
input_ids = tokenizer.encode(sent, add_special_tokens=True)
# Update the maximum sentence length.
max_len = max(max_len, len(input_ids))
print('Max sentence length: ', max_len)
# + id="97IxqFfHqBEm"
# Tokenize all of the sentences and map the tokens to their word IDs.
max_len = 256 # the closest power of two exceeding max len found
input_ids = []
attention_masks = []
# For every sentence...
for sent in sentences:
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = max_len, # Pad & truncate all sentences.
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
# Print sentence 0, now as a list of IDs.
print('Original: ', sentences[0])
print('Token IDs:', input_ids[0])
# + id="DNUPruYheNaw" cellView="code"
#@title Default title text
from transformers import BertModel
model = BertModel.from_pretrained("bert-base-multilingual-cased")
model.cuda()
# + id="N1BxZu-lfG1U"
import numpy as np
import sys
from scipy.spatial import distance
bhv_centroids = np.load("drive/MyDrive/Colab Notebooks/dataset/bhv_centroids.npy")
batch_size = 16
start_range = 0
print(input_ids.shape[0])
bhv_results = []
while (start_range + batch_size) < input_ids.shape[0]:
inputs = input_ids[start_range: (start_range + batch_size)]
outputs = model(inputs.to(device))
#print(outputs.last_hidden_state.shape)
    # compute the BHV (basic human values) scores for this batch here
out_arr = outputs.last_hidden_state.cpu().detach().numpy()
out_arr_asa = np.asarray(out_arr)
for sentence in out_arr_asa:
valid_token = 0.0
bhv_scores = np.zeros(10)
for token in sentence:
min_distance = sys.float_info.max
pos = 0
if abs(token[0]) < 0.0000001:
continue
else:
for i in range(10):
now_distance = distance.euclidean(np.asarray(token), np.asarray(bhv_centroids[i]))
#print(now_distance)
if now_distance < min_distance:
min_distance = now_distance
pos = i
bhv_scores[pos] = bhv_scores[pos]+1.0
valid_token = valid_token + 1.0
#print("bhv_scores and valid token", bhv_scores, valid_token)
bhv_scores = bhv_scores / valid_token
#print("bhv_scores after div", bhv_scores)
bhv_results.append(bhv_scores)
start_range += batch_size
inputs = input_ids[start_range:]
outputs = model(inputs.to(device))
# compute the BHV scores for the final partial batch here as well
out_arr = outputs.last_hidden_state.cpu().detach().numpy()
out_arr_asa = np.asarray(out_arr)
for sentence in out_arr_asa:
valid_token = 0.0
bhv_scores = np.zeros(10)
for token in sentence:
min_distance = sys.float_info.max
pos = 0
if abs(token[0]) < 0.0000001:
continue
else:
for i in range(10):
now_distance = distance.euclidean(np.asarray(token), np.asarray(bhv_centroids[i]))
#print(now_distance)
if now_distance < min_distance:
min_distance = now_distance
pos = i
bhv_scores[pos] = bhv_scores[pos]+1.0
valid_token = valid_token + 1.0
bhv_scores = bhv_scores / valid_token
bhv_results.append(bhv_scores)
# print("bhv_results ", bhv_results)
bhv_res = np.asarray(bhv_results)
np.savetxt("drive/MyDrive/Colab Notebooks/dataset/BHV/all_bhv.csv", bhv_res, delimiter=",", fmt='%5.5f')
# + [markdown] id="Rgp7YToB7Zw-"
# # BHV with GloVe
# + id="Pt9VFwc_9YRv"
# !wget http://nlp.stanford.edu/data/glove.twitter.27B.zip
# + id="nmNs9S_e-CET"
# !unzip -q glove.twitter.27B.zip -d 'drive/MyDrive/Colab Notebooks/dataset/GloVe'
# + id="MiZn544pAxNi"
import numpy as np
def loadGloveModel(gloveFile): ##200d
print ("Loading Glove Model")
f = open(gloveFile,'r')
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
print ("Done.",len(model)," words loaded!")
return model
localModel = loadGloveModel('drive/MyDrive/Colab Notebooks/dataset/GloVe/glove.twitter.27B.200d.txt')
# + id="EPUH8v13B-7m"
selfdirection = ["creativity", "freedom", "curious", "independent", "self-respect", "intelligent", "privacy"]
stimulation = ["excitement", "novelty", "challenge", "variety", "stimulation", "daring"]
hedonism = ["pleasure", "sensuous", "gratification", "enjoyable", "self-indulgent"]
achievement = ["ambitious", "successful", "capable", "influential", "intelligent", "self-respect"]
power = ["authority", "wealth", "power", "reputation", "notoriety"]
security = ["safety", "harmony", "stability", "order", "security", "clean", "reciprocation", "healthy", "moderate", "belonging"]
conformity = ["obedient", "self-discipline", "politeness", "honoring" , "loyal", "responsible"]
tradition = ["tradition", "humble", "devout", "moderate", "spiritualist"]
benevolence = ["helpful", "honest", "forgiving", "responsible", "loyal", "friendship", "love", "meaningful"]
universalism = ["broadminded", "justice", "equality", "peace", "beauty", "wisdom", "environmentalist", "harmony"]
schwartzBasicHumanValues = [selfdirection, stimulation, hedonism, achievement, power, security, conformity, tradition, benevolence, universalism]
schwartzNames = ["selfdirection", "stimulation", "hedonism", "achievement", "power", "security", "conformity", "tradition", "benevolence", "universalism"]
pos = 0
schwartzCentroids = {}
for humanValue in schwartzBasicHumanValues:
count_elements = 0.0
schwartzNCentroid = [0.0]
schwartzNCentroid = schwartzNCentroid*200
schwartzNCentroid = np.asarray(schwartzNCentroid)
for representativeWord in humanValue:
schwartzNCentroid = schwartzNCentroid + np.asarray(localModel[representativeWord])
count_elements +=1
schwartzCentroids[schwartzNames[pos]] = schwartzNCentroid/count_elements
pos +=1
print ("Centroids computed!")
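# +
# Quick usage sketch: map a single (arbitrary) word to its nearest Schwartz value centroid,
# using the same euclidean distance as the document-level loop further below.
from scipy.spatial import distance

example_word = "freedom"
example_vec = np.asarray(localModel[example_word])
nearest_value = min(schwartzNames,
                    key=lambda name: distance.euclidean(example_vec, schwartzCentroids[name]))
print(example_word, '->', nearest_value)
# -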
# + id="zc6A1DBeMMHS"
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
# + id="qSRfJPMiDEiu"
import nltk
from nltk.corpus import stopwords
import string
from nltk.stem.wordnet import WordNetLemmatizer
import re
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
def clean(doc):
stop_free = " ".join([i for i in doc.lower().split() if i not in stop])
punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
return normalized
NON_BMP_RE = re.compile(u"[^\U00000000-\U0000d7ff\U0000e000-\U0000ffff]", flags=re.UNICODE)
# + id="YE5yTH-Q7kWw"
import sys
from scipy.spatial import distance
bhv_results = []
for sentence in sentences:
total_words = {}
words_total = 0.0
bhv_scores = np.zeros(10)
for category in schwartzNames:
total_words[category] = 0
doc_cleaned = clean(sentence)
# print(doc_cleaned)
    for word in doc_cleaned.split():  # iterate over words rather than characters
if word.startswith('@') or word.isdigit() or ("http" in word):
continue
else:
word = NON_BMP_RE.sub('', word)
if len(word)>0 and word in localModel:
words_total += 1
min_distance = sys.float_info.max
which_schwartz = ""
for pos in schwartzNames:
now_distance = distance.euclidean(np.asarray(localModel[word]), schwartzCentroids[pos])
if now_distance<min_distance:
min_distance = now_distance
which_schwartz = pos
total_words[which_schwartz] += 1
pos = 0
if words_total == 0.0:
bhv_results.append(bhv_scores)
else:
for category in schwartzNames:
bhv_scores[pos] = total_words[category]/words_total
pos +=1
bhv_results.append(bhv_scores)
print ("bhv computed successfully")
bhv_res = np.asarray(bhv_results)
np.savetxt("drive/MyDrive/Colab Notebooks/dataset/BHV/glove_all_bhv_all_tweets.csv", bhv_res, delimiter=",", fmt='%5.5f')
| 11,317 |
/homework/Day_002_HW.ipynb
|
43f90dffef7b72ae949129313153ca7b27af31c9
|
[] |
no_license
|
karen550168/3rd-ML100Days
|
https://github.com/karen550168/3rd-ML100Days
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,003 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Data Visualization](https://www.kaggle.com/learn/data-visualization) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/bar-charts-and-heatmaps).**
#
# ---
#
# In this exercise, you will use your new knowledge to propose a solution to a real-world scenario. To succeed, you will need to import data into Python, answer questions using the data, and generate **bar charts** and **heatmaps** to understand patterns in the data.
#
# ## Scenario
#
# You've recently decided to create your very own video game! As an avid reader of [IGN Game Reviews](https://www.ign.com/reviews/games), you hear about all of the most recent game releases, along with the ranking they've received from experts, ranging from 0 (_Disaster_) to 10 (_Masterpiece_).
#
# 
#
# You're interested in using [IGN reviews](https://www.ign.com/reviews/games) to guide the design of your upcoming game. Thankfully, someone has summarized the rankings in a really useful CSV file that you can use to guide your analysis.
#
# ## Setup
#
# Run the next cell to import and configure the Python libraries that you need to complete the exercise.
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
print("Setup Complete")
# The questions below will give you feedback on your work. Run the following cell to set up our feedback system.
# Set up code checking
import os
if not os.path.exists("../input/ign_scores.csv"):
os.symlink("../input/data-for-datavis/ign_scores.csv", "../input/ign_scores.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.data_viz_to_coder.ex3 import *
print("Setup Complete")
# ## Step 1: Load the data
#
# Read the IGN data file into `ign_data`. Use the `"Platform"` column to label the rows.
# +
# Path of the file to read
ign_filepath = "../input/ign_scores.csv"
# Fill in the line below to read the file into a variable ign_data
ign_data = pd.read_csv(ign_filepath, index_col="Platform")
# Run the line below with no changes to check that you've loaded the data correctly
step_1.check()
# +
# Lines below will give you a hint or solution code
#step_1.hint()
#step_1.solution()
# -
# ## Step 2: Review the data
#
# Use a Python command to print the entire dataset.
# Print the data
ign_data
# The dataset that you've just printed shows the average score, by platform and genre. Use the data to answer the questions below.
# +
# Fill in the line below: What is the highest average score received by PC games,
# for any genre?
high_score = 7.759930
# Fill in the line below: On the Playstation Vita platform, which genre has the
# lowest average score? Please provide the name of the column, and put your answer
# in single quotes (e.g., 'Action', 'Adventure', 'Fighting', etc.)
worst_genre = "Simulation"
# Check your answers
step_2.check()
# -
# Lines below will give you a hint or solution code
step_2.hint()
step_2.solution()
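# The same answers can also be derived from the dataframe instead of hard-coded; a small
# sketch (the platform labels 'PC' and 'PlayStation Vita' are assumed -- check ign_data.index
# if they differ in your copy of the data).
# +
print(ign_data.loc['PC'].max())                  # highest average score among PC genres
print(ign_data.loc['PlayStation Vita'].idxmin()) # lowest-scoring genre on the Vita
# -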
# ## Step 3: Which platform is best?
#
# Since you can remember, your favorite video game has been [**Mario Kart Wii**](https://www.ign.com/games/mario-kart-wii), a racing game released for the Wii platform in 2008. And, IGN agrees with you that it is a great game -- their rating for this game is a whopping 8.9! Inspired by the success of this game, you're considering creating your very own racing game for the Wii platform.
#
# #### Part A
#
# Create a bar chart that shows the average score for **racing** games, for each platform. Your chart should have one bar for each platform.
# +
# Bar chart showing average score for racing games by platform
plt.figure(figsize=(12,9))
plt.title("Average score of racing games")
sns.barplot(x=ign_data['Racing'], y=ign_data.index)
plt.xlabel("")
plt.ylabel("")
# Check your answer
step_3.a.check()
# -
# Lines below will give you a hint or solution code
#step_3.a.hint()
step_3.a.solution_plot()
# #### Part B
#
# Based on the bar chart, do you expect a racing game for the **Wii** platform to receive a high rating? If not, what gaming platform seems to be the best alternative?
# +
#step_3.b.hint()
# -
# Check your answer (Run this code cell to receive credit!)
step_3.b.solution()
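# A quick numeric check of the same question, assuming 'Wii' is one of the platform labels
# in ign_data (as plotted above).
# +
racing_scores = ign_data['Racing'].sort_values(ascending=False)
print(racing_scores)
print("Wii rank:", list(racing_scores.index).index('Wii') + 1, "out of", len(racing_scores))
# -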
# ## Step 4: All possible combinations!
#
# Eventually, you decide against creating a racing game for Wii, but you're still committed to creating your own video game! Since your gaming interests are pretty broad (_... you generally love most video games_), you decide to use the IGN data to inform your new choice of genre and platform.
#
# #### Part A
#
# Use the data to create a heatmap of average score by genre and platform.
# +
# Heatmap showing average game score by platform and genre
# Set the width and height of the figure
plt.figure(figsize=(10,10))
# Heatmap showing average game score by platform and genre
sns.heatmap(ign_data, annot=True)
# Add label for horizontal axis
plt.xlabel("Genre")
# Add figure title
plt.title("Average Game Score, by Platform and Genre")
# Check your answer
step_4.a.check()
# -
# Lines below will give you a hint or solution code
#step_4.a.hint()
step_4.a.solution_plot()
# #### Part B
#
# Which combination of genre and platform receives the highest average ratings? Which combination receives the lowest average rankings?
# +
#step_4.b.hint()
# -
# Check your answer (Run this code cell to receive credit!)
step_4.b.solution()
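# Programmatically, the extreme (platform, genre) combinations can be read off by stacking
# the table into a Series; a sketch using the same ign_data frame.
# +
stacked = ign_data.stack()
print("Highest average:", stacked.idxmax(), "->", stacked.max())
print("Lowest average: ", stacked.idxmin(), "->", stacked.min())
# -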
# # Keep going
#
# Move on to learn all about **[scatter plots](https://www.kaggle.com/alexisbcook/scatter-plots)**!
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161291) to chat with other Learners.*
# #### Analyze Each Neighborhood
# one hot encoding
Downtown_onehot = pd.get_dummies(Downtown_venues[['Venue Category']],prefix="", prefix_sep="")
# add neighborhood column back to dataframe
Downtown_onehot.drop('Neighborhood',axis=1,inplace=True)
Downtown_onehot.insert(0,'Neighborhood',Downtown_venues['Neighborhood'],False)
Downtown_onehot.head()
# #### Next, let's group the rows by neighborhood, taking the mean frequency of occurrence of each venue category
Downtown_grouped = Downtown_onehot.groupby('Neighborhood').mean().reset_index()
Downtown_grouped
# +
num_top_venues = 5
for hood in Downtown_grouped['Neighborhood']:
print("----"+hood+"----")
temp = Downtown_grouped[Downtown_grouped['Neighborhood'] == hood].T.reset_index()
temp.columns = ['venue','freq']
temp = temp.iloc[1:]
temp['freq'] = temp['freq'].astype(float)
temp = temp.round({'freq': 2})
print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues))
print('\n')
# -
# #### Let's put that into a *pandas* dataframe
# First, let's write a function to sort the venues in descending order.
def return_most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
# +
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighborhood'] = Downtown_grouped['Neighborhood']
for ind in np.arange(Downtown_grouped.shape[0]):
neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(Downtown_grouped.iloc[ind, :], num_top_venues)
neighborhoods_venues_sorted.head()
# -
# #### Cluster Neighborhoods
# Run *k*-means to cluster the neighborhood into 5 clusters.
# +
# set number of clusters
kclusters = 5
Downtown_grouped_clustering = Downtown_grouped.drop('Neighborhood', 1)
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(Downtown_grouped_clustering)
# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:10]
# -
# Let's create a new dataframe that includes the cluster as well as the top 10 venues for each neighborhood.
# +
# add clustering labels (needed below for Downtown_merged['Cluster Labels'])
if 'Cluster Labels' not in neighborhoods_venues_sorted.columns:
    neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)
Downtown_merged = Downtown_toronto_data
# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood
Downtown_merged = Downtown_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighbourhood')
Downtown_merged.head() # check the last columns!
# +
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(Downtown_merged['Latitude'], Downtown_merged['Longitude'], Downtown_merged['Neighbourhood'], Downtown_merged['Cluster Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
map_clusters
# -
# #### Examine Clusters
# Now, you can examine each cluster and determine the discriminating venue categories that distinguish each cluster. Based on the defining categories, you can then assign a name to each cluster. I will leave this exercise to you.
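# As a starting point for that exercise, here is a small sketch (an addition, assuming the
# 'Cluster Labels' column created above is present in Downtown_merged) that tallies the most
# frequent "1st Most Common Venue" within each cluster.
# +
for label in sorted(Downtown_merged['Cluster Labels'].dropna().unique()):
    cluster_rows = Downtown_merged[Downtown_merged['Cluster Labels'] == label]
    print('Cluster', int(label) + 1)
    print(cluster_rows['1st Most Common Venue'].value_counts().head(3), '\n')
# -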
# #### Cluster 1
Downtown_merged.loc[Downtown_merged['Cluster Labels'] == 0, Downtown_merged.columns[[2] + list(range(5, Downtown_merged.shape[1]))]]
# #### Cluster 2
Downtown_merged.loc[Downtown_merged['Cluster Labels'] == 1, Downtown_merged.columns[[2] + list(range(5, Downtown_merged.shape[1]))]]
# #### Cluster 3
Downtown_merged.loc[Downtown_merged['Cluster Labels'] == 2, Downtown_merged.columns[[2] + list(range(5, Downtown_merged.shape[1]))]]
# #### Cluster 4
Downtown_merged.loc[Downtown_merged['Cluster Labels'] == 3, Downtown_merged.columns[[2] + list(range(5, Downtown_merged.shape[1]))]]
# #### Cluster 5
Downtown_merged.loc[Downtown_merged['Cluster Labels'] == 4, Downtown_merged.columns[[2] + list(range(5, Downtown_merged.shape[1]))]]
| 10,892 |
/CVEDataView.ipynb
|
b92a55915a7d3bedb370612779b92ea9a8b53cfc
|
[] |
no_license
|
phamvanvung/cvssexploration
|
https://github.com/phamvanvung/cvssexploration
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,748,231 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project 5: Group Project
# #### Author: Adam Pardo, Brandon Bergeron, Eric Bayless, Ramesh Babu
# ### 01 - Data Collection and Data Cleaning
# Task:
# ### **Before running this notebook, data files should be downloaded from [Kaggle](https://www.kaggle.com/yelp-dataset/yelp-dataset) and placed in the data folder**
# import libraries here
import pandas as pd
import os
# ## Businesses
# import Business.json
business_json_path = '../data/yelp_academic_dataset_business.json'
df_business = pd.read_json(business_json_path, lines=True)
# +
# function to pull businesses by city, with option to sample
def city_restaurants(df, city_name, samples=None):
"""
Takes in the dataframe of yelp businesses, and returns only the restaurants from a given city.
ARGS:
df: dataframe of businesses
city_name (string): Name of city to pull restaurants from
samples (int): number of restaurants to sample from df (defalt=None)
"""
#--load restaurants in city_name
df_city = df[df['city'] == city_name]
df_city = df_city.dropna(subset=['categories'])
#--keep restaurants with reviews between 100-300
restaurants = df_city[df_city['categories'].str.contains('Restaurant')]
restaurants_reduced = restaurants[(restaurants['review_count'] > 100) & (restaurants['review_count'] < 300)]
restaurants_reduced.reset_index(drop=True, inplace=True)
#--returns all restaurants if no samples
if samples == None:
restaurants_reduced.to_csv(f'../data/{city_name.replace(" ", "_")}_restaurants_large.csv', index = False)
return restaurants_reduced
else:
#creation of sample restaurants
restaurant_samples = pd.concat([restaurants_reduced[restaurants_reduced['is_open'] == 1].sample(samples//2), restaurants_reduced[restaurants_reduced['is_open'] == 0].sample(samples//2)])
restaurant_samples.reset_index(drop=True, inplace=True)
        restaurant_samples.to_csv(f'../data/{city_name.replace(" ", "_")}_{samples}.csv', index = False)  # write to the same data folder used above
return restaurant_samples
# +
#--Creation of initial sample size
#city_restaurants(df_business, 'Las Vegas', 400)
# + jupyter={"outputs_hidden": true}
city_restaurants(df_business, 'Las Vegas')
# -
# ## Reviews
# +
# creates JsonReader object for iteration over total Reviews
review_json_path = '../data/yelp_academic_dataset_review.json'
size = 500_000
review_reader = pd.read_json(review_json_path, lines=True,
dtype={'review_id':str,'user_id':str,
'business_id':str,'stars':int,
'date':str,'text':str,'useful':int,
'funny':int,'cool':int},
chunksize=size)
# +
#---Matches restaurant reviews with df of businesses
def business_reviews(reviews, business_filepath, output_filepath):
"""
Iterates over the total reviews and extracts reviews that match the businesses given
ARGS:
reviews: JsonReader for iterating over total reviews
business_filepath (string.csv): .csv filepath with desired businesses
output_filepath (string.csv): .csv filename for reviews to be saved in
"""
#--reads in .csv file of desired businesses
df_business = pd.read_csv(business_filepath)
#--stores matches on business_id
chunk_list = []
#--iterates over JsonReader chunks and matches reviews on business_id's
for chunk_review in reviews:
chunk_review = chunk_review.drop(['review_id','useful','funny','cool'], axis=1)
#--renames duplicate column
chunk_review = chunk_review.rename(columns={'stars': 'review_stars'})
chunk_merged = pd.merge(df_business, chunk_review, on='business_id', how='left')
on streams,
# that's why we have to create 1-element stream with iter
# and later retrieve the result with next.
return next(trax.data.tokenize(
iter([s]),
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='/content/drive/My Drive/NLP C4 W3 Data/'))
vocab_size = trax.data.vocab_size(
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='/content/drive/My Drive/NLP C4 W3 Data/')
def get_sentinels(vocab_size):
sentinels = {}
for i, char in enumerate(reversed(string.ascii_letters), 1):
decoded_text = detokenize([vocab_size - i])
# Sentinels, ex: <Z> - <a>
sentinels[decoded_text] = f'<{char}>'
return sentinels
sentinels = get_sentinels(vocab_size)
def pretty_decode(encoded_str_list, sentinels=sentinels):
# If already a string, just do the replacements.
if isinstance(encoded_str_list, (str, bytes)):
for token, char in sentinels.items():
encoded_str_list = encoded_str_list.replace(token, char)
return encoded_str_list
# We need to decode and then prettyfy it.
return pretty_decode(detokenize(encoded_str_list))
inputs_targets_pairs = []
# here you are reading already computed input/target pairs from a file
with open ('/content/drive/My Drive/NLP C4 W3 Data/inputs_targets_pairs_file.txt', 'rb') as fp:
inputs_targets_pairs = pickle.load(fp)
def display_input_target_pairs(inputs_targets_pairs):
for i, inp_tgt_pair in enumerate(inputs_targets_pairs, 1):
inps, tgts = inp_tgt_pair
inps, tgts = pretty_decode(inps), pretty_decode(tgts)
print(f'[{i}]\n'
f'inputs:\n{wrapper.fill(text=inps)}\n\n'
f'targets:\n{wrapper.fill(text=tgts)}\n\n\n\n')
# + id="bHLYA6N7Izzi" outputId="43a444e4-3470-4688-d2c6-88ab60e400b9" colab={"base_uri": "https://localhost:8080/", "height": 1000}
display_input_target_pairs(inputs_targets_pairs)
# + [markdown] id="qkfUvyjtEu6J"
# <a name='3'></a>
# # Part 3: BERT Loss
#
# We will not train the encoder you built in the assignment (Coursera version): training it could easily cost a few days, depending on which GPUs/TPUs you are using, and very few people train a full transformer from scratch. Instead, most people load a pretrained model and fine-tune it on a specific task. That is exactly what you are about to do. Let's start by initializing the model and then loading in the pretrained weights.
#
# Initialize the model from the saved checkpoint.
# + id="Hsqi-dzzxv4e"
# Initializing the model
model = trax.models.Transformer(
d_ff = 4096,
d_model = 1024,
max_len = 2048,
n_heads = 16,
dropout = 0.1,
input_vocab_size = 32000,
n_encoder_layers = 24,
n_decoder_layers = 24,
mode='predict') # Change to 'eval' for slow decoding.
# + id="lOB1C131xv4i" outputId="b48c6a4e-8380-4b1e-b7c2-66616cefdbba" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Now load in the model
# this takes about 1 minute
shape11 = trax.shapes.ShapeDtype((1, 1), dtype=np.int32) # Needed in predict mode.
model.init_from_file('/content/drive/My Drive/NLP C4 W3 Data/models/model.pkl.gz',
weights_only=True, input_signature=(shape11, shape11))
# + id="9Wy3pr4ZfzA_" outputId="a3741a3e-bd37-4f7f-8372-6ea379b0cf7c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Uncomment to see the transformer's structure.
print(model)
# + [markdown] id="HuTyft5EBQK6"
# <a name='3.1'></a>
# ### 3.1 Decoding
#
# Now you will take one of the `inputs_targets_pairs` and use it as the input and the target. Then you will use `pretty_decode` to display both. The code to perform all of this is provided below.
# + id="LJ8s_xZ1QtkI" outputId="0b73d4f0-987d-4b54-bc2c-48286b80a19c" colab={"base_uri": "https://localhost:8080/", "height": 326}
# # using the 3rd example
# c4_input = inputs_targets_pairs[2][0]
# c4_target = inputs_targets_pairs[2][1]
# using the 1st example
c4_input = inputs_targets_pairs[0][0]
c4_target = inputs_targets_pairs[0][1]
print('pretty_decoded input: \n\n', pretty_decode(c4_input))
print('\npretty_decoded target: \n\n', pretty_decode(c4_target))
print('\nc4_input:\n\n', c4_input)
print('\nc4_target:\n\n', c4_target)
print(len(c4_target))
print(len(pretty_decode(c4_target)))
# + [markdown] id="OD9EchfPFlAf"
# Run the cell below to decode
# + id="6HwIdimiN0k2"
# Faster decoding: (still - maybe lower max_length to 20 for speed)
# Temperature is a parameter for sampling.
# # * 0.0: same as argmax, always pick the most probable token
# # * 1.0: sampling from the distribution (can sometimes say random things)
# # * values inbetween can trade off diversity and quality, try it out!
output = decoding.autoregressive_sample(model, inputs=np.array(c4_input)[None, :],
temperature=0.0, max_length=50)
print(wrapper.fill(pretty_decode(output[0])))
# + [markdown] id="0n1MG7zNJZdh"
# ### Note: As you can see, the RAM is almost full because the model and the decoding are memory heavy. Running the decoding a second time might give you an answer that makes no sense, or repetitive words. If that happens, restart the runtime (see how at the start of the notebook) and run all the cells again.
# + id="7PRbChEc8dqe"
| 9,476 |
/.ipynb_checkpoints/Tennis-ddpg-multi-checkpoint.ipynb
|
e929aabead9d56540033abda9621a725f2f51da7
|
[
"MIT"
] |
permissive
|
xunyiljg/rl_tennis
|
https://github.com/xunyiljg/rl_tennis
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 31,124 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Summary:
# Jay Woo and Casey Alvarado are working together on mapping sexual assault cases committed by high-ranking Roman Catholic officials throughout the USA. In this notebook, we will explore the data and clean it based on what we find.
#
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
import vincent
# %matplotlib inline
# Import the data.
df = pandas.read_csv("data.csv")
df.columns
df.head()
# We will clean the data based on any NaN values, empty strings, or other missing values that we find.
def get_modifiers(ord_data):
#input: data column
#output: item or empty string
#split data on these weird characters we noticed
splitted = str(ord_data).split('\xc2\xa0')
#if the item in the splitted array is a digit, return the number.
#else, forget the weird symbols and return an empty string
for item in splitted:
if not item.isdigit():
return item
return ''
def clean_data(data):
#input: raw data fram
#output: cleaned data frame
#apply to the get_modifiers helper function above
data["Ord_Mod"] = data.Ord.apply(get_modifiers)
data.Ord_Mod[data.Ord_Mod == 'nan'] = ''
#fill the nan values in data.Ord with 0.
data.Ord = data.Ord.fillna(0)
#Sometimes there are '?' or '<' in the data, want to strip those out.
for o in range(len(data.Ord)):
o_list = str(data.Ord[o]).split('\xc2\xa0')
for item in o_list:
if(item.isdigit()):
data.Ord[o] = int(item)
#We noticed that there were about 3 empty first names, so we filled it with an empty string.
data["First"] = data["First"].fillna("")
#When exploring the data, we noticed that there were 4 empty status values. So we filled it with "Unknown".
data["Status"] = data["Status"].fillna("Unknown")
#There were about 10 nan D/O, so we filled those values with "Unknown".
data["D/O"] = data["D/O"].fillna("Unknown")
#The Notes column had nans, so replace nans with an empty string.
data["Notes"] = data["Notes"].fillna("")
#The Status column had two status: "convicted" and "Convicted".
#They should be the same, so we made everything in this column lower case.
data["Status"] = data["Status"].str.lower()
#The Status column had two strings: "sued" and "sued.".
#They should be the same, so we stripped out the "."
data["Status"] = data["Status"].str.strip(".")
    #There are 7 dioceses listed as "none", so we set their Diocese value to "Unknown, UNK"
    data.loc[data["Diocese"] == "none", "Diocese"] = "Unknown, UNK"
return data
df = clean_data(df)
df
# +
df[df["Source/Assignments"] == ""].sum()
print(len(df["Source/Assignments"]))
print(len(df["First"]))
print(df.First.hasnans)
print(len(df[df["Diocese"] == ""]))
print(df["Source/Assignments"].hasnans)
print(len(df[df["Diocese"] == "none"]))
# +
states = {};
for l in df.Diocese:
cityState = l.split(", ")
if cityState[1] in states.keys():
states[cityState[1]] +=1;
else:
states[cityState[1]] =1;
print(states)
# -
# ax = sns.barplot(x="states")
plt.figure(figsize=(20,10))
sns.barplot(x=states.keys(), y=states.values())
# +
state_topo = r'us_states.topo.json'
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}
]
vis = vincent.Map(geo_data=geo_data, scale=200)
vis.to_json('vega.json')
# +
county_topo = r'us_counties.topo.json'  # assumed filename, analogous to state_topo above
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'},
{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = vincent.Map(geo_data=geo_data, scale=1000, projection='albersUsa')
# +
world_topo = r'world-countries.topo.json'
geo_data = [{'name': 'countries',
'url': world_topo,
'feature': 'world-countries'}]
vis = vincent.Map(geo_data=geo_data, scale=200)
# -
| 4,352 |
/9_RF_GBDT.ipynb
|
b9f1b3348ea3740e901eb9b8bb26b3533e407284
|
[] |
no_license
|
chetanmedipally/donorsChoose
|
https://github.com/chetanmedipally/donorsChoose
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,122,346 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Load Necessary Libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# #### Basic Plot
plt.plot([1, 2, 3, 4], [7, 6, 5, 4])
plt.show()
# +
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.plot(x, y)
plt.show()
# +
#add title to the graph
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes")
plt.plot(x, y)
plt.show()
# +
#add labels to the graph
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes")
plt.xlabel("X Axis - Squares")
plt.ylabel("Y Axis - Cubes")
plt.plot(x, y)
plt.show()
# +
#format text in the graph
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.plot(x, y)
plt.show()
# +
#define your own ticks in graph
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.xticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.yticks([0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000])
plt.plot(x, y)
plt.show()
# +
#add legend to the graph
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.xticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.yticks([0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000])
plt.plot(x, y, label = "Sq V Cu")
plt.legend()
plt.show()
# +
#format graph line
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.xticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.yticks([0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000])
plt.plot(x, y, label = "Sq V Cu", color = "#f3672e", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.legend()
plt.show()
# +
#plot a second line
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.xticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.yticks([0, 200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
x2 = np.arange(0, 7, 0.5)
plt.plot(x2, x2**4, label = "Pow 4", color = "#abc123", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.plot(x, y, label = "Sq V Cu", color = "#f3672e", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.legend()
plt.show()
# +
#resize the graph
plt.figure(figsize = (5, 3), dpi = 150)
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.xticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.yticks([0, 200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
x2 = np.arange(0, 7, 0.5)
plt.plot(x2, x2**4, label = "Pow 4", color = "#abc123", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.plot(x, y, label = "Sq V Cu", color = "#f3672e", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.legend()
plt.show()
# +
#save the graph
plt.figure(figsize = (5, 3), dpi = 150)
x = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
y = [1, 8, 27, 64, 125, 256, 343, 512, 729, 1000]
plt.title("Squares V Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 18})
plt.xlabel("X Axis - Squares", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.ylabel("Y Axis - Cubes", fontdict = {'fontname' : 'Comic Sans MS', 'fontsize' : 14})
plt.xticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.yticks([0, 200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
x2 = np.arange(0, 7, 0.5)
plt.plot(x2, x2**4, label = "Pow 4", color = "#abc123", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.plot(x, y, label = "Sq V Cu", color = "#f3672e", linewidth = 3, linestyle = "solid", marker = ".", markersize = "12", markeredgecolor = "#000")
plt.legend()
plt.savefig("Line Graph New.png", dpi = 300)
plt.show()
# -
# <ul>
# Prior to May 17, 2016, the four essay prompts were as follows:
# <li>__project_essay_1:__ "Introduce us to your classroom"</li>
# <li>__project_essay_2:__ "Tell us more about your students"</li>
# <li>__project_essay_3:__ "Describe how your students will use the materials you're requesting"</li>
# <li>__project_essay_3:__ "Close by sharing why your project will make a difference"</li>
# </ul>
#
#
# <ul>
# Starting on May 17, 2016, the number of essays was reduced from 4 to 2, and the prompts for the first 2 essays were changed to the following:<br>
# <li>__project_essay_1:__ "Describe your students: What makes your students special? Specific details about their background, your neighborhood, and your school are all helpful."</li>
# <li>__project_essay_2:__ "About your project: How will these materials make a difference in your students' learning and improve their school lives?"</li>
# <br>For all projects with project_submitted_datetime of 2016-05-17 and later, the values of project_essay_3 and project_essay_4 will be NaN.
# </ul>
#
# + colab={} colab_type="code" id="XTrCJ5p9Cyl7" outputId="06c35f2f-d281-4345-9668-f8f44b28d6ac"
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import sqlite3
import pandas as pd
import numpy as np
import nltk
import string
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
from tqdm import tqdm
import random
import os
from chart_studio.plotly import plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
from collections import Counter
# -
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from collections import Counter
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.calibration import CalibratedClassifierCV
# + [markdown] colab_type="text" id="MjknLZQqCymQ"
# ## 1.1 Reading Data
# + colab={} colab_type="code" id="abtcqBS8CymT"
project_data = pd.read_csv('train_data.csv')
resource_data = pd.read_csv('resources.csv')
# + colab={} colab_type="code" id="Prf2ghjDCymX" outputId="226c1b3c-35c9-48bd-afc9-7a819c871e8d"
print("Number of data points in train data", project_data.shape)
print('-'*50)
print("The attributes of data :", project_data.columns.values)
# -
project_data = project_data.sample(n=5000)
# + colab={} colab_type="code" id="BTGMDQoPCymc" outputId="5b3c47b3-d5d0-416b-b512-a59b77f60f0b"
# how to replace elements in list python: https://stackoverflow.com/a/2582163/4084039
cols = ['Date' if x=='project_submitted_datetime' else x for x in list(project_data.columns)]
#sort dataframe based on time pandas python: https://stackoverflow.com/a/49702492/4084039
project_data['Date'] = pd.to_datetime(project_data['project_submitted_datetime'])
project_data.drop('project_submitted_datetime', axis=1, inplace=True)
project_data.sort_values(by=['Date'], inplace=True)
# how to reorder columns pandas python: https://stackoverflow.com/a/13148611/4084039
project_data = project_data[cols]
project_data.head(2)
# + colab={} colab_type="code" id="ntdWuRoUCymj" outputId="7f1911f2-61b8-4069-bfb0-9fdfeb2419e3"
print("Number of data points in train data", resource_data.shape)
print(resource_data.columns.values)
resource_data.head(2)
# + colab={} colab_type="code" id="dQunmy9FCypT"
price_data = resource_data.groupby('id').agg({'price':'sum', 'quantity':'sum'}).reset_index()
project_data = pd.merge(project_data, price_data, on='id', how='left')
# + [markdown] colab_type="text" id="6WZaYhwJCymp"
# ## 1.2 preprocessing of `project_subject_categories`
# + colab={} colab_type="code" id="Mdkhq7PRCymr"
catogories = list(project_data['project_subject_categories'].values)
# remove special characters from list of strings python: https://stackoverflow.com/a/47301924/4084039
# https://www.geeksforgeeks.org/removing-stop-words-nltk-python/
# https://stackoverflow.com/questions/23669024/how-to-strip-a-specific-word-from-a-string
# https://stackoverflow.com/questions/8270092/remove-all-whitespace-in-a-string-in-python
cat_list = []
for i in catogories:
temp = ""
# consider we have text like this "Math & Science, Warmth, Care & Hunger"
for j in i.split(','): # it will split it in three parts ["Math & Science", "Warmth", "Care & Hunger"]
if 'The' in j.split(): # this will split each of the catogory based on space "Math & Science"=> "Math","&", "Science"
j=j.replace('The','') # if we have the words "The" we are going to replace it with ''(i.e removing 'The')
        j = j.replace(' ','') # we are replacing all the ' ' (space) with '' (empty) ex: "Math & Science" => "Math&Science"
temp+=j.strip()+" " #" abc ".strip() will return "abc", remove the trailing spaces
    temp = temp.replace('&','_') # we are replacing '&' with '_'
cat_list.append(temp.strip())
project_data['clean_categories'] = cat_list
project_data.drop(['project_subject_categories'], axis=1, inplace=True)
#from collections import Counter
#my_counter = Counter()
#for word in project_data['clean_categories'].values:
# my_counter.update(word.split())
#
#cat_dict = dict(my_counter)
#sorted_cat_dict = dict(sorted(cat_dict.items(), key=lambda kv: kv[1]))
# + [markdown] colab_type="text" id="386yx3T2Cymv"
# ## 1.3 preprocessing of `project_subject_subcategories`
# + colab={} colab_type="code" id="4QSP0r8XCymw"
sub_catogories = list(project_data['project_subject_subcategories'].values)
# remove special characters from list of strings python: https://stackoverflow.com/a/47301924/4084039
# https://www.geeksforgeeks.org/removing-stop-words-nltk-python/
# https://stackoverflow.com/questions/23669024/how-to-strip-a-specific-word-from-a-string
# https://stackoverflow.com/questions/8270092/remove-all-whitespace-in-a-string-in-python
sub_cat_list = []
for i in sub_catogories:
temp = ""
# consider we have text like this "Math & Science, Warmth, Care & Hunger"
for j in i.split(','): # it will split it in three parts ["Math & Science", "Warmth", "Care & Hunger"]
if 'The' in j.split(): # this will split each of the catogory based on space "Math & Science"=> "Math","&", "Science"
j=j.replace('The','') # if we have the words "The" we are going to replace it with ''(i.e removing 'The')
        j = j.replace(' ','') # we are replacing all the ' ' (space) with '' (empty) ex: "Math & Science" => "Math&Science"
temp +=j.strip()+" "#" abc ".strip() will return "abc", remove the trailing spaces
temp = temp.replace('&','_')
sub_cat_list.append(temp.strip())
project_data['clean_subcategories'] = sub_cat_list
project_data.drop(['project_subject_subcategories'], axis=1, inplace=True)
# count of all the words in corpus python: https://stackoverflow.com/a/22898595/4084039
#my_counter = Counter()
#for word in project_data['clean_subcategories'].values:
# my_counter.update(word.split())
#
#sub_cat_dict = dict(my_counter)
#sorted_sub_cat_dict = dict(sorted(sub_cat_dict.items(), key=lambda kv: kv[1]))
# + [markdown] colab_type="text" id="NANzhWlLCynN"
# ## 1.3 Text preprocessing
# + colab={} colab_type="code" id="yqsmu-BTCynQ"
# merge two column text dataframe:
project_data["essay"] = project_data["project_essay_1"].map(str) +\
project_data["project_essay_2"].map(str) + \
project_data["project_essay_3"].map(str) + \
project_data["project_essay_4"].map(str)
project_data.drop(['project_essay_1','project_essay_2','project_essay_3','project_essay_4'], axis=1, inplace=True)
# + colab={} colab_type="code" id="xxtnd3maCynV" outputId="2002cbb7-9006-4764-ee13-c990d1a3a99e"
project_data.head(2)
# + colab={} colab_type="code" id="2ou8qSzkCyna"
#### 1.4.2.3 Using Pretrained Models: TFIDF weighted W2V
# + colab={} colab_type="code" id="TCl7L1DhCyne" outputId="c1e9c200-8868-4b26-adbd-7836d815abef"
# printing some random reviews
print(project_data['essay'].values[0])
print("="*50)
print(project_data['essay'].values[150])
print("="*50)
print(project_data['essay'].values[1000])
print("="*50)
# + colab={} colab_type="code" id="Yqj4vGVoCynh"
# https://stackoverflow.com/a/47091490/4084039
import re
def decontracted(phrase):
# specific
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
# + colab={} colab_type="code" id="aDUbe9piCynj" outputId="cd082bbf-9ba2-4373-ea65-73c45627bb7d"
sent = decontracted(project_data['essay'].values[200])
print(sent)
print("="*50)
# + colab={} colab_type="code" id="vXSbHYJ4Cynp" outputId="1becc1e1-bb25-48aa-f44f-32c99df9e13a"
# \r \n \t remove from string python: http://texthandler.com/info/remove-line-breaks-python/
sent = sent.replace('\\r', ' ')
sent = sent.replace('\\"', ' ')
sent = sent.replace('\\n', ' ')
print(sent)
# + colab={} colab_type="code" id="67Agrz_YCynt" outputId="ec6bf4e3-c419-4740-9989-fa53128abe20"
#remove spacial character: https://stackoverflow.com/a/5843547/4084039
sent = re.sub('[^A-Za-z0-9]+', ' ', sent)
print(sent)
# + colab={} colab_type="code" id="hhyPw-8wCyny"
# https://gist.github.com/sebleier/554280
# we are removing the words from the stop words list: 'no', 'nor', 'not'
stopwords= ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't"]
# + colab={} colab_type="code" id="d6RCdphRCyn0" outputId="a59d693e-fa5b-4821-e173-e308905b96d5"
# Combining all the above steps
from tqdm import tqdm
preprocessed_essays = []
# tqdm is for printing the status bar
for sentance in tqdm(project_data['essay'].values):
sent = decontracted(sentance)
sent = sent.replace('\\r', ' ')
sent = sent.replace('\\"', ' ')
sent = sent.replace('\\n', ' ')
sent = re.sub('[^A-Za-z0-9]+', ' ', sent)
# https://gist.github.com/sebleier/554280
sent = ' '.join(e for e in sent.split() if e.lower() not in stopwords)
preprocessed_essays.append(sent.lower().strip())
# + colab={} colab_type="code" id="rPo9cLvNCyn3" outputId="f4069bd0-65e3-46af-e799-f7e9132301bb"
# after preprocesing
preprocessed_essays[200]
# -
project_data['essay'] = preprocessed_essays
# + [markdown] colab_type="text" id="QeUw6WMKCyn7"
# <h2><font color='red'> 1.4 Preprocessing of `project_title`</font></h2>
# + colab={} colab_type="code" id="Ql9ttfW3Cyn7"
# similarly you can preprocess the titles also
# -
preprocessed_titles = []
# tqdm is for printing the status bar
for sentance in tqdm(project_data['project_title'].values):
sent = decontracted(sentance)
sent = sent.replace('\\r', ' ')
sent = sent.replace('\\"', ' ')
sent = sent.replace('\\n', ' ')
sent = re.sub('[^A-Za-z0-9]+', ' ', sent)
# https://gist.github.com/sebleier/554280
sent = ' '.join(e for e in sent.split() if e not in stopwords)
preprocessed_titles.append(sent.lower().strip())
project_data['project_title'] = preprocessed_titles
# + [markdown] colab_type="text" id="NQg4ZfFtCyn9"
# ## 1.5 Preparing data for models
# + colab={} colab_type="code" id="V4GS54ZTCyn-" outputId="febd575b-ed33-454e-d2d5-e8f6f635d117"
project_data.columns
# + [markdown] colab_type="text" id="6FDsc1epCyoD"
# we are going to consider
#
# - school_state : categorical data
# - clean_categories : categorical data
# - clean_subcategories : categorical data
# - project_grade_category : categorical data
# - teacher_prefix : categorical data
#
# - project_title : text data
# - text : text data
# - project_resource_summary: text data (optinal)
#
# - quantity : numerical (optinal)
# - teacher_number_of_previously_posted_projects : numerical
# - price : numerical
# -
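# A quick sketch (an addition) to confirm that the columns listed above are present and to
# see their missing-value counts before encoding.
# +
feature_cols = ['school_state', 'clean_categories', 'clean_subcategories',
                'project_grade_category', 'teacher_prefix', 'project_title',
                'essay', 'teacher_number_of_previously_posted_projects', 'price']
print(project_data[feature_cols].isnull().sum())
# -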
project_data.head(2)
project_data['teacher_prefix'].fillna(project_data['teacher_prefix'].value_counts(dropna=False).idxmax(),inplace=True)
X = project_data.drop(['Unnamed: 0','id','teacher_id','Date','project_resource_summary','quantity'],axis=1)
y = project_data['project_is_approved']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, test_size = 0.2, random_state = 42)
X_train, X_cv, y_train, y_cv = train_test_split(X_train, y_train, stratify = y_train, test_size = 0.2, random_state = 42)
print(X_train.shape)
print(X_cv.shape)
print(X_test.shape)
print(y_train.shape)
print(y_cv.shape)
print(y_test.shape)
# + [markdown] colab_type="text" id="0d0QeeQ-CyoD"
# ### 1.5.1 Vectorizing Categorical data
# + [markdown] colab_type="text" id="kFZFSOirCyoD"
# - https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/handling-categorical-and-numerical-features/
# -
def response_table(categorical_column):
categorical_dict = {}
unique_categories = X_train[categorical_column].unique()
for category in unique_categories:
category_total_count = X_train[X_train[categorical_column] == category].shape[0]
category_0_count = X_train[(X_train[categorical_column] == category) & (X_train['project_is_approved'] == 0)].shape[0]
category_1_count = X_train[(X_train[categorical_column] == category) & (X_train['project_is_approved'] == 1)].shape[0]
categorical_dict[category] = [category_0_count/category_total_count,category_1_count/category_total_count]
return categorical_dict
from sklearn.preprocessing import StandardScaler
import scipy
def response_encode(encoded_dict,key):
if (encoded_dict.get(key,0) != 0):
return encoded_dict.get(key,0)
else:
return [0.5,0.5]
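# A tiny illustration (values made up) of what the response encoding produces: for each
# category we store [P(class=0 | category), P(class=1 | category)], and unseen categories
# fall back to [0.5, 0.5].
# +
demo_dict = {'Math_Science': [0.12, 0.88], 'Literacy_Language': [0.15, 0.85]}  # hypothetical values
print(response_encode(demo_dict, 'Math_Science'))    # -> [0.12, 0.88]
print(response_encode(demo_dict, 'UnseenCategory'))  # -> [0.5, 0.5]
# -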
# +
catrgories_dict = response_table('clean_categories')
clean_categories_scalar_0 = StandardScaler()
clean_categories_scalar_1 = StandardScaler()
X_train['clean_categories_0'] = X_train['clean_categories'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_train['clean_categories_1'] = X_train['clean_categories'].apply(lambda x : response_encode(catrgories_dict,x)[1])
cc_train_stand_0 = clean_categories_scalar_0.fit_transform(X_train['clean_categories_0'].values.reshape(-1,1))
cc_train_stand_1 = clean_categories_scalar_1.fit_transform(X_train['clean_categories_1'].values.reshape(-1,1))
X_train.drop(['clean_categories'],axis=1,inplace=True)
X_cv['clean_categories_0'] = X_cv['clean_categories'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_cv['clean_categories_1'] = X_cv['clean_categories'].apply(lambda x : response_encode(catrgories_dict,x)[1])
cc_cv_stand_0 = clean_categories_scalar_0.transform(X_cv['clean_categories_0'].values.reshape(-1,1))
cc_cv_stand_1 = clean_categories_scalar_1.transform(X_cv['clean_categories_1'].values.reshape(-1,1))
X_cv.drop(['clean_categories'],axis=1,inplace=True)
X_test['clean_categories_0'] = X_test['clean_categories'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_test['clean_categories_1'] = X_test['clean_categories'].apply(lambda x : response_encode(catrgories_dict,x)[1])
cc_test_stand_0 = clean_categories_scalar_0.transform(X_test['clean_categories_0'].values.reshape(-1,1))
cc_test_stand_1 = clean_categories_scalar_1.transform(X_test['clean_categories_1'].values.reshape(-1,1))
X_test.drop(['clean_categories'],axis=1,inplace=True)
# +
catrgories_dict = response_table('clean_subcategories')
X_train['clean_subcategories_0'] = X_train['clean_subcategories'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_train['clean_subcategories_1'] = X_train['clean_subcategories'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_train.drop(['clean_subcategories'],axis=1,inplace=True)
X_cv['clean_subcategories_0'] = X_cv['clean_subcategories'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_cv['clean_subcategories_1'] = X_cv['clean_subcategories'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_cv.drop(['clean_subcategories'],axis=1,inplace=True)
X_test['clean_subcategories_0'] = X_test['clean_subcategories'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_test['clean_subcategories_1'] = X_test['clean_subcategories'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_test.drop(['clean_subcategories'],axis=1,inplace=True)
clean_subcategories_scalar_0 = StandardScaler()
clean_subcategories_scalar_1 = StandardScaler()
csc_train_stand_0 = clean_subcategories_scalar_0.fit_transform(X_train['clean_subcategories_0'].values.reshape(-1,1))
csc_train_stand_1 = clean_subcategories_scalar_1.fit_transform(X_train['clean_subcategories_1'].values.reshape(-1,1))
csc_cv_stand_0 = clean_subcategories_scalar_0.transform(X_cv['clean_subcategories_0'].values.reshape(-1,1))
csc_cv_stand_1 = clean_subcategories_scalar_1.transform(X_cv['clean_subcategories_1'].values.reshape(-1,1))
csc_test_stand_0 = clean_subcategories_scalar_0.transform(X_test['clean_subcategories_0'].values.reshape(-1,1))
csc_test_stand_1 = clean_subcategories_scalar_1.transform(X_test['clean_subcategories_1'].values.reshape(-1,1))
# + colab={} colab_type="code" id="0ecQOf-JCyoL"
# you can do the similar thing with state, teacher_prefix and project_grade_category also
# +
catrgories_dict = response_table('teacher_prefix')
X_train['teacher_prefix_0'] = X_train['teacher_prefix'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_train['teacher_prefix_1'] = X_train['teacher_prefix'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_train.drop(['teacher_prefix'],axis=1,inplace=True)
X_cv['teacher_prefix_0'] = X_cv['teacher_prefix'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_cv['teacher_prefix_1'] = X_cv['teacher_prefix'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_cv.drop(['teacher_prefix'],axis=1,inplace=True)
X_test['teacher_prefix_0'] = X_test['teacher_prefix'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_test['teacher_prefix_1'] = X_test['teacher_prefix'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_test.drop(['teacher_prefix'],axis=1,inplace=True)
teacher_prefix_scalar_0 = StandardScaler()
teacher_prefix_scalar_1 = StandardScaler()
tp_train_stand_0 = teacher_prefix_scalar_0.fit_transform(X_train['teacher_prefix_0'].values.reshape(-1,1))
tp_train_stand_1 = teacher_prefix_scalar_1.fit_transform(X_train['teacher_prefix_1'].values.reshape(-1,1))
tp_cv_stand_0 = teacher_prefix_scalar_0.transform(X_cv['teacher_prefix_0'].values.reshape(-1,1))
tp_cv_stand_1 = teacher_prefix_scalar_1.transform(X_cv['teacher_prefix_1'].values.reshape(-1,1))
tp_test_stand_0 = teacher_prefix_scalar_0.transform(X_test['teacher_prefix_0'].values.reshape(-1,1))
tp_test_stand_1 = teacher_prefix_scalar_1.transform(X_test['teacher_prefix_1'].values.reshape(-1,1))
# +
catrgories_dict = response_table('school_state')
X_train['school_state_0'] = X_train['school_state'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_train['school_state_1'] = X_train['school_state'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_train.drop(['school_state'],axis=1,inplace=True)
X_cv['school_state_0'] = X_cv['school_state'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_cv['school_state_1'] = X_cv['school_state'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_cv.drop(['school_state'],axis=1,inplace=True)
X_test['school_state_0'] = X_test['school_state'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_test['school_state_1'] = X_test['school_state'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_test.drop(['school_state'],axis=1,inplace=True)
school_state_scalar_0 = StandardScaler()
school_state_scalar_1 = StandardScaler()
ss_train_stand_0 = school_state_scalar_0.fit_transform(X_train['school_state_0'].values.reshape(-1,1))
ss_train_stand_1 = school_state_scalar_1.fit_transform(X_train['school_state_1'].values.reshape(-1,1))
ss_cv_stand_0 = school_state_scalar_0.transform(X_cv['school_state_0'].values.reshape(-1,1))
ss_cv_stand_1 = school_state_scalar_1.transform(X_cv['school_state_1'].values.reshape(-1,1))
ss_test_stand_0 = school_state_scalar_0.transform(X_test['school_state_0'].values.reshape(-1,1))
ss_test_stand_1 = school_state_scalar_1.transform(X_test['school_state_1'].values.reshape(-1,1))
# +
catrgories_dict = response_table('project_grade_category')
X_train['project_grade_category_0'] = X_train['project_grade_category'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_train['project_grade_category_1'] = X_train['project_grade_category'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_train.drop(['project_grade_category'],axis=1,inplace=True)
X_cv['project_grade_category_0'] = X_cv['project_grade_category'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_cv['project_grade_category_1'] = X_cv['project_grade_category'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_cv.drop(['project_grade_category'],axis=1,inplace=True)
X_test['project_grade_category_0'] = X_test['project_grade_category'].apply(lambda x : response_encode(catrgories_dict,x)[0])
X_test['project_grade_category_1'] = X_test['project_grade_category'].apply(lambda x : response_encode(catrgories_dict,x)[1])
X_test.drop(['project_grade_category'],axis=1,inplace=True)
project_grade_category_scalar_0 = StandardScaler()
project_grade_category_scalar_1 = StandardScaler()
pgc_train_stand_0 = project_grade_category_scalar_0.fit_transform(X_train['project_grade_category_0'].values.reshape(-1,1))
pgc_train_stand_1 = project_grade_category_scalar_1.fit_transform(X_train['project_grade_category_1'].values.reshape(-1,1))
pgc_cv_stand_0 = project_grade_category_scalar_0.transform(X_cv['project_grade_category_0'].values.reshape(-1,1))
pgc_cv_stand_1 = project_grade_category_scalar_1.transform(X_cv['project_grade_category_1'].values.reshape(-1,1))
pgc_test_stand_0 = project_grade_category_scalar_0.transform(X_test['project_grade_category_0'].values.reshape(-1,1))
pgc_test_stand_1 = project_grade_category_scalar_1.transform(X_test['project_grade_category_1'].values.reshape(-1,1))
# -
X_train.drop(['project_is_approved'],axis=1,inplace=True)
X_cv.drop(['project_is_approved'],axis=1,inplace=True)
X_test.drop(['project_is_approved'],axis=1,inplace=True)
# + [markdown] colab_type="text" id="5YnkzKnmCyoN"
# ### 1.5.2 Vectorizing Text data
# + [markdown] colab_type="text" id="W544CoFtCyoN"
# #### 1.5.2.1 Bag of words
# + colab={} colab_type="code" id="2D93QosECyoP" outputId="ca16a2b1-fde3-46c0-a026-6a839ffa6e4a"
# We are considering only the words which appeared in at least 10 documents(rows or projects).
vectorizer = CountVectorizer(min_df=10)
text_bow_train= vectorizer.fit_transform(X_train['essay'].values)
text_bow_cv= vectorizer.transform(X_cv['essay'].values)
text_bow_test= vectorizer.transform(X_test['essay'].values)
print("Shape of matrix after one hot encoding train ",text_bow_train.shape)
print("Shape of matrix after one hot encoding cv ",text_bow_cv.shape)
print("Shape of matrix after one hot encoding test ",text_bow_test.shape)
# + colab={} colab_type="code" id="rvPTRSkrCyoU"
# you can vectorize the title also
# before you vectorize the title make sure you preprocess it
# -
# We are considering only the words which appeared in at least 10 documents(rows or projects).
vectorizer = CountVectorizer(min_df=10)
title_bow_train= vectorizer.fit_transform(X_train['project_title'].values)
title_bow_cv= vectorizer.transform(X_cv['project_title'].values)
title_bow_test= vectorizer.transform(X_test['project_title'].values)
print("Shape of matrix after one hot encoding train ",title_bow_train.shape)
print("Shape of matrix after one hot encoding cv ", title_bow_cv.shape)
print("Shape of matrix after one hot encoding test ", title_bow_test.shape)
# + [markdown] colab_type="text" id="gK_SHRpTCyol"
# #### 1.5.2.2 TFIDF vectorizer
# + colab={} colab_type="code" id="l0gzc2iwCyoo" outputId="3ada03da-5eec-4a16-c7bd-915d1c9352ae"
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(min_df=10)
text_tfidf_train = vectorizer.fit_transform(X_train['essay'].values)
text_tfidf_cv = vectorizer.transform(X_cv['essay'].values)
text_tfidf_test = vectorizer.transform(X_test['essay'].values)
print("Shape of matrix after one hot encoding train",text_tfidf_train.shape)
print("Shape of matrix after one hot encoding cv", text_tfidf_cv.shape)
print("Shape of matrix after one hot encoding test", text_tfidf_test.shape)
# -
vectorizer = TfidfVectorizer(min_df=10)
title_tfidf_train= vectorizer.fit_transform(X_train['project_title'].values)
title_tfidf_cv= vectorizer.transform(X_cv['project_title'].values)
title_tfidf_test= vectorizer.transform(X_test['project_title'].values)
print("Shape of matrix after one hot encoding train ",title_tfidf_train.shape)
print("Shape of matrix after one hot encoding cv ", title_tfidf_cv.shape)
print("Shape of matrix after one hot encoding test ", title_tfidf_test.shape)
# + [markdown] colab_type="text" id="YHwGesZUCyo1"
# #### 1.5.2.3 Using Pretrained Models: Avg W2V
# + colab={} colab_type="code" id="FcsomcruCyo2" outputId="9876bfe7-73e1-454a-b937-f66fae906539"
'''
# Reading glove vectors in python: https://stackoverflow.com/a/38230349/4084039
def loadGloveModel(gloveFile):
print ("Loading Glove Model")
f = open(gloveFile,'r', encoding="utf8")
model = {}
for line in tqdm(f):
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
print ("Done.",len(model)," words loaded!")
return model
model = loadGloveModel('glove.42B.300d.txt')
# ============================
Output:
Loading Glove Model
1917495it [06:32, 4879.69it/s]
Done. 1917495 words loaded!
# ============================
words = []
for i in preproced_texts:
words.extend(i.split(' '))
for i in preproced_titles:
words.extend(i.split(' '))
print("all the words in the coupus", len(words))
words = set(words)
print("the unique words in the coupus", len(words))
inter_words = set(model.keys()).intersection(words)
print("The number of words that are present in both glove vectors and our coupus", \
len(inter_words),"(",np.round(len(inter_words)/len(words)*100,3),"%)")
words_courpus = {}
words_glove = set(model.keys())
for i in words:
if i in words_glove:
words_courpus[i] = model[i]
print("word 2 vec length", len(words_courpus))
# storing variables in pickle files in python: http://www.jessicayung.com/how-to-use-pickle-to-save-and-load-variables-in-python/
import pickle
with open('glove_vectors', 'wb') as f:
pickle.dump(words_courpus, f)
'''
# + colab={} colab_type="code" id="Gu0YB3p4Cyo8"
# storing variables in pickle files in python: http://www.jessicayung.com/how-to-use-pickle-to-save-and-load-variables-in-python/
# make sure you have the glove_vectors file
with open('glove_vectors', 'rb') as f:
model = pickle.load(f)
glove_words = set(model.keys())
# + colab={} colab_type="code" id="TEKi0VqVCyo_" outputId="23b34800-bc16-471c-c966-256d45cbdbcd"
# average Word2Vec
# compute average word2vec for each review.
def avg_w2v(data):
avg_w2v_vectors = []; # the avg-w2v for each sentence/review is stored in this list
for sentence in tqdm(data): # for each review/sentence
        vector = np.zeros(300) # word vectors are 300-dimensional
cnt_words =0; # num of words with a valid vector in the sentence/review
for word in sentence.split(): # for each word in a review/sentence
if word in glove_words:
vector += model[word]
cnt_words += 1
if cnt_words != 0:
vector /= cnt_words
avg_w2v_vectors.append(vector)
return avg_w2v_vectors
# +
text_avg_w2v_train = avg_w2v(X_train['essay'].values)
text_avg_w2v_cv = avg_w2v(X_cv['essay'].values)
text_avg_w2v_test = avg_w2v(X_test['essay'].values)
text_avg_w2v_train = scipy.sparse.csr_matrix(text_avg_w2v_train)
text_avg_w2v_cv = scipy.sparse.csr_matrix(text_avg_w2v_cv)
text_avg_w2v_test = scipy.sparse.csr_matrix(text_avg_w2v_test)
print(text_avg_w2v_train.shape)
print(text_avg_w2v_cv.shape)
print(text_avg_w2v_test.shape)
# +
title_avg_w2v_train = avg_w2v(X_train['project_title'])
title_avg_w2v_cv = avg_w2v(X_cv['project_title'])
title_avg_w2v_test = avg_w2v(X_test['project_title'])
title_avg_w2v_train = scipy.sparse.csr_matrix(title_avg_w2v_train)
title_avg_w2v_cv = scipy.sparse.csr_matrix(title_avg_w2v_cv)
title_avg_w2v_test = scipy.sparse.csr_matrix(title_avg_w2v_test)
print(title_avg_w2v_train.shape)
print(title_avg_w2v_cv.shape)
print(title_avg_w2v_test.shape)
# + [markdown] colab_type="text" id="_s3QN_ZNCypD"
# #### 1.5.2.4 Using Pretrained Models: TFIDF weighted W2V
# + colab={} colab_type="code" id="8aB83HDUCypL" outputId="c1329ce0-bc01-443f-f1c8-bd2ce2bd50f3"
def tfidf_w2v(training_data, test_data):
# S = ["abc def pqr", "def def def abc", "pqr pqr def"]
tfidf_model = TfidfVectorizer()
tfidf_model.fit(training_data)
    # build a dictionary with each word as key and its idf as value
dictionary = dict(zip(tfidf_model.get_feature_names(), list(tfidf_model.idf_)))
tfidf_words = set(tfidf_model.get_feature_names())
# average Word2Vec
# compute average word2vec for each review.
tfidf_w2v_vectors = []; # the avg-w2v for each sentence/review is stored in this list
for sentence in tqdm(test_data): # for each review/sentence
        vector = np.zeros(300) # word vectors are 300-dimensional
        tf_idf_weight =0; # running sum of tf-idf weights of the words with a valid vector
for word in sentence.split(): # for each word in a review/sentence
if (word in glove_words) and (word in tfidf_words):
vec = model[word] # getting the vector for each word
# here we are multiplying idf value(dictionary[word]) and the tf value((sentence.count(word)/len(sentence.split())))
tf_idf = dictionary[word]*(sentence.count(word)/len(sentence.split())) # getting the tfidf value for each word
vector += (vec * tf_idf) # calculating tfidf weighted w2v
tf_idf_weight += tf_idf
if tf_idf_weight != 0:
vector /= tf_idf_weight
tfidf_w2v_vectors.append(vector)
return tfidf_w2v_vectors
# +
text_tfidf_w2v_train = tfidf_w2v(X_train['essay'].values,X_train['essay'].values)
text_tfidf_w2v_cv = tfidf_w2v(X_train['essay'].values,X_cv['essay'].values)
text_tfidf_w2v_test = tfidf_w2v(X_train['essay'].values,X_test['essay'].values)
text_tfidf_w2v_train = scipy.sparse.csr_matrix(text_tfidf_w2v_train)
text_tfidf_w2v_cv = scipy.sparse.csr_matrix(text_tfidf_w2v_cv)
text_tfidf_w2v_test = scipy.sparse.csr_matrix(text_tfidf_w2v_test)
print(text_tfidf_w2v_train.shape)
print(text_tfidf_w2v_cv.shape)
print(text_tfidf_w2v_test.shape)
# + colab={} colab_type="code" id="yknBsjfOCypP"
# Similarly you can vectorize for title also
# +
title_tfidf_w2v_train = tfidf_w2v(X_train['project_title'].values,X_train['project_title'].values)
title_tfidf_w2v_cv = tfidf_w2v(X_train['project_title'].values,X_cv['project_title'].values)
title_tfidf_w2v_test = tfidf_w2v(X_train['project_title'].values,X_test['project_title'].values)
title_tfidf_w2v_train = scipy.sparse.csr_matrix(title_tfidf_w2v_train)
title_tfidf_w2v_cv = scipy.sparse.csr_matrix(title_tfidf_w2v_cv)
title_tfidf_w2v_test = scipy.sparse.csr_matrix(title_tfidf_w2v_test)
print(title_tfidf_w2v_train.shape)
print(title_tfidf_w2v_cv.shape)
print(title_tfidf_w2v_test.shape)
# + [markdown] colab_type="text" id="6Em6Kb2-CypR"
# ### 1.5.3 Vectorizing Numerical features
# + colab={} colab_type="code" id="owkbYbowCypV"
# check this one: https://www.youtube.com/watch?v=0HOqOcln3Z4&t=530s
# standardization sklearn: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# price_standardized = standardScalar.fit(project_data['price'].values)
# this will rise the error
# ValueError: Expected 2D array, got 1D array instead: array=[725.05 213.03 329. ... 399. 287.73 5.5 ].
# Reshape your data either using array.reshape(-1, 1)
price_scalar = StandardScaler()
#price_scalar.fit(project_data['price'].values.reshape(-1,1)) # finding the mean and standard deviation of this data
#print(f"Mean : {price_scalar.mean_[0]}, Standard deviation : {np.sqrt(price_scalar.var_[0])}")
# Now standardize the data with the above mean and variance.
price_standardized_train = price_scalar.fit_transform(X=X_train['price'].values.reshape(-1, 1))
price_standardized_cv = price_scalar.transform(X=X_cv['price'].values.reshape(-1, 1))
price_standardized_test = price_scalar.transform(X=X_test['price'].values.reshape(-1, 1))
print(price_standardized_train.shape)
print(price_standardized_cv.shape)
print(price_standardized_test.shape)
# -
pre_proj_scalar = StandardScaler()
pre_proj_standardized_train = pre_proj_scalar.fit_transform(X=X_train['teacher_number_of_previously_posted_projects'].values.reshape(-1, 1))
pre_proj_standardized_cv = pre_proj_scalar.transform(X=X_cv['teacher_number_of_previously_posted_projects'].values.reshape(-1, 1))
pre_proj_standardized_test = pre_proj_scalar.transform(X=X_test['teacher_number_of_previously_posted_projects'].values.reshape(-1, 1))
print(pre_proj_standardized_train.shape)
print(pre_proj_standardized_cv.shape)
print(pre_proj_standardized_test.shape)
# + [markdown] colab_type="text" id="5UbaJH21Cypa"
# ### 1.5.4 Merging all the above features
# + [markdown] colab_type="text" id="fAZixvIeCypb"
# - we need to merge all the feature vectors, i.e. the categorical, text and numerical vectors
# + colab={} colab_type="code" id="J7uuEmryCype" outputId="b0360c1d-592a-4bd7-b8c7-a20d91219fa8"
# merge two sparse matrices: https://stackoverflow.com/a/19710648/4084039
from scipy.sparse import hstack
# with the same hstack function we are concatenating sparse and dense matrices :)
X_BOW_train = hstack((cc_train_stand_0,cc_train_stand_1,csc_train_stand_0,csc_train_stand_1,tp_train_stand_0,tp_train_stand_1,ss_train_stand_0,ss_train_stand_1,
pgc_train_stand_0,pgc_train_stand_1,price_standardized_train, pre_proj_standardized_train,text_bow_train, title_bow_train))
X_BOW_cv = hstack((cc_cv_stand_0,cc_cv_stand_1,csc_cv_stand_0,csc_cv_stand_1,tp_cv_stand_0,tp_cv_stand_1,ss_cv_stand_0,ss_cv_stand_1,pgc_cv_stand_0,
pgc_cv_stand_1,price_standardized_cv, pre_proj_standardized_cv,text_bow_cv, title_bow_cv))
X_BOW_test = hstack((cc_test_stand_0,cc_test_stand_1,csc_test_stand_0,csc_test_stand_1,tp_test_stand_0,tp_test_stand_1,ss_test_stand_0,ss_test_stand_1,
pgc_test_stand_0,pgc_test_stand_1,price_standardized_test, pre_proj_standardized_test,text_bow_test, title_bow_test))
print(X_BOW_train.shape)
print(X_BOW_cv.shape)
print(X_BOW_test.shape)
# -
X_TFIDF_train = hstack((cc_train_stand_0,cc_train_stand_1,csc_train_stand_0,csc_train_stand_1,tp_train_stand_0,tp_train_stand_1,ss_train_stand_0,ss_train_stand_1,
pgc_train_stand_0,pgc_train_stand_1,price_standardized_train, pre_proj_standardized_train,text_tfidf_train, title_tfidf_train))
X_TFIDF_cv = hstack((cc_cv_stand_0,cc_cv_stand_1,csc_cv_stand_0,csc_cv_stand_1,tp_cv_stand_0,tp_cv_stand_1,ss_cv_stand_0,ss_cv_stand_1,pgc_cv_stand_0,
pgc_cv_stand_1,price_standardized_cv, pre_proj_standardized_cv,text_tfidf_cv , title_tfidf_cv))
X_TFIDF_test = hstack((cc_test_stand_0,cc_test_stand_1,csc_test_stand_0,csc_test_stand_1,tp_test_stand_0,tp_test_stand_1,ss_test_stand_0,ss_test_stand_1,
pgc_test_stand_0,pgc_test_stand_1,price_standardized_test, pre_proj_standardized_test,text_tfidf_test, title_tfidf_test))
print(X_TFIDF_train.shape)
print(X_TFIDF_cv.shape)
print(X_TFIDF_test.shape)
X_avg_w2v_train = hstack((cc_train_stand_0,cc_train_stand_1,csc_train_stand_0,csc_train_stand_1,tp_train_stand_0,tp_train_stand_1,ss_train_stand_0,ss_train_stand_1,pgc_train_stand_0,pgc_train_stand_1,price_standardized_train, pre_proj_standardized_train, text_avg_w2v_train, title_avg_w2v_train))
X_avg_w2v_cv = hstack((cc_cv_stand_0,cc_cv_stand_1,csc_cv_stand_0,csc_cv_stand_1,tp_cv_stand_0,tp_cv_stand_1,ss_cv_stand_0,ss_cv_stand_1,pgc_cv_stand_0,
pgc_cv_stand_1,price_standardized_cv, pre_proj_standardized_cv, text_avg_w2v_cv, title_avg_w2v_cv))
X_avg_w2v_test = hstack((cc_test_stand_0,cc_test_stand_1,csc_test_stand_0,csc_test_stand_1,tp_test_stand_0,tp_test_stand_1,ss_test_stand_0,ss_test_stand_1,
pgc_test_stand_0,pgc_test_stand_1,price_standardized_test, pre_proj_standardized_test,text_avg_w2v_test, title_avg_w2v_test))
print(X_avg_w2v_train.shape)
print(X_avg_w2v_cv.shape)
print(X_avg_w2v_test.shape)
# +
#sub_X_avg_w2v_train = X_avg_w2v_train.tocsr()[random.sample(range(X_avg_w2v_train.shape[0]), 2000), :]
#print(sub_X_avg_w2v_train.shape)
# -
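# The commented-out cell above subsamples only the feature matrix; the assignment limits Set 3 / Set 4
# to roughly 20K datapoints, and the labels have to be taken with the same row indices. A hedged sketch
# of one way to do that (an addition; it assumes `y_train` is aligned row-wise with the merged matrices):
# +
sample_idx = np.random.choice(X_avg_w2v_train.shape[0], size=min(20000, X_avg_w2v_train.shape[0]), replace=False)
sub_X_avg_w2v_train = X_avg_w2v_train.tocsr()[sample_idx, :]
sub_y_train = np.asarray(y_train)[sample_idx]
print(sub_X_avg_w2v_train.shape, sub_y_train.shape)
# -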
X_tfidf_w2v_train = hstack((cc_train_stand_0,cc_train_stand_1,csc_train_stand_0,csc_train_stand_1,tp_train_stand_0,tp_train_stand_1,ss_train_stand_0,ss_train_stand_1,
pgc_train_stand_0,pgc_train_stand_1,price_standardized_train, pre_proj_standardized_train, text_tfidf_w2v_train, title_tfidf_w2v_train))
X_tfidf_w2v_cv = hstack((cc_cv_stand_0,cc_cv_stand_1,csc_cv_stand_0,csc_cv_stand_1,tp_cv_stand_0,tp_cv_stand_1,ss_cv_stand_0,ss_cv_stand_1,pgc_cv_stand_0,
pgc_cv_stand_1,price_standardized_cv, pre_proj_standardized_cv, text_tfidf_w2v_cv, title_tfidf_w2v_cv))
X_tfidf_w2v_test = hstack((cc_test_stand_0,cc_test_stand_1,csc_test_stand_0,csc_test_stand_1,tp_test_stand_0,tp_test_stand_1,ss_test_stand_0,ss_test_stand_1,
pgc_test_stand_0,pgc_test_stand_1,price_standardized_test, pre_proj_standardized_test, text_tfidf_w2v_test, title_tfidf_w2v_test))
print(X_tfidf_w2v_train.shape)
print(X_tfidf_w2v_cv.shape)
print(X_tfidf_w2v_test.shape)
# + [markdown] colab_type="text" id="V3vrK7BSCypi"
# # Assignment 9: RF and GBDT
# -
# #### Response Coding: Example
# <img src='response.JPG' width=700px>
#
# > The response table is built only on the train dataset.
# > For a category which is not present in the train data but appears in the test data, we encode it with default values.
# Ex: in our test data, if we have State: D then we encode it as [0.5, 0.5]
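# A minimal sketch of how such a response-coding table could be built (an addition, not the graded
# solution; the column name 'school_state' and the X_train/X_cv/X_test/y_train split used above are
# assumptions, and unseen categories fall back to the default [0.5, 0.5]):
# +
def response_encode(column, X_tr, y_tr, X_other):
    # the probability table is built ONLY on the train split
    tr = pd.DataFrame({'cat': X_tr[column].values, 'y': np.asarray(y_tr)})
    prob_1 = tr.groupby('cat')['y'].mean().to_dict()        # P(y=1 | category)
    p1 = X_other[column].map(prob_1).fillna(0.5).values     # unseen categories -> 0.5
    return np.vstack((1.0 - p1, p1)).T                      # two columns: P(y=0), P(y=1)

ss_train_resp = response_encode('school_state', X_train, y_train, X_train)
ss_cv_resp    = response_encode('school_state', X_train, y_train, X_cv)
ss_test_resp  = response_encode('school_state', X_train, y_train, X_test)
print(ss_train_resp.shape, ss_cv_resp.shape, ss_test_resp.shape)
# -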
# <ol>
# <li><strong>Apply both Random Forest and GBDT on these feature sets</strong>
# <ul>
# <li><font color='red'>Set 1</font>: categorical(instead of one hot encoding, try <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/handling-categorical-and-numerical-features/'>response coding</a>: use probability values), numerical features + project_title(BOW) + preprocessed_essay (BOW)</li>
# <li><font color='red'>Set 2</font>: categorical(instead of one hot encoding, try <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/handling-categorical-and-numerical-features/'>response coding</a>: use probability values), numerical features + project_title(TFIDF)+ preprocessed_essay (TFIDF)</li>
# <li><font color='red'>Set 3</font>: categorical(instead of one hot encoding, try <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/handling-categorical-and-numerical-features/'>response coding</a>: use probability values), numerical features + project_title(AVG W2V)+ preprocessed_essay (AVG W2V). Here for this set take <b>20K</b> datapoints only. </li>
# <li><font color='red'>Set 4</font>: categorical(instead of one hot encoding, try <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/handling-categorical-and-numerical-features/'>response coding</a>: use probability values), numerical features + project_title(TFIDF W2V)+ preprocessed_essay (TFIDF W2V). Here for this set take <b>20K</b> datapoints only. </li> </ul>
# </li>
# <br>
# <li><strong>The hyperparameter tuning (Consider any two hyperparameters, preferably n_estimators and max_depth)</strong>
# <ul>
# <li> Consider the following range for hyperparameters <b>n_estimators</b> = [10, 50, 100, 150, 200, 300, 500, 1000],
# <b>max_depth</b> = [2, 3, 4, 5, 6, 7, 8, 9, 10] </li>
# <li>Find the best hyper parameter which will give the maximum <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/receiver-operating-characteristic-curve-roc-curve-and-auc-1/'>AUC</a> value</li>
# <li>Find the best hyperparameter using simple cross validation data</li>
# <li>You can write your own for loops to do this task (see the sketch just after these instructions)</li>
# </ul>
# </li>
# <br>
# <li>
# <strong>Representation of results</strong>
# <ul>
# <li>You need to plot the performance of model both on train data and cross validation data for each hyper parameter, like shown in the figure
# <img src='3d_plot.JPG' width=500px> with X-axis as <strong>n_estimators</strong>, Y-axis as <strong>max_depth</strong>, and Z-axis as <strong>AUC Score</strong> , we have given the notebook which explains how to plot this 3d plot, you can find it in the same drive <i>3d_scatter_plot.ipynb</i></li>
# <p style="text-align:center;font-size:30px;color:red;"><strong>or</strong></p> <br>
# <li>You need to plot the performance of model both on train data and cross validation data for each hyper parameter, like shown in the figure
# <img src='heat_map.JPG' width=300px> <a href='https://seaborn.pydata.org/generated/seaborn.heatmap.html'>seaborn heat maps</a> with rows as <strong>n_estimators</strong>, columns as <strong>max_depth</strong>, and values inside the cell representing <strong>AUC Score</strong> </li>
# <li>You can choose either of the plotting techniques: 3d plot or heat map</li>
# <li>Once you have found the best hyperparameter, you need to train your model with it, find the AUC on test data and plot the ROC curve on both train and test.
# <img src='train_test_auc.JPG' width=300px></li>
# <li>Along with plotting ROC curve, you need to print the <a href='https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/confusion-matrix-tpr-fpr-fnr-tnr-1/'>confusion matrix</a> with predicted and original labels of test data points
# <img src='confusion_matrix.png' width=300px></li>
# </ul>
# <br>
# <li><strong>Conclusion</strong>
# <ul>
# <li>You need to summarize the results at the end of the notebook, summarize it in the table format. To print out a table please refer to this prettytable library<a href='http://zetcode.com/python/prettytable/'> link</a>
# <img src='summary.JPG' width=400px>
# </li>
# </ul>
# </ol>
# <h4><font color='red'>Note: Data Leakage</font></h4>
#
# 1. There will be an issue of data-leakage if you vectorize the entire data and then split it into train/cv/test.
# 2. To avoid the issue of data-leakage, make sure to split your data first and then vectorize it.
# 3. While vectorizing your data, apply the method fit_transform() on your train data, and apply the method transform() on cv/test data.
# 4. For more details please go through this <a href='https://soundcloud.com/applied-ai-course/leakage-bow-and-tfidf'>link.</a>
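# As referenced in the tuning instructions above, a hand-rolled alternative to GridSearchCV could look
# like the sketch below (an addition; X_BOW_train/X_BOW_cv and y_train are built above, while y_cv is
# assumed to come from the same stratified split as X_cv):
# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score

best_auc, best_pair = -1.0, None
for n_est in [10, 50, 100, 200]:
    for depth in [3, 5, 7, 10]:
        model = RandomForestClassifier(n_estimators=n_est, max_depth=depth,
                                       class_weight='balanced', n_jobs=-1, random_state=42)
        model.fit(X_BOW_train, y_train)
        cv_auc = roc_auc_score(y_cv, model.predict_proba(X_BOW_cv)[:, 1])
        if cv_auc > best_auc:
            best_auc, best_pair = cv_auc, (n_est, depth)
print('best (n_estimators, max_depth):', best_pair, 'with CV AUC:', best_auc)
# -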
# + [markdown] colab_type="text" id="u7svGNyWCypl"
# <h1>2. Random Forest and GBDT </h1>
# + [markdown] colab_type="text" id="W6fBRNBMCypl"
# <h2>2.1 Splitting data into Train and cross validation(or test): Stratified Sampling</h2>
# + colab={} colab_type="code" id="WACKQWT_Cypl"
# please write all the code with proper documentation, and proper titles for each subsection
# go through documentations and blogs before you start coding
# first figure out what to do, and then think about how to do.
# reading and understanding error messages will be very much helpful in debugging your code
# when you plot any graph make sure you use
# a. Title, that describes your plot, this will be very helpful to the reader
# b. Legends if needed
# c. X-axis label
# d. Y-axis label
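# -
# A hedged sketch of the stratified split this section asks for (an addition, for reference only; it
# assumes `project_data` with the 'project_is_approved' label column, and uses _demo names so it does
# not clobber the X_train/X_cv/X_test already created earlier in the notebook):
# +
from sklearn.model_selection import train_test_split

y_all = project_data['project_is_approved'].values
X_tr_demo, X_te_demo, y_tr_demo, y_te_demo = train_test_split(
    project_data, y_all, test_size=0.33, stratify=y_all, random_state=42)
X_tr_demo, X_cv_demo, y_tr_demo, y_cv_demo = train_test_split(
    X_tr_demo, y_tr_demo, test_size=0.33, stratify=y_tr_demo, random_state=42)
print(X_tr_demo.shape, X_cv_demo.shape, X_te_demo.shape)
# -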
# + [markdown] colab_type="text" id="VG7SrQdCCypn"
# <h2>2.2 Make Data Model Ready: encoding numerical, categorical features</h2>
# + colab={} colab_type="code" id="cTlBv2kmCypo"
# please write all the code with proper documentation, and proper titles for each subsection
# go through documentations and blogs before you start coding
# first figure out what to do, and then think about how to do.
# reading and understanding error messages will be very much helpful in debugging your code
# make sure you featurize train and test data separately
# when you plot any graph make sure you use
# a. Title, that describes your plot, this will be very helpful to the reader
# b. Legends if needed
# c. X-axis label
# d. Y-axis label
# + [markdown] colab_type="text" id="V8pUeQFiCypq"
# <h2>2.3 Make Data Model Ready: encoding eassay, and project_title</h2>
# + colab={} colab_type="code" id="7L7e-_EfCypq"
# please write all the code with proper documentation, and proper titles for each subsection
# go through documentations and blogs before you start coding
# first figure out what to do, and then think about how to do.
# reading and understanding error messages will be very much helpful in debugging your code
# make sure you featurize train and test data separately
# when you plot any graph make sure you use
# a. Title, that describes your plot, this will be very helpful to the reader
# b. Legends if needed
# c. X-axis label
# d. Y-axis label
# + [markdown] colab_type="text" id="UL8bHrflCyps"
# <h2>2.4 Applying Random Forest</h2>
#
# <br>Apply Random Forest on the different kinds of featurization mentioned in the instructions
# <br> For every model that you work on, make sure you do step 2 and step 3 of the instructions
# + colab={} colab_type="code" id="Tsay467cCypt"
# please write all the code with proper documentation, and proper titles for each subsection
# go through documentations and blogs before you start coding
# first figure out what to do, and then think about how to do.
# reading and understanding error messages will be very much helpful in debugging your code
# when you plot any graph make sure you use
# a. Title, that describes your plot, this will be very helpful to the reader
# b. Legends if needed
# c. X-axis label
# d. Y-axis label
# + [markdown] colab_type="text" id="B1dIm8PiCypw"
# ### 2.4.1 Applying Random Forests on BOW,<font color='red'> SET 1</font>
# +
clf = RandomForestClassifier(random_state=42,class_weight='balanced',n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_BOW_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
rf_bow_best_params = grid_search.best_params_
# -
def create_heatmap(data):
fig,ax = plt.subplots(figsize = (10, 5))
im = ax.imshow(data,interpolation = 'nearest', cmap = 'RdBu')
ax.set_xticks(np.arange(len(parameters[0]['n_estimators'])))
ax.set_yticks(np.arange(len(parameters[0]['max_depth'])))
ax.set_xticklabels(parameters[0]['n_estimators'])
ax.set_yticklabels(parameters[0]['max_depth'])
for i in range(len(parameters[0]['max_depth'])):
for j in range(len(parameters[0]['n_estimators'])):
text = ax.text(j, i, np.round(data[i, j],decimals=3),ha = 'center',color = 'c')
ax.set_title('Grid Search AUC score')
plt.xlabel('n_estimators')
plt.ylabel('max_depth')
plt.subplots_adjust(left=0.2, right=0.95, bottom=0.15, top=0.95)
plt.colorbar(im,ax=ax)
plt.show()
create_heatmap(scores_train)
create_heatmap(scores_test)
# +
rf = RandomForestClassifier(n_estimators=rf_bow_best_params['n_estimators'],max_depth=rf_bow_best_params['max_depth'],class_weight='balanced',n_jobs=-1)
rf.fit(X=X_BOW_train,y=y_train)
y_train_pred = []
y_test_pred = []
# predict in chunks of 100 rows; use the class-1 probabilities so the ROC/AUC is meaningful
for j in range(0,X_BOW_train.shape[0],100):
    y_train_pred.extend(rf.predict_proba(X_BOW_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_BOW_test.shape[0],100):
    y_test_pred.extend(rf.predict_proba(X_BOW_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
BOW_train_auc = auc(train_fpr,train_tpr)
BOW_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# ### 2.4.2 Applying Random Forests on TFIDF,<font color='red'> SET 2</font>
# +
clf = RandomForestClassifier(random_state=42,class_weight='balanced',n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_TFIDF_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
rf_tfidf_best_params = grid_search.best_params_
# -
create_heatmap(scores_train)
create_heatmap(scores_test)
# +
rf = RandomForestClassifier(n_estimators=rf_tfidf_best_params['n_estimators'],max_depth=rf_tfidf_best_params['max_depth'],class_weight='balanced',n_jobs=-1)
rf.fit(X=X_TFIDF_train,y=y_train)
y_train_pred = []
y_test_pred = []
# predict in chunks of 100 rows; use the class-1 probabilities so the ROC/AUC is meaningful
for j in range(0,X_TFIDF_train.shape[0],100):
    y_train_pred.extend(rf.predict_proba(X_TFIDF_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_TFIDF_test.shape[0],100):
    y_test_pred.extend(rf.predict_proba(X_TFIDF_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
TFIDF_train_auc = auc(train_fpr,train_tpr)
TFIDF_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# ### 2.4.3 Applying Random Forests on AVG W2V,<font color='red'> SET 3</font>
# +
clf = RandomForestClassifier(random_state=42,class_weight='balanced',n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_avg_w2v_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
rf_avg_w2v_best_params = grid_search.best_params_
# -
create_heatmap(scores_train)
create_heatmap(scores_test)
# +
rf = RandomForestClassifier(n_estimators=rf_avg_w2v_best_params['n_estimators'],max_depth=rf_avg_w2v_best_params['max_depth'],class_weight='balanced',n_jobs=-1)
rf.fit(X=X_avg_w2v_train,y=y_train)
y_train_pred = []
y_test_pred = []
# predict in chunks of 100 rows; use the class-1 probabilities so the ROC/AUC is meaningful
for j in range(0,X_avg_w2v_train.shape[0],100):
    y_train_pred.extend(rf.predict_proba(X_avg_w2v_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_avg_w2v_test.shape[0],100):
    y_test_pred.extend(rf.predict_proba(X_avg_w2v_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
avg_w2v_train_auc = auc(train_fpr,train_tpr)
avg_w2v_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# ### 2.4.4 Applying Random Forests on TFIDF W2V,<font color='red'> SET 4</font>
# +
clf = RandomForestClassifier(random_state=42,class_weight='balanced',n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_tfidf_w2v_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
rf_tfidf_w2v_best_params = grid_search.best_params_
# -
create_heatmap(scores_train)
create_heatmap(scores_test)
# +
rf = RandomForestClassifier(n_estimators=rf_tfidf_w2v_best_params['n_estimators'],max_depth=rf_tfidf_w2v_best_params['max_depth'],class_weight='balanced',n_jobs=-1)
rf.fit(X=X_tfidf_w2v_train,y=y_train)
y_train_pred = []
y_test_pred = []
# predict in chunks of 100 rows; use the class-1 probabilities so the ROC/AUC is meaningful
for j in range(0,X_tfidf_w2v_train.shape[0],100):
    y_train_pred.extend(rf.predict_proba(X_tfidf_w2v_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_tfidf_w2v_test.shape[0],100):
    y_test_pred.extend(rf.predict_proba(X_tfidf_w2v_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
tfidf_w2v_train_auc = auc(train_fpr,train_tpr)
tfidf_w2v_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# <h2>2.5 Applying GBDT</h2>
#
# <br>Apply GBDT on the different kinds of featurization mentioned in the instructions
# <br> For every model that you work on, make sure you do step 2 and step 3 of the instructions
# ### 2.5.1 Applying XGBOOST on BOW,<font color='red'> SET 1</font>
# +
xgb_clf = xgb.XGBClassifier()
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=xgb_clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_BOW_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
xg_bow_best_params = grid_search.best_params_
# -
def create_3d_map(grid_search):
trace1 = go.Scatter3d(x=grid_search.cv_results_['param_n_estimators'],y=grid_search.cv_results_['param_max_depth'],z=scores_train.ravel(), name = 'train')
trace2 = go.Scatter3d(x=grid_search.cv_results_['param_n_estimators'],y=grid_search.cv_results_['param_max_depth'],z=scores_test.ravel(), name = 'Cross validation')
data = [trace1, trace2]
layout = go.Layout(scene = dict(
xaxis = dict(title='n_estimators'),
yaxis = dict(title='max_depth'),
zaxis = dict(title='AUC'),))
fig = go.Figure(data=data, layout=layout)
offline.iplot(fig, filename='3d-scatter-colorscale')
create_3d_map(grid_search)
# +
xgb_clf = xgb.XGBClassifier(n_estimators = xg_bow_best_params['n_estimators'],
max_depth = xg_bow_best_params['max_depth'],
n_jobs = -1)
xgb_clf.fit(X=X_BOW_train,y=y_train)
y_train_pred = []
y_test_pred = []
for j in range(0,X_BOW_train.shape[0],100):
y_train_pred.extend(xgb_clf.predict_proba(X_BOW_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_BOW_test.shape[0],100):
y_test_pred.extend(xgb_clf.predict_proba(X_BOW_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
xg_BOW_train_auc = auc(train_fpr,train_tpr)
xg_BOW_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# ### 2.5.2 Applying XGBOOST on TFIDF,<font color='red'> SET 2</font>
# +
xgb_clf = xgb.XGBClassifier(random_state=42,n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=xgb_clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_TFIDF_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
xg_tfidf_best_params = grid_search.best_params_
# -
create_3d_map(grid_search)
# +
xgb_clf = xgb.XGBClassifier(n_estimators=xg_tfidf_best_params['n_estimators'],max_depth=xg_tfidf_best_params['max_depth'],n_jobs=-1)
xgb_clf.fit(X=X_TFIDF_train,y=y_train)
y_train_pred = []
y_test_pred = []
for j in range(0,X_TFIDF_train.shape[0],100):
y_train_pred.extend(xgb_clf.predict_proba(X=X_TFIDF_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_TFIDF_test.shape[0],100):
y_test_pred.extend(xgb_clf.predict_proba(X=X_TFIDF_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
xg_TFIDF_train_auc = auc(train_fpr,train_tpr)
xg_TFIDF_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# ### 2.5.3 Applying XGBOOST on AVG W2V,<font color='red'> SET 3</font>
# +
xgb_clf = xgb.XGBClassifier(random_state=42,n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=xgb_clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_avg_w2v_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
xg_avg_w2v_best_params = grid_search.best_params_
# -
create_3d_map(grid_search)
# +
xgb_clf = xgb.XGBClassifier(n_estimators=xg_avg_w2v_best_params['n_estimators'],max_depth=xg_avg_w2v_best_params['max_depth'],n_jobs=-1)
xgb_clf.fit(X=X_avg_w2v_train,y=y_train)
y_train_pred = []
y_test_pred = []
for j in range(0,X_avg_w2v_train.shape[0],100):
y_train_pred.extend(xgb_clf.predict_proba(X=X_avg_w2v_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_avg_w2v_test.shape[0],100):
y_test_pred.extend(xgb_clf.predict_proba(X=X_avg_w2v_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
xg_avg_w2v_train_auc = auc(train_fpr,train_tpr)
xg_avg_w2v_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# -
# ### 2.5.4 Applying XGBOOST on TFIDF W2V,<font color='red'> SET 4</font>
# +
xgb_clf = xgb.XGBClassifier(random_state=42,n_jobs=-1)
parameters = [{'n_estimators' : [10, 50, 100, 150, 200, 300, 500, 1000],'max_depth' : [3, 4, 5, 6, 7, 8, 9, 10]}]
grid_search = GridSearchCV(estimator=xgb_clf,param_grid=parameters,cv=5,scoring='roc_auc',return_train_score=True,n_jobs=-1)
grid_search.fit(X=X_tfidf_w2v_train,y=y_train)
scores_train = grid_search.cv_results_['mean_train_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
scores_test = grid_search.cv_results_['mean_test_score'].reshape(len(parameters[0]['n_estimators']),len(parameters[0]['max_depth']))
xg_tfidf_w2v_best_params = grid_search.best_params_
# -
create_3d_map(grid_search)
# +
xgb_clf = xgb.XGBClassifier(n_estimators=xg_tfidf_w2v_best_params['n_estimators'],max_depth=xg_tfidf_w2v_best_params['max_depth'],n_jobs=-1)
xgb_clf.fit(X=X_tfidf_w2v_train,y=y_train)
y_train_pred = []
y_test_pred = []
for j in range(0,X_tfidf_w2v_train.shape[0],100):
y_train_pred.extend(xgb_clf.predict_proba(X=X_tfidf_w2v_train.tocsr()[j:j+100])[:,1])
for j in range(0,X_tfidf_w2v_test.shape[0],100):
y_test_pred.extend(xgb_clf.predict_proba(X=X_tfidf_w2v_test.tocsr()[j:j+100])[:,1])
train_fpr, train_tpr, thresholds = roc_curve(y_train,y_train_pred)
test_fpr, test_tpr, thresholds = roc_curve(y_test,y_test_pred)
plt.plot(train_fpr,train_tpr,label = "Train AUC = "+str(auc(train_fpr,train_tpr)))
plt.plot(test_fpr,test_tpr, label = "Test AUC = "+str(auc(test_fpr,test_tpr)))
xg_tfidf_w2v_train_auc = auc(train_fpr,train_tpr)
xg_tfidf_w2v_test_auc = auc(test_fpr,test_tpr)
plt.legend()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.show()
# +
cnf_matrix_train = confusion_matrix(y_train,np.round(y_train_pred))
cnf_matrix_test = confusion_matrix(y_test,np.round(y_test_pred))
classes = [0,1]
df_cm_train = pd.DataFrame(cnf_matrix_train, columns=classes, index = classes)
df_cm_test = pd.DataFrame(cnf_matrix_test, columns=classes, index=classes)
plt.subplot(211)
try:
heatmap = sns.heatmap(df_cm_train,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for train')
plt.subplot(212)
try:
heatmap = sns.heatmap(df_cm_test,annot=True,fmt="d")
except ValueError:
raise ValueError('Confusion matrix values must be integers')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),rotation = 0,ha = "right", fontsize = 14)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion matrix for test')
plt.subplots_adjust(hspace=1)
# + [markdown] colab_type="text" id="bemispVtCyp-"
# <h1>3. Conclusions</h1>
# + colab={} colab_type="code" id="zRO-VPG2Cyp_"
# Please compare all your models using Prettytable library
# +
from prettytable import PrettyTable
table = PrettyTable(['Model (Vectorizer)','Max Depth','n estimators','Train AUC','Test AUC'])
table.add_row(['RF_BOW',rf_bow_best_params['max_depth'],rf_bow_best_params['n_estimators'],BOW_train_auc,BOW_test_auc])
table.add_row(['RF_TFIDF',rf_tfidf_best_params['max_depth'],rf_tfidf_best_params['n_estimators'],TFIDF_train_auc,TFIDF_test_auc])
table.add_row(['RF_AVG W2V',rf_avg_w2v_best_params['max_depth'],rf_avg_w2v_best_params['n_estimators'],avg_w2v_train_auc,avg_w2v_test_auc])
table.add_row(['RF_TFIDF_w2v',rf_tfidf_w2v_best_params['max_depth'],rf_tfidf_w2v_best_params['n_estimators'],tfidf_w2v_train_auc,tfidf_w2v_test_auc])
table.add_row(['XGB_BOW',xg_bow_best_params['max_depth'],xg_bow_best_params['n_estimators'],xg_BOW_train_auc,xg_BOW_test_auc])
table.add_row(['XGB_TFIDF',xg_tfidf_best_params['max_depth'],xg_tfidf_best_params['n_estimators'],xg_TFIDF_train_auc,xg_TFIDF_test_auc])
table.add_row(['XGB_AVG W2V',xg_avg_w2v_best_params['max_depth'],xg_avg_w2v_best_params['n_estimators'],xg_avg_w2v_train_auc,xg_avg_w2v_test_auc])
table.add_row(['XGB_TFIDF_w2v',xg_tfidf_w2v_best_params['max_depth'],xg_tfidf_w2v_best_params['n_estimators'],xg_tfidf_w2v_train_auc,xg_tfidf_w2v_test_auc])
print(table)
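# -
# For readability the summary can also be ordered by test AUC, using PrettyTable's built-in sorting
# options (an addition):
# +
table.sortby = 'Test AUC'
table.reversesort = True
print(table)
# -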
| 82,129 |
/MachineLearning/NaiveBayes/NaiveBayes.ipynb | a5ea42d6cba6fca2da8a2ac1116e6d34cd46e021 | [] | no_license | ravi4all/ML_RegJune_3_30_2019 | https://github.com/ravi4all/ML_RegJune_3_30_2019 | 0 | 3 | null | null | null | null | Jupyter Notebook | false | false | .py | 10,722 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
data = {
"height" : [6.0,6.2,5.6,5.8,5.9,5.11,6.0,5.5,
4.7,4.2,5.2,5.1,5.9,5.7,5.3,4.9],
"weight" : [86,85,67,70,73,71,78,69,49,47,50,49,53,60,56,55],
"gender" : ["male","male","male","male","male","male","male","male",
"female","female","female","female","female","female","female","female",]
}
df = pd.DataFrame(data)
df.head()
total_obs = df['gender'].count()
male_count = df['gender'][df['gender'] == "male"].count()
male_count
female_count = df['gender'][df['gender'] == "female"].count()
female_count
prob_male = male_count / total_obs
prob_female = female_count / total_obs
prob_male, prob_female
mean = df.groupby('gender').mean()
mean
var = df.groupby('gender').var()
var
# +
male_mean_height = mean['height'][1]
male_mean_weight = mean['weight'][1]
male_var_height = var['height'][1]
male_var_weight = var['weight'][1]
female_mean_height = mean['height'][0]
female_mean_weight = mean['weight'][0]
female_var_height = var['height'][0]
female_var_weight = var['weight'][0]
# -
# Gaussian (normal) likelihood of an observation given the class mean and variance
def p_given_x(obs,mean,var):
    return (1 / (np.sqrt(2 * np.pi * var))) * np.exp(-((obs - mean) ** 2) / (2 * var))
# classify a new observation: height 5.1, weight 55
newInput = [5.1,55]
p_h, p_w = newInput
male = p_given_x(p_h,male_mean_height, male_var_height) * p_given_x(p_w,male_mean_weight, male_var_weight) * prob_male
female = p_given_x(p_h,female_mean_height, female_var_height) * p_given_x(p_w,female_mean_weight,female_var_weight) * prob_female
male > female
female
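# As a quick cross-check (an addition, not part of the original notebook), scikit-learn's GaussianNB
# fits class-conditional Gaussians in the same spirit and should reach the same decision for this input:
# +
from sklearn.naive_bayes import GaussianNB

gnb = GaussianNB()
gnb.fit(df[['height', 'weight']].values, df['gender'].values)
print(gnb.predict([[5.1, 55]]))         # expected to match the manual male-vs-female comparison
print(gnb.predict_proba([[5.1, 55]]))   # normalized posteriors, columns ordered as gnb.classes_
# -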
| 1,786 |
/MNIST_GAN_example_for_michael.ipynb | 9342edc0da7deb4a73a85a14193776ec5c1d2616 | [] | no_license | FinchMF/example_for_michael | https://github.com/FinchMF/example_for_michael | 0 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 226,345 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# ## Segmenting and Clustering Neighborhoods in Toronto [Part 1]
import pandas as pd
import requests
from bs4 import BeautifulSoup
# #### Web scraping the List of Postal Codes of Canada from Wikipedia
import requests
url = "https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M"
wiki_url = requests.get(url)
wiki_url
wiki_data = pd.read_html(wiki_url.text)
wiki_data = wiki_data[0]
# #### convert to dataframe
# +
table_contents=[]
soup = BeautifulSoup(wiki_url.content, 'html.parser')
table=soup.find('table')
for row in table.findAll('td'):
cell = {}
if row.span.text=='Not assigned':
pass
else:
cell['PostalCode'] = row.p.text[:3]
cell['Borough'] = (row.span.text).split('(')[0]
cell['Neighborhood'] = (((((row.span.text).split('(')[1]).strip(')')).replace(' /',',')).replace(')',' ')).strip(' ')
table_contents.append(cell)
# print(table_contents)
df=pd.DataFrame(table_contents)
df['Borough']=df['Borough'].replace({'Downtown TorontoStn A PO Boxes25 The Esplanade':'Downtown Toronto Stn A',
'East TorontoBusiness reply mail Processing Centre969 Eastern':'East Toronto Business',
'EtobicokeNorthwest':'Etobicoke Northwest','East YorkEast Toronto':'East York/East Toronto',
'MississaugaCanada Post Gateway Processing Centre':'Mississauga'})
df
# -
df.shape
# ## Segmenting and Clustering Neighborhoods in Toronto [Part 2]
# ! pip install geocoder
import geocoder
df_geo = pd.read_csv("https://cocl.us/Geospatial_data")
df_geo
df_geo.shape
combined_data = df.join(df_geo.set_index('Postal Code'), on='PostalCode', how='inner')
combined_data
combined_data.shape
# ## Segmenting and Clustering Neighborhoods in Toronto [Part 3]
from geopy.geocoders import Nominatim
# +
address = 'Toronto, Ontario'
geolocator = Nominatim(user_agent="toronto_explorer")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('The coordinates of Toronto are {}, {}.'.format(latitude, longitude))
# -
# ! pip install folium
import folium
# +
# Creating the map of Toronto
map_Toronto = folium.Map(location=[latitude, longitude], zoom_start=11)
# adding markers to map
for latitude, longitude, borough, neighbourhood in zip(combined_data['Latitude'], combined_data['Longitude'], combined_data['Borough'], combined_data['Neighborhood']):
label = '{}, {}'.format(neighbourhood, borough)
label = folium.Popup(label, parse_html=True)
folium.CircleMarker(
[latitude, longitude],
radius=5,
popup=label,
color='red',
fill=True
).add_to(map_Toronto)
map_Toronto
# +
# note: use your own Foursquare API credentials here
CLIENT_ID = 'SBBLKN0P5EAWKXAVL1FC3ML4DHGX54DVWHQEFEYQ2AI3IHBD'
CLIENT_SECRET = '2NLES0IJKVC1VGENKREFFAE4UPFGDMZ44SK0S44LMETGGQZA'
VERSION = '20210517' # Foursquare API version
print('Your credentails:')
print('CLIENT_ID: ' + CLIENT_ID)
print('CLIENT_SECRET:' + CLIENT_SECRET)
# +
neighborhood_latitude = combined_data.loc[0, 'Latitude'] # neighborhood latitude value
neighborhood_longitude = combined_data.loc[0, 'Longitude'] # neighborhood longitude value
neighborhood_name = combined_data.loc[0, 'Neighborhood'] # neighborhood name
print('Latitude and longitude values of {} are {}, {}.'.format(neighborhood_name,
neighborhood_latitude,
neighborhood_longitude))
# -
LIMIT = 100
radius = 500
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
neighborhood_latitude,
neighborhood_longitude,
radius,
LIMIT)
url
results = requests.get(url).json()
results
def getNearbyVenues(names, latitudes, longitudes, radius=500):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
# create the API request URL
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius
)
# make the GET request
results = requests.get(url).json()["response"]['groups'][0]['items']
# return only relevant information for each nearby venue
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighborhood',
'Neighborhood Latitude',
'Neighborhood Longitude',
'Venue',
'Venue Category']
return(nearby_venues)
venues_in_toronto = getNearbyVenues(combined_data['Neighborhood'], combined_data['Latitude'], combined_data['Longitude'])
venues_in_toronto.shape
venues_in_toronto.head()
venues_in_toronto.groupby('Neighborhood').head()
venues_in_toronto.groupby('Venue Category').max()
toronto_venue_cat = pd.get_dummies(venues_in_toronto[['Venue Category']], prefix="", prefix_sep="")
toronto_venue_cat
# +
toronto_venue_cat['Neighborhood'] = venues_in_toronto['Neighborhood']
# moving neighborhood column to the first column
fixed_columns = [toronto_venue_cat.columns[-1]] + list(toronto_venue_cat.columns[:-1])
toronto_venue_cat = toronto_venue_cat[fixed_columns]
toronto_venue_cat.head()
# -
toronto_grouped = toronto_venue_cat.groupby('Neighborhood').mean().reset_index()
toronto_grouped.head()
def return_most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
# +
import numpy as np
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighborhood'] = toronto_grouped['Neighborhood']
for ind in np.arange(toronto_grouped.shape[0]):
neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)
neighborhoods_venues_sorted.head()
# +
# import k-means from clustering stage
from sklearn.cluster import KMeans
# set number of clusters
k_num_clusters = 5
toronto_grouped_clustering = toronto_grouped.drop('Neighborhood', axis=1)
# run k-means clustering
kmeans = KMeans(n_clusters=k_num_clusters, random_state=0).fit(toronto_grouped_clustering)
kmeans
# -
kmeans.labels_[0:100]
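# k=5 clusters is fixed by hand above; as an optional sanity check (an addition), the k-means inertia
# for a few candidate values of k can be inspected (the "elbow" heuristic):
# +
import matplotlib.pyplot as plt

inertias = []
k_values = list(range(2, 11))
for k in k_values:
    km = KMeans(n_clusters=k, random_state=0).fit(toronto_grouped_clustering)
    inertias.append(km.inertia_)

plt.plot(k_values, inertias, marker='o')
plt.xlabel('Number of clusters k')
plt.ylabel('Inertia (within-cluster sum of squares)')
plt.title('Elbow check for k-means on venue-category frequencies')
plt.show()
# -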
neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)
toronto_merged = combined_data
toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood')
toronto_merged.head()
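# A natural follow-up (an addition, sketched under the assumption that rows without a cluster label
# should simply be skipped): draw the neighborhoods again, colored by their k-means cluster.
# +
import matplotlib.cm as cm
import matplotlib.colors as mcolors

# re-geocode the city center, since `latitude`/`longitude` were reused as loop variables earlier
center = geolocator.geocode('Toronto, Ontario')
map_clusters = folium.Map(location=[center.latitude, center.longitude], zoom_start=11)

rainbow = [mcolors.rgb2hex(c) for c in cm.rainbow(np.linspace(0, 1, k_num_clusters))]

for lat, lon, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'],
                                  toronto_merged['Neighborhood'], toronto_merged['Cluster Labels']):
    if pd.isna(cluster):
        continue
    label = folium.Popup('{} - Cluster {}'.format(poi, int(cluster)), parse_html=True)
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        color=rainbow[int(cluster)],
        fill=True
    ).add_to(map_clusters)

map_clusters
# -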
05c900" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.hist(hist['Sentiment'], bins=10)
plt.ylabel('Frequency')
plt.xlim(left=-1, right = 1)
# + [markdown] id="mkpqrk3j_tZ2" colab_type="text"
# Take a look at the sentiment score of the classified negatves
# + id="CYnPizCXAeyq" colab_type="code" outputId="ce6a331f-e7fe-4a58-c05f-fd53d4c5b22a" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.hist(hist.Textblob[hist.Sentiment == -1], bins = 10)
plt.hist(hist.Textblob[hist.Sentiment == 0], bins = 10)
plt.ylabel('Frequency')
plt.xlim(left=-1, right = 1)
# + id="fP-ImlyKNUmQ" colab_type="code" outputId="f6174357-99c0-41a6-9c51-85b0bd1c3ca5" colab={"base_uri": "https://localhost:8080/", "height": 34}
hist[hist.Sentiment == -1][0:20]
print(hist.Temperature[15])
# + [markdown] id="j4cVC4agjbSU" colab_type="text"
# #### Test Accuracy
# + id="KX92ZEjGun-L" colab_type="code" outputId="3437bf9a-5b37-48a5-857a-72ffd81d1752" colab={"base_uri": "https://localhost:8080/", "height": 204}
survey_resp_temp[0:5]
# + id="jTMRCgkdtBs8" colab_type="code" outputId="36b12fca-5ac0-4075-fab1-e6dd4af55562" colab={"base_uri": "https://localhost:8080/", "height": 187}
survey_resp_temp['Tb'] = 1
survey_resp_temp.loc[survey_resp_temp['Textblob'] < 0, 'Tb'] = -1
survey_resp_temp.loc[survey_resp_temp['Textblob'] == 0, 'Tb'] = 0
# + id="RIexSWEiydWw" colab_type="code" outputId="9e9705f9-31cb-43f2-9f7c-87e8dccd75c5" colab={"base_uri": "https://localhost:8080/", "height": 204}
survey_resp_temp = survey_resp_temp.drop('Textblob', axis = 1)
survey_resp_temp[0:5]
# + id="60k-MXaHzSFg" colab_type="code" outputId="4192e55a-920a-4052-c09f-983fa207a784" colab={"base_uri": "https://localhost:8080/", "height": 34}
y_pred = survey_resp_temp['Tb']
y_test = survey_resp_temp['Sentiment']
print('accuracy %s' % accuracy_score(y_pred, y_test))
# + [markdown] id="cCYDFow2zw8K" colab_type="text"
# Not good at all
# + [markdown] id="HH7CPvRs9s1j" colab_type="text"
# #### Confusion matrix
# + id="sWQuCfgV9ulQ" colab_type="code" colab={}
y_actu = pd.Series(y_test, name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred)
# + id="WmdWiDhJ9xxt" colab_type="code" outputId="05090e96-5e26-471e-dd6a-68e322244284" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + id="XDsup6o1A-Dn" colab_type="code" outputId="6c3a3043-6f3a-43d9-ba95-ad5432b80cf2" colab={"base_uri": "https://localhost:8080/", "height": 289}
plot_confusion_matrix(df_confusion)
# + [markdown] id="Ijv21rxVksYq" colab_type="text"
# ### Vader
# + id="dSkKF07X0Eoo" colab_type="code" outputId="95b9ab29-fa24-464e-9e8a-dc21f99717e4" colab={"base_uri": "https://localhost:8080/", "height": 102}
# !pip install vaderSentiment
# + id="2wdNrzgzS_ha" colab_type="code" colab={}
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# + [markdown] id="M9UM_SLuvwQ4" colab_type="text"
# #### Run the model
# + id="WOsLS5Smz2Gb" colab_type="code" outputId="041bd70c-9cf6-4204-81df-f83e5c2c5cd3" colab={"base_uri": "https://localhost:8080/", "height": 204}
survey_resp_temp2 = df
analyzer = SentimentIntensityAnalyzer()
scores = []
for i in range(0,len(survey_resp_temp2['Temperature'])):
score = analyzer.polarity_scores(survey_resp_temp2['Temperature'][i])
scores.append(score)
df_scores= pd.DataFrame(scores)
#check the number of negative, neutral and positive
len(df_scores[df_scores['compound'] > 0])
len(df_scores[df_scores['compound'] == 0])
len(df_scores[df_scores['compound'] < 0])
df_scores['compound'][0:5]
df['Vader'] = df_scores['compound']
df[0:5]
# + id="CIXHDy7xz2KA" colab_type="code" outputId="d84160de-0c28-4350-fe1e-501b251c4358" colab={"base_uri": "https://localhost:8080/", "height": 102}
df['Vd'] = 'Neutral'
df.loc[df['Vader'] < 0, 'Vd'] = 'Negative'
# + id="EW9Tz_viz2Nw" colab_type="code" outputId="bb6f1b59-6dbf-40b2-b35d-be6e3d252789" colab={"base_uri": "https://localhost:8080/", "height": 204}
df[0:5]
# + [markdown] id="OnWTWxP8sMo4" colab_type="text"
# #### Histogram
# + id="G_tolwC0vnip" colab_type="code" outputId="79f6aef7-7b1f-439e-b827-9bb8a534b4c1" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.hist(hist['Sentiment'], bins=10)
plt.ylabel('Frequency')
plt.xlim(left=-1, right = 1)
# + id="yQalogW1v56i" colab_type="code" outputId="11540ff0-411e-40e7-cad2-9f675abbc627" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.hist(df['Vader'], bins=10)
plt.hist(df['Vader'].loc[df['Sentiment']== -1], bins=10)
plt.ylabel('Frequency')
plt.xlim(left=-1, right = 1)
# + id="tSE1_an-44uM" colab_type="code" outputId="632e7812-257c-4183-ecc0-2d96d90f75f1" colab={"base_uri": "https://localhost:8080/", "height": 221}
df['Vader'].loc[df['Sentiment']== 0]
# + [markdown] id="51eejJ2Jvqt9" colab_type="text"
# #### Test Accuracy
# + id="PMGdf3Axz2Qi" colab_type="code" outputId="1f62678c-2582-4bfd-a9bd-0f026602914f" colab={"base_uri": "https://localhost:8080/", "height": 340}
y_pred = df['Vd']
y_test = df['Sentiment']
print('accuracy %s' % accuracy_score(y_pred, y_test))
# + [markdown] id="oqaQP5Z42h8r" colab_type="text"
# Even worse, unfortunately
# + [markdown] id="N2_lUo66v2Z2" colab_type="text"
# #### Confusion matrix
# + id="NCOGK0_xBJWx" colab_type="code" colab={}
y_actu = pd.Series(y_test, name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred)
# + id="KuW6jxvpBTVI" colab_type="code" outputId="8dae162a-f5e3-43e9-9d04-c814edce3a02" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + id="SFs64LXMBOg4" colab_type="code" outputId="c9f448b3-8447-49ec-f1ad-e740b24e2ac9" colab={"base_uri": "https://localhost:8080/", "height": 281}
plot_confusion_matrix(df_confusion)
# + [markdown] id="CFS_aXmukYI8" colab_type="text"
# ## Binomial Sentiment Classification (Neg/Neutral)
# + [markdown] id="AsKMgjfgffca" colab_type="text"
# ### Randomly split up the data into test (30%) and training (70%) sets
# + [markdown] id="55VoDJN49aFB" colab_type="text"
# Try a 50-50 or a 70-30 train/test split; a quick comparison is sketched below.
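# + [markdown]
# A minimal sketch of that comparison (illustrative only; it assumes `df.Temperature` and
# `df.Sentiment` as prepared above and uses the same bag-of-words Naive Bayes pipeline that
# is built further down).

# +
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score

for test_frac in (0.5, 0.3):
    X_tr, X_te, y_tr, y_te = train_test_split(df.Temperature, df.Sentiment,
                                               test_size=test_frac, random_state=0)
    probe_nb = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('clf', MultinomialNB())])
    probe_nb.fit(X_tr, y_tr)
    print('test_size=%.1f accuracy %s' % (test_frac, accuracy_score(y_te, probe_nb.predict(X_te))))
# -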
# + id="8iBaI8A1dVWE" colab_type="code" colab={}
X = df.Temperature
y = df.Sentiment
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = 0)
# + [markdown] id="hLolnWhVdVWH" colab_type="text"
# ### Naive Bayes
# + id="cCg9z2DCdVWH" colab_type="code" colab={}
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer
# + [markdown] id="3Go0Rkbkf_2b" colab_type="text"
# #### Create the classifier pipeline
# + id="ZNIpRaRyf3YP" colab_type="code" outputId="5c593437-3a30-4eb8-ee00-239209602ee2" colab={"base_uri": "https://localhost:8080/", "height": 306}
nb = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
nb.fit(X_train, y_train)
# + [markdown] id="mOIJneGzgAjX" colab_type="text"
# #### Predict the test variables
# + id="abPSIKppdVWK" colab_type="code" outputId="45d59b76-e6b1-4a55-8e51-60206de7d152" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
from sklearn.metrics import classification_report
y_pred = nb.predict(X_test)
# + [markdown] id="04bX3e9kgCB4" colab_type="text"
# #### Verify the accuracy of the results
# + id="kza_0lgCdVWO" colab_type="code" outputId="076cb9b7-90ea-4d6f-d298-e5f5149eaea6" colab={"base_uri": "https://localhost:8080/", "height": 187}
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred,target_names=my_tags))
# + [markdown] id="DH3CcZd4gDQ-" colab_type="text"
# #### Confusion matrix visualizing error distribution
# + id="UgEUzWZaB8qF" colab_type="code" outputId="100b3fa5-c8fe-4b3d-a3b5-1a6163c2c583" colab={"base_uri": "https://localhost:8080/", "height": 258}
y_actu = pd.Series(np.array(y_test, dtype = '<U8'), name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="9U1PCJ3hiQTZ" colab_type="code" outputId="92af7934-2537-4bcd-fa40-d0fecfddd36a" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + [markdown] id="cOi0BtVKdVWR" colab_type="text"
# ### Logistic Regression
# + id="JKOBp_k6dVWS" colab_type="code" colab={}
from sklearn.linear_model import LogisticRegression
# + [markdown] id="RUcf85AwgGtU" colab_type="text"
# #### Create the classifier pipeline
# + id="srBm-XIZgF6A" colab_type="code" outputId="18774dc7-4729-4b61-c36e-7c8e6fef94ff" colab={"base_uri": "https://localhost:8080/", "height": 391}
logreg = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(n_jobs=1, C=1e5, solver = 'lbfgs')),
])
logreg.fit(X_train, y_train)
# + [markdown] id="5GfL_hppgHYx" colab_type="text"
# #### Predict the test variables
# + id="w1wjwC6qdVWV" colab_type="code" outputId="03b70aca-f290-4ad5-eac7-46cfd97ec8b6" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
y_pred = logreg.predict(X_test)
# + [markdown] id="pvCqfwV8gIX3" colab_type="text"
# #### Verify the accuracy of the results
# + [markdown] id="XJras2JX6nDl" colab_type="text"
#
# + id="eYpR63K6dVWY" colab_type="code" outputId="73d37905-f374-4057-abf6-01fcfba3c6c0" colab={"base_uri": "https://localhost:8080/", "height": 187}
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred,target_names=my_tags))
# + [markdown] id="I2qQLzjngJJg" colab_type="text"
# #### Confusion matrix visualizing error distribution
# + id="ueyg15pACQJj" colab_type="code" outputId="bbc5095a-50ee-4398-e58e-02c48b175c57" colab={"base_uri": "https://localhost:8080/", "height": 254}
y_actu = pd.Series(np.array(y_test, dtype = '<U8'), name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="kwrvxX9tiOvv" colab_type="code" outputId="614054e1-0f9d-416d-d37a-07c6971864d1" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + [markdown] id="ybvtbMXBdVWc" colab_type="text"
# ### Support Vector Machine (SVM)
# + id="36aAbLXxdVWd" colab_type="code" colab={}
from sklearn.linear_model import SGDClassifier
# + [markdown] id="Reev7WVrhNVz" colab_type="text"
# #### Create the classifier pipeline
# + id="JjFmJ5hQhMXQ" colab_type="code" outputId="19470ac5-2ec8-482b-99b0-6762ccd712c1" colab={"base_uri": "https://localhost:8080/", "height": 374}
sgd = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, random_state=42, max_iter=5, tol=None)),
])
sgd.fit(X_train, y_train)
# + [markdown] id="R8FsVjxbhN9y" colab_type="text"
# #### Predict the test variables
# + id="56TpqCyMdVWh" colab_type="code" outputId="57b6f90e-37bf-4519-e377-8fa6f6ce7655" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
y_pred = sgd.predict(X_test)
# + [markdown] id="eUekmGNKhOqP" colab_type="text"
# #### Verify the accuracy of the results
# + id="NjA-NuVLdVWk" colab_type="code" outputId="667c991e-adb0-4842-f214-cf5bd39e1af7" colab={"base_uri": "https://localhost:8080/", "height": 187}
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred,target_names=my_tags))
# + [markdown] id="SignUD-BhPXh" colab_type="text"
# #### Confusion matrix visualizing error distribution
# + id="8hSJRD6jCVyp" colab_type="code" outputId="81ac6160-2b59-4216-d71a-600796cd80af" colab={"base_uri": "https://localhost:8080/", "height": 258}
y_actu = pd.Series(np.array(y_test, dtype = '<U8'), name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="MTXL_KBAiLdb" colab_type="code" outputId="cd6367e2-1062-4653-95b3-7822fba67040" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + [markdown] id="FYSH4Jbt8FG7" colab_type="text"
# ### Decision tree classifier
# + id="nChCeL758aSX" colab_type="code" colab={}
from sklearn import tree
# + [markdown] id="uM0mrsEV8H-A" colab_type="text"
# #### Create the pipeline
# + id="Yrvry6KV8fCd" colab_type="code" outputId="e8fe2c2c-d841-48fb-e8af-4071756f7b91" colab={"base_uri": "https://localhost:8080/", "height": 425}
sgd = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', tree.DecisionTreeClassifier(random_state = 0, max_depth=2)),
])
sgd.fit(X_train, y_train)
# + [markdown] id="l8yKYX298NSd" colab_type="text"
# #### Predict the test variables
# + id="0tQ7L2IP8toL" colab_type="code" outputId="caff1dd2-5fa2-4494-be5f-1acf6294f553" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
y_pred = sgd.predict(X_test)
# + [markdown] id="YN_KEJhR8QEy" colab_type="text"
# #### Verify result acuracy
# + id="murD4Znw8wfc" colab_type="code" outputId="55a5c191-7fe6-4829-e7e8-1c15bd9827fb" colab={"base_uri": "https://localhost:8080/", "height": 187}
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred,target_names=my_tags))
# + [markdown] id="Kgb8Yrr88TsM" colab_type="text"
# #### Confusion matrix
# + id="fhIWXPX88y06" colab_type="code" outputId="f6b7b95f-1f04-498c-8d26-7e2a2766afec" colab={"base_uri": "https://localhost:8080/", "height": 254}
y_actu = pd.Series(np.array(y_test, dtype = '<U8'), name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="DUhukzhY8yyi" colab_type="code" outputId="239cb46f-6040-4ccf-b21d-f88436bf6d0d" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + [markdown] id="YbGMSly4SHpR" colab_type="text"
# ### Random forest classifier
# + id="0ZbYt7J7R-d1" colab_type="code" colab={}
from sklearn.ensemble import RandomForestClassifier
# + [markdown] id="2-hrxx7VSP1a" colab_type="text"
# #### Create the classifier pipeline
# + id="1VT_fcdDTKjK" colab_type="code" colab={}
sgd = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', RandomForestClassifier(n_estimators=10000, max_depth=30, random_state=0, min_samples_leaf=1)),
])
sgd.fit(X_train, y_train)
# + [markdown] id="E6JVBoiBSRzN" colab_type="text"
# #### Predict the test variables
# + id="F4kemaJXTc3A" colab_type="code" outputId="381d544f-0d09-4555-cf85-5c45cd3bc709" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %%time
y_pred = sgd.predict(X_test)
# + [markdown] id="RarNR7c9SV64" colab_type="text"
# #### Verify the accuracy of the results
# + id="eLAkq4tjTvp1" colab_type="code" outputId="f13ff1f7-64ee-4e35-fab0-3a8c4cf47525" colab={"base_uri": "https://localhost:8080/", "height": 187}
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred,target_names=my_tags))
# + [markdown] id="Zo-MFDVvSdJ1" colab_type="text"
# #### Confusion matrix visualizing error distribution
# + id="tACBnkGlUYJo" colab_type="code" outputId="01c01fd2-5302-4101-d291-427e38b5ecb3" colab={"base_uri": "https://localhost:8080/", "height": 258}
y_actu = pd.Series(np.array(y_test, dtype = '<U8'), name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="5QtmCd7CUYbV" colab_type="code" outputId="910ac2fd-6a8f-42bb-934d-f07bca77c65b" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + id="EbkxhenYaMRY" colab_type="code" outputId="6a30d7a2-8a19-443c-cf19-90b4ed1d0a5d" colab={"base_uri": "https://localhost:8080/", "height": 163}
sgd.named_steps['clf'].n_outputs_  # number of outputs of the fitted random forest
# + [markdown] id="19qScaRYdVWo" colab_type="text"
# ### [Document Embedding with Paragraph Vectors (Doc2Vec)](https://github.com/susanli2016/NLP-with-Python/blob/master/Doc2Vec%20Consumer%20Complaint.ipynb)
# + id="h6W_crOAdVWp" colab_type="code" colab={}
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
from gensim.models import Doc2Vec
from sklearn import utils
import gensim
from gensim.models.doc2vec import TaggedDocument
import re
# + id="DHTAjjfGdVWr" colab_type="code" colab={}
def label_sentences(corpus, label_type):
"""
Gensim's Doc2Vec implementation requires each document/paragraph to have a label associated with it.
We do this by using the TaggedDocument method. The format will be "TRAIN_i" or "TEST_i" where "i" is
a dummy index of the post.
"""
labeled = []
for i, v in enumerate(corpus):
label = label_type + '_' + str(i)
labeled.append(TaggedDocument(v.split(), [label]))
return labeled
X_train, X_test, y_train, y_test = train_test_split(df.Temperature, df.Sentiment, random_state=0, test_size=0.3)
X_train = label_sentences(X_train, 'Train')
X_test = label_sentences(X_test, 'Test')
all_data = X_train + X_test
# + id="nyy1Ge7jdVWt" colab_type="code" outputId="65bd5584-4213-4222-fdfe-1af2710a3896" colab={"base_uri": "https://localhost:8080/", "height": 51}
all_data[:2]
# + id="P42KyP1UdVWw" colab_type="code" outputId="55827d60-db01-43b0-d3a7-02a16d145925" colab={"base_uri": "https://localhost:8080/", "height": 561}
model_dbow = Doc2Vec(dm=0, vector_size=300, negative=5, min_count=1, alpha=0.065, min_alpha=0.065)
model_dbow.build_vocab([x for x in tqdm(all_data)])
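# Train for 30 outer passes of 10 epochs each, shuffling the corpus every pass and
# manually decaying the learning rate by 0.002 per pass.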
for epoch in range(30):
model_dbow.train(utils.shuffle([x for x in tqdm(all_data)]), total_examples=len(all_data), epochs=10)
model_dbow.alpha -= 0.002
model_dbow.min_alpha = model_dbow.alpha
# + id="McaKYk1HdVW0" colab_type="code" colab={}
def get_vectors(model, corpus_size, vectors_size, vectors_type):
"""
Get vectors from trained doc2vec model
:param doc2vec_model: Trained Doc2Vec model
:param corpus_size: Size of the data
:param vectors_size: Size of the embedding vectors
:param vectors_type: Training or Testing vectors
:return: list of vectors
"""
vectors = np.zeros((corpus_size, vectors_size))
for i in range(0, corpus_size):
prefix = vectors_type + '_' + str(i)
vectors[i] = model.docvecs[prefix]
return vectors
# + id="1Wz0N1eSdVW2" colab_type="code" colab={}
train_vectors_dbow = get_vectors(model_dbow, len(X_train), 300, 'Train')
test_vectors_dbow = get_vectors(model_dbow, len(X_test), 300, 'Test')
# + id="zE_GJFhCdVW4" colab_type="code" outputId="b62d93be-1d7f-4833-c7a1-ff6383716594" colab={"base_uri": "https://localhost:8080/", "height": 255}
logreg = LogisticRegression(n_jobs=1, C=1e5, solver = 'lbfgs')
logreg.fit(train_vectors_dbow, y_train)
logreg = logreg.fit(train_vectors_dbow, y_train)
y_pred = logreg.predict(test_vectors_dbow)
print('accuracy %s' % accuracy_score(y_pred, y_test))
print(classification_report(y_test, y_pred,target_names=my_tags))
# + [markdown] id="MDDU4oxuiCFT" colab_type="text"
# #### Confusion matrix
# + id="z3qpCFPGCaPS" colab_type="code" outputId="efbf33ea-bcab-4262-c9f7-8f69cff2f58d" colab={"base_uri": "https://localhost:8080/", "height": 261}
y_actu = pd.Series(np.array(y_test, dtype = '<U8'), name='Actual')
y_pred = pd.Series(y_pred, name='Predicted')
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="8RSfDhkiiIKa" colab_type="code" outputId="0b8b0de1-4b3f-4ee1-89d3-0ccef5b765f8" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + [markdown] id="N_lTdrshdVW6" colab_type="text"
# ### Bag of Words using Tensorflow
# + id="jehKmOJ0dVW7" colab_type="code" colab={}
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# + id="7U8SvH9sdVW9" colab_type="code" outputId="a561653f-a53f-4825-ae39-e1665bbe0b61" colab={"base_uri": "https://localhost:8080/", "height": 34}
import itertools
import os
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.metrics import confusion_matrix
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.preprocessing import text, sequence
from keras import utils
use_gpu = True
# + id="KCkmbXIHdVXB" colab_type="code" colab={}
train_size = int(len(df) * .7)
train_posts = df['Temperature'][:train_size]
train_tags = df['Sentiment'][:train_size]
test_posts = df['Temperature'][train_size:]
test_tags = df['Sentiment'][train_size:]
# + id="xIpE6fuUdVXE" colab_type="code" colab={}
max_words = 1000
tokenize = text.Tokenizer(num_words=max_words, char_level=False)
tokenize.fit_on_texts(train_posts) # only fit on train
# + id="g3SHY2SpdVXH" colab_type="code" colab={}
x_train = tokenize.texts_to_matrix(train_posts)
x_test = tokenize.texts_to_matrix(test_posts)
# + id="_bDGEe9ldVXJ" colab_type="code" colab={}
encoder = LabelEncoder()
encoder.fit(train_tags)
y_train = encoder.transform(train_tags)
y_test = encoder.transform(test_tags)
# + id="C-CToQMNdVXK" colab_type="code" colab={}
num_classes = np.max(y_train) + 1
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)
# + id="WiBmN3lpdVXM" colab_type="code" colab={}
batch_size = 24
epochs = 50
# + [markdown] id="Hv-CtyV1dVXP" colab_type="text"
# #### Build the model
# + id="lj-eqL40dVXQ" colab_type="code" colab={}
model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# + [markdown] id="mq4mTAox7rd5" colab_type="text"
# Test the sensitivity of the results to the size of the input layers. No free lunch: no single algorithm is best for every problem.
# + [markdown] id="0fQfNRz38cP8" colab_type="text"
# How should the network be trimmed? How many nodes and layers are appropriate for text analysis? A useful check is to plot the number of layers against accuracy, starting with around 5 configurations; a sketch of that experiment follows below.
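# + [markdown]
# A minimal sketch of that depth-vs-accuracy experiment (illustrative only; it assumes
# `x_train`, `y_train`, `max_words`, and `num_classes` from the cells above, and uses only a
# few cheap epochs per configuration).

# +
depth_scores = {}
for n_layers in (1, 2, 3, 4, 5):
    probe = Sequential()
    probe.add(Dense(512, activation='relu', input_shape=(max_words,)))
    for _ in range(n_layers - 1):
        probe.add(Dense(128, activation='relu'))
    probe.add(Dense(num_classes, activation='softmax'))
    probe.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    probe_hist = probe.fit(x_train, y_train, batch_size=32, epochs=5,
                           validation_split=0.1, verbose=0)
    val_key = 'val_accuracy' if 'val_accuracy' in probe_hist.history else 'val_acc'
    depth_scores[n_layers] = probe_hist.history[val_key][-1]

plt.plot(list(depth_scores.keys()), list(depth_scores.values()), 'o-')
plt.xlabel('Number of hidden layers')
plt.ylabel('Validation accuracy')
plt.show()
# -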
# + id="-_WNTAamdVXT" colab_type="code" colab={}
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# + id="qqA6hIbIdVXV" colab_type="code" outputId="009321d1-de6b-4c56-cc38-7bfb0a317b44" colab={"base_uri": "https://localhost:8080/", "height": 1000}
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1)
# + [markdown] id="GpxyxxOudVXY" colab_type="text"
# #### validate the accuracy
# + id="-f4kjWoydVXY" colab_type="code" outputId="e42ba25b-1337-4000-b73d-d25fa56071d1" colab={"base_uri": "https://localhost:8080/", "height": 51}
score = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
score
# + id="dQyWrrWwsXjQ" colab_type="code" outputId="2233b264-0137-4593-d6e5-7ec63b91aa08" colab={"base_uri": "https://localhost:8080/", "height": 51}
model.evaluate(x_test, y_test)
# + id="eeFFpdKBVNEF" colab_type="code" outputId="3c947203-e0e5-4f3a-9136-fcf7a0d3a23a" colab={"base_uri": "https://localhost:8080/", "height": 306}
pd.concat([pd.DataFrame(x_test), pd.DataFrame(y_test)], axis=1)
# + id="L-a9A2L4X907" colab_type="code" outputId="089c69de-0dab-4f9c-e437-e7138e423868" colab={"base_uri": "https://localhost:8080/", "height": 254}
df_confusion = pd.crosstab(y_actu, y_pred, dropna = False)
plot_confusion_matrix(df_confusion)
# + id="FEdsYfsOYF_h" colab_type="code" outputId="0ecece5a-e3ff-442e-ffaf-ca68713e80a9" colab={"base_uri": "https://localhost:8080/", "height": 142}
df_confusion
# + id="R_5YXKdaUHdy" colab_type="code" colab={}
y_true_nn = np.argmax(y_test, axis=1)
y_pred_nn = np.argmax(model.predict(x_test), axis=1)
con_mat = tf.math.confusion_matrix(labels=y_true_nn, predictions=y_pred_nn).numpy()
# + id="ssOUnSk-dVXa" colab_type="code" colab={}
from keras import metrics
# + id="g52ApdnVdVXc" colab_type="code" colab={}
print(confusion_matrix(y_true_nn, y_pred_nn))
# + id="gLOCyRiTdVXf" colab_type="code" colab={}
# + [markdown] id="JYWnjTvOdIk7" colab_type="text"
# # Sankey Diagrams
# + [markdown] id="OdjeuwKjgz6u" colab_type="text"
#
# + id="P0ei1VcxhadF" colab_type="code" colab={}
# + id="-WeF0jcqhamC" colab_type="code" colab={}
# + id="E4QRaX5ohaol" colab_type="code" colab={}
# + id="8xyQxNSmharg" colab_type="code" colab={}
# + [markdown] id="MZPYZymzdVFD" colab_type="text"
# # Association Node Networks using ARM
# + [markdown] id="55gdD3MRgq3W" colab_type="text"
# Python doesn't have an equivalent of the arulesViz library, which makes it impossible to draw the node networks natively, but we can call R from within Python here
# + id="d8c3JryPiv1P" colab_type="code" colab={}
# %load_ext rpy2.ipython
# + id="LOo4MarMP1eM" colab_type="code" colab={}
import rpy2.robjects.packages as rpackages
utils = rpackages.importr('utils')
utils.chooseCRANmirror(ind=1) # select the first mirror in the list
utils.chooseBioCmirror(ind=1)
packnames = ('tm', 'SnowballC', 'arules', 'devtools')
from rpy2.robjects.vectors import StrVector
utils.install_packages(StrVector(packnames))
# + id="KKFTydkMX76n" colab_type="code" colab={} language="R"
#
# install.packages("arulesViz", lib="/data/Rpackages/")
# + id="FW_xlrSts8Qq" colab_type="code" colab={} language="R"
# library(tidyverse)
# library(readxl)
# library(tm)
# library(SnowballC)
# library(arules)
# library(arulesViz)
# + id="NSjrSLjlvcbQ" colab_type="code" outputId="9b4dcc60-05b3-4bed-fef1-111304f663b3" colab={"base_uri": "https://localhost:8080/", "height": 255} language="R"
# txt = read_excel('SLFC survey.xlsx')
# txt
# + id="wD8f15D7yDlK" colab_type="code" outputId="b0aae7f0-b893-4616-9e3d-2c0d783715b1" colab={"base_uri": "https://localhost:8080/", "height": 122} language="R"
# txt$`Temperature`[1:5]
# + [markdown] id="SSHDTUpAcPxW" colab_type="text"
# ### Preprocessing text
# + id="Hu4lLi5jh6c-" colab_type="code" colab={} language="R"
# #-------------------------------------------------DETAILED TEXT MINING --------------------------------------------
# ## Preprocessing to create a document term matrix
# mydata <- txt$`Temperature`
# documents <- VectorSource(t(mydata))
# jeopCorpus <- Corpus(documents)
# jeopCorpus <- tm_map(jeopCorpus, PlainTextDocument)
# jeopCorpus <- tm_map(jeopCorpus, content_transformer(tolower))
# jeopCorpus <- tm_map(jeopCorpus, removePunctuation)
# jeopCorpus <- tm_map(jeopCorpus, stripWhitespace)
# jeopCorpus <- tm_map(jeopCorpus, removeNumbers)
# jeopCorpus <- tm_map(jeopCorpus, removeWords, stopwords("english"))
# jeopCorpus <- tm_map(jeopCorpus, stemDocument)
# # jeopCorpus <- tm_map(jeopCorpus, removeWords, c("per","awocwo","tke","acceptgn","cwomd","jeansd","mwr","lwr","ghazi",
# # "barryrw","barrygn","robert","wbarri","extend","reason","work","jean",
# # "thank","awomd","order","barri","teeven","cli","close","accept","fulli",
# # "mbeaudri","mike","allison","aschmidtk","schmidtk","verbal","can","now",
# # "pleas","will","made","alt","need","new","open","due","use","complet",
# # "done","went","flr", "technician", "comment", "jpl", "awo", "cwo", "gill",
# # "sab","ben","message","left", "rene", "lattfield", "tstatsk"))
# # jeopCorpus <- tm_map(jeopCorpus, content_transformer(gsub), pattern = "\\b(stat|tstat|statsk|tstatsk|statssk)\\b",
# # replacement = "tstat")
# + [markdown] id="ZHVC5SmGdRGa" colab_type="text"
# ### Turn into a document term matrix
# + id="-9QrUsx_cYH-" colab_type="code" colab={} language="R"
# df_experiment <- data.frame(text = get("content", jeopCorpus))
# head(df_experiment)
#
# dtm <- DocumentTermMatrix(jeopCorpus)
#
# #-----------Create a bar plot of the most common terms ----------------------------
# #create a term document matrix instead of document term matrix
# tdm <- TermDocumentMatrix(jeopCorpus)
# m <- as.matrix(tdm)
# v <- sort(rowSums(m),decreasing=TRUE)
# d <- data.frame(word = names(v),freq=v)
#
# # Write the raw document text matrix to a CSV ----
# write.csv(as.matrix(dtm), file="dtm.csv")
#
# #Word frequency matrix
# freq <- colSums(as.matrix(dtm))
# length(freq)
# ord <- order(freq)
# m <- as.matrix(freq)
# dim(m)
#
# #creating term matrix with TF-IDF weighting
# terms <-DocumentTermMatrix(jeopCorpus,control = list(weighting = function(x) weightTfIdf(x, normalize = FALSE)))
# write.csv(as.matrix(terms), file="tfidf.csv")
#
# #Word frequency matrix after TF-IDF weighting
# freq <- colSums(as.matrix(dtm))
# length(freq)
# ord <- order(freq)
# m <- as.matrix(freq)
# dim(m)
#
# dtmss <- removeSparseTerms(dtm, 0.994) # drop very sparse terms: with sparse = 0.994 a term is kept only if it appears in at least 0.6% of documents (values between 0.98 and 0.9999 are typical)
# write.csv(as.matrix(dtmss), file="dtmss.csv")
#
# ## ---- Using non-sparse dtm conduct an association rule analysis
#
# mydata <- read.csv('dtmss.csv', stringsAsFactors = FALSE)
# mydata[1] <- NULL
# + [markdown] id="lIOQXBGidZbF" colab_type="text"
# ### Set up the support and confidence parameters
# + id="b89DcUKHchaK" colab_type="code" colab={} language="R"
# # Set support and confidence values
# Support_ARM = 0.01
# Confidence_ARM = 0.85
# Number_of_rules = 20
#
# ## Mine rules with the Support_ARM / Confidence_ARM settings above, sorted by support
# mydata <- sapply(mydata,as.logical)
# rules <- apriori(mydata,parameter = list(sup = Support_ARM, conf = Confidence_ARM,target="rules"))
# summary(rules)
# rules.sorted <- sort(rules, by="support")
#
# subrules <- head(sort(rules, by="support"),Number_of_rules)
# view(subrules)
# write(subrules, file = "dataNoClusteringSup00005Conf_support.csv", sep = ",")
#
# set.seed(1)
# + [markdown] id="uKkOYIuPdgZI" colab_type="text"
# ### plot the graphs
# + id="o7NHLZJvc3T4" colab_type="code" outputId="15af8642-505b-4d1f-ac68-34de6facac2c" colab={"base_uri": "https://localhost:8080/", "height": 840} language="R"
# plot(subrules, method="graph", control=list(type="items"))
# + id="0UUjD1G_cnj-" colab_type="code" outputId="7a1fe062-8a20-43ca-ed0a-41a92cc492b8" colab={"base_uri": "https://localhost:8080/", "height": 136} language="R"
# ## Same support/confidence settings, sorted by lift
# mydata <- sapply(mydata,as.logical)
# rules <- apriori(mydata,parameter = list(sup = Support_ARM, conf = Confidence_ARM,target="rules"))
# summary(rules)
# rules.sorted <- sort(rules, by="lift")
#
# subrules <- head(sort(rules, by="lift"),Number_of_rules)
# view(subrules)
# write(subrules, file = "dataNoClusteringSup00005Conf_lift.csv", sep = ",")
#
# set.seed(1)
# + id="LyIKcidJc9Jz" colab_type="code" outputId="f0215f0a-80db-42c7-e4ac-a7c37c818569" colab={"base_uri": "https://localhost:8080/", "height": 925} language="R"
# plot(subrules, method="graph", control=list(type="items"))
# + id="j-arOOwkctNV" colab_type="code" outputId="99c2bc98-a777-4514-e550-3f7b72e4dfe9" colab={"base_uri": "https://localhost:8080/", "height": 136} language="R"
# ## Same support/confidence settings, sorted by confidence
# mydata <- sapply(mydata,as.logical)
# rules <- apriori(mydata,parameter = list(sup = Support_ARM, conf = Confidence_ARM,target="rules"))
# summary(rules)
# rules.sorted <- sort(rules, by="confidence")
#
# subrules <- head(sort(rules, by="confidence"),Number_of_rules)
# view(subrules)
# write(subrules, file = "dataNoClusteringSup00005Conf_confidence.csv", sep = ",")
#
# set.seed(1)
# + id="KLxmvFBAdGcP" colab_type="code" outputId="e69b9381-5cad-4471-8f5e-4951c8842dee" colab={"base_uri": "https://localhost:8080/", "height": 840} language="R"
# plot(subrules, method="graph", control=list(type="items"))
# + [markdown] id="-rVkfsSJdcqM" colab_type="text"
# # Topic Modelling of Survey Responses
# + [markdown] id="6y025w4UgPuI" colab_type="text"
# ## Read in the Data
# + id="zaqJVIDGe971" colab_type="code" colab={}
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from textblob import TextBlob, Word
from textblob.sentiments import NaiveBayesAnalyzer
from textblob.classifiers import NaiveBayesClassifier
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
# + id="jLXzaJTUgRl_" colab_type="code" outputId="a9bca6cf-dd38-481e-e524-90c4d0e396d2" colab={"base_uri": "https://localhost:8080/", "height": 202}
survey_resp_temp = pd.read_excel('SLFC survey.xlsx',
sheet_name = 'Temp')
survey_resp_temp = survey_resp_temp[['Floor Work On','Temperature']]
survey_resp_temp = survey_resp_temp.replace('B1 Basement', 'B')
survey_resp_temp[0:5]
# + id="6xkzxvErgT3z" colab_type="code" outputId="83c886d9-91ed-4434-c064-6b5df8f3cddc" colab={"base_uri": "https://localhost:8080/", "height": 202}
survey_resp_maint = pd.read_excel('SLFC survey.xlsx',
sheet_name = 'Maint')
survey_resp_maint = survey_resp_maint[['Floor Work On','Maintenance']]
survey_resp_maint = survey_resp_maint.replace('B1 Basement', 'B')
survey_resp_maint[0:5]
# + [markdown] id="CP8n4NmZzSUQ" colab_type="text"
# Clean up the text (i.e. remove stopwords and make lowercase)
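# + [markdown]
# `clean_text` is defined earlier in this notebook; for reference, a minimal sketch of what
# such a helper typically does (lowercasing, stripping punctuation and digits, removing NLTK
# English stopwords). This is illustrative only and may differ from the exact implementation
# used here; it assumes the NLTK stopword corpus has been downloaded.

# +
import re
from nltk.corpus import stopwords  # assumes nltk.download('stopwords') has been run

EXAMPLE_STOPWORDS = set(stopwords.words('english'))

def clean_text_example(text):
    text = str(text).lower()               # lowercase
    text = re.sub(r'[^a-z\s]', ' ', text)  # drop punctuation and digits
    return ' '.join(w for w in text.split() if w not in EXAMPLE_STOPWORDS)
# -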
# + id="FXPWA0lly3hS" colab_type="code" outputId="63458a31-af46-430e-d3fd-cf8b7d794deb" colab={"base_uri": "https://localhost:8080/", "height": 202}
survey_resp_temp['Temperature'] = survey_resp_temp['Temperature'].apply(clean_text)
survey_resp_temp['Temperature'].apply(lambda x: len(x.split(' '))).sum()
survey_resp_temp[0:5]
# + id="Xf0GjUJZy_ci" colab_type="code" outputId="28752276-2893-47a3-b494-cd59fb1bb7ad" colab={"base_uri": "https://localhost:8080/", "height": 202}
survey_resp_maint['Maintenance'] = survey_resp_maint['Maintenance'].apply(clean_text)
survey_resp_maint['Maintenance'].apply(lambda x: len(x.split(' '))).sum()
survey_resp_maint[0:5]
# + [markdown] id="xGTA4kH9fbpV" colab_type="text"
# ## Create the vectorized arrays for the text
# + id="rIIMU9vIdqok" colab_type="code" colab={}
#Create a vectorizer (TF-idf and count) for temperature
vectorizer = CountVectorizer()
vectorizer_TFIDF = TfidfVectorizer()
# apply transformation
tf_temp = vectorizer.fit_transform(survey_resp_temp['Temperature']).toarray()
tf_idf_temp = vectorizer_TFIDF.fit_transform(survey_resp_temp['Temperature']).toarray()
# tf_feature_names tells us what word each column in the matrix represents
tf_feature_names_temp = vectorizer.get_feature_names()
tf_idf_feature_names_temp = vectorizer_TFIDF.get_feature_names()
# + id="Uchgj2XbfR0M" colab_type="code" colab={}
#Create a vectorizer (TF-idf and count) for maintenance
tf_maint = vectorizer.fit_transform(survey_resp_maint['Maintenance']).toarray()
tf_idf_maint = vectorizer_TFIDF.fit_transform(survey_resp_maint['Maintenance']).toarray()
# tf_feature_names tells us what word each column in the matrix represents
tf_feature_names_maint = vectorizer.get_feature_names()
tf_idf_feature_names_maint = vectorizer_TFIDF.get_feature_names()
# + [markdown] id="ASnbE6nUfWMi" colab_type="text"
# ## Create a function to turn the model object "model" into a dataframe that shows the created topics
# + id="dRQECZgjfTs6" colab_type="code" colab={}
def display_topics(model, feature_names, no_top_words):
topic_dict = {}
for topic_idx, topic in enumerate(model.components_):
topic_dict["Topic %d words" % (topic_idx)]= ['{}'.format(feature_names[i])
for i in topic.argsort()[:-no_top_words - 1:-1]]
topic_dict["Topic %d weights" % (topic_idx)]= ['{:.1f}'.format(topic[i])
for i in topic.argsort()[:-no_top_words - 1:-1]]
return pd.DataFrame(topic_dict)
# + [markdown] id="UZAOpmb6fiXu" colab_type="text"
# ## Topic modelling using the LDA (Latent Dirichlet Allocation) algorithm
# + id="ccAFOu2gffS7" colab_type="code" outputId="718f19d9-1504-44e4-92a4-7566acb563ed" colab={"base_uri": "https://localhost:8080/", "height": 410}
#%% Topic modelling using LDA (Latent Dirichlet Allocation) for Thermal complaints
# Create the model object; start by choosing 6 topics
number_of_topics = 6
# Create the LDA (Latent Dirichlet Allocation ) model
model_LDA_temp = LatentDirichletAllocation(n_components=number_of_topics, random_state=0)
model_LDA_temp.fit(tf_idf_temp)
no_top_words = 10
Topics_LDA_temp = display_topics(model_LDA_temp, tf_feature_names_temp, no_top_words)
Topics_LDA_temp
# + id="sRU0_D8SLaZe" colab_type="code" colab={}
#export_csv = Topics_LDA_temp.to_csv ('LDA_Temp.csv', index = None, header=True)
# + id="4oWPeDkOffVI" colab_type="code" outputId="daea18b1-2c4b-475e-8e05-962b08682b27" colab={"base_uri": "https://localhost:8080/", "height": 202}
#Find the topics for each temperature survey response
Survey_Classification_LDA_temp = pd.DataFrame(model_LDA_temp.transform(tf_idf_temp))
Survey_Classification_LDA_temp['Survey response no.'] = np.arange(1,len(survey_resp_temp)+1,1)
Survey_Classification_LDA_temp = Survey_Classification_LDA_temp[['Survey response no.', 0, 1, 2, 3, 4, 5]]
Survey_Classification_LDA_temp [0:5]
# + id="KjQJ-BSpffXp" colab_type="code" outputId="cd264dd0-4394-408f-c878-333f7b962a17" colab={"base_uri": "https://localhost:8080/", "height": 257}
#%% Topic modelling using LDA (Latent Dirichlet Allocation) for Maintenance complaints
# Create the model object; start by choosing 6 topics
number_of_topics = 6
# Create the LDA (Latent Dirichlet Allocation ) model
model_LDA_maint = LatentDirichletAllocation(n_components=number_of_topics, random_state=0)
model_LDA_maint.fit(tf_idf_maint)
no_top_words = 5
Topics_LDA_maint = display_topics(model_LDA_maint, tf_feature_names_maint, no_top_words)
export_csv = Topics_LDA_maint.to_csv ('LDA_Maint.csv', index = None, header=True)
Topics_LDA_maint
# + [markdown] id="W01Xl5JnfrPN" colab_type="text"
# QUICK NOTE: Clearly the LDA algorithm doesn't work very well on short responses of less than 140 characters (i.e., tweet-length text). For example, the phrase "global warming" comes up in almost every topic, so it should perhaps be added to the stopwords list (a sketch of how to do that follows below). Let's try an alternate model (NMF) to see whether the outcome improves versus LDA.
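# + [markdown]
# A minimal sketch of extending the vectorizer stop-word list as suggested above
# (illustrative only; the extra terms and variable names here are placeholders, and the
# matrices would need to be re-built from `survey_resp_temp['Temperature']` afterwards).

# +
from sklearn.feature_extraction import text as sklearn_text
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

extra_stop_words = ['global', 'warming']  # hypothetical additions
custom_stop_words = list(sklearn_text.ENGLISH_STOP_WORDS.union(extra_stop_words))

vectorizer_custom = CountVectorizer(stop_words=custom_stop_words)
vectorizer_TFIDF_custom = TfidfVectorizer(stop_words=custom_stop_words)
tf_idf_temp_custom = vectorizer_TFIDF_custom.fit_transform(survey_resp_temp['Temperature']).toarray()
# -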
# + [markdown] id="jMyKdpR-f_MN" colab_type="text"
# ## Topic modelling using the NMF (Non-negative Matrix Factorization) algorithm
# + id="0NRsqqIYffZp" colab_type="code" outputId="12ec82e2-31c2-4763-8939-b0dc8ba12646" colab={"base_uri": "https://localhost:8080/", "height": 410}
#%% Topic modelling using NMF (Non-negative Matrix Factorization) for thermal complaints
number_of_topics = 6
model_NMF_temp = NMF(n_components=number_of_topics, random_state=0, alpha=.01, l1_ratio=.9)
model_NMF_temp.fit(tf_idf_temp)
#apply this function to the model
no_top_words = 10
Topics_NMF_temp = display_topics(model_NMF_temp, tf_feature_names_temp, no_top_words)
export_csv = Topics_NMF_temp.to_csv ('NMF_Temp.csv', index = None, header=True)
Topics_NMF_temp
# + id="xCt_M1Rdffdw" colab_type="code" outputId="53e5ce5a-86a8-434e-ce15-afa726c28717" colab={"base_uri": "https://localhost:8080/", "height": 202}
#Find the topics for each thermal survey response
Survey_Classification_NMF_temp = pd.DataFrame(model_NMF_temp.transform(tf_idf_temp))
Survey_Classification_NMF_temp['Survey response no.'] = np.arange(1,len(survey_resp_temp)+1,1)
Survey_Classification_NMF_temp = Survey_Classification_NMF_temp[['Survey response no.', 0, 1, 2, 3, 4, 5]]
Survey_Classification_NMF_temp[0:5]
# + id="OTEL2gzwffhs" colab_type="code" outputId="f02fbdbb-dbf4-4473-a875-617cfc33d362" colab={"base_uri": "https://localhost:8080/", "height": 410}
#%% Topic modelling using NMF (Non-negative Matrix Factorization) for maintenance complaints
number_of_topics = 6
model_NMF_maint = NMF(n_components=number_of_topics, random_state=0, alpha=.01, l1_ratio=.9)
model_NMF_maint.fit(tf_idf_maint)
#apply this function to the model
no_top_words = 10
Topics_NMF_maint = display_topics(model_NMF_maint, tf_feature_names_maint, no_top_words)
export_csv = Topics_NMF_maint.to_csv ('NMF_Maint.csv', index = None, header=True)
Topics_NMF_maint
# + id="hbyuOu8Lffl6" colab_type="code" outputId="5d16a5aa-ed3f-4e2e-ed4b-d17ce29a118d" colab={"base_uri": "https://localhost:8080/", "height": 202}
#Find the topics for each survey response
Survey_Classification_NMF_maint = pd.DataFrame(model_NMF_maint.transform(tf_idf_maint))
Survey_Classification_NMF_maint['Survey response no.'] = np.arange(1,len(survey_resp_maint)+1,1)
Survey_Classification_NMF_maint = Survey_Classification_NMF_maint[['Survey response no.', 0, 1, 2, 3, 4, 5]]
Survey_Classification_NMF_maint[0:5]
| 48,711 |
/exercise 6 databases.ipynb
|
7be29790e155e10ed872f776873e464e5a838be0
|
[] |
no_license
|
EleniPa/ouy_advanced_programming
|
https://github.com/EleniPa/ouy_advanced_programming
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,672 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pymongo
import json
people = open("People.json")
client = pymongo.MongoClient("mongodb://localhost:27017/")
db = client["advancedProgramming"]
collection = db["people"]
people_decoded = json.load(people)
student_list = people_decoded.get('students')
ins = collection.insert_many(student_list)
age_results = collection.find({'age':{'$gt': 25}}, {'_id': 0, 'fullName.first':1, 'fullName.surname':1})
for r in age_results:
print(r)
no_middle = collection.find({'fullName.other': {'$eq': None}}, {})
for entry in no_middle:
print(entry)
men_not_tokyo = collection.find({'fullName.title': {'$eq': 'Mr'}, 'city': {'$ne': 'Tokyo'}})
count = 0
for man in men_not_tokyo:
count += 1
print(f"Number of men not in Tokyo: {count}")
women_not_tokyo = collection.find({'fullName.title': {'$in': ['Mrs', 'Miss']}, 'city': {'$ne': 'Tokyo'}})
count = 0
for woman in women_not_tokyo:
    count += 1
print(f"Number of women not in Tokyo: {count}")
| 1,222 |
/docs/images/Performance.ipynb
|
65afb764e04f45aa573d0e2ed890c04b52ef1a62
|
[
"BSD-3-Clause"
] |
permissive
|
mbrukman/stumpy
|
https://github.com/mbrukman/stumpy
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 43,187 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generate Performance Plot
# ## Import Packages
# +
import pandas as pd
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import NumeralTickFormatter, Label
from bokeh.palettes import Viridis6
from bokeh.io import export_png
output_notebook()
# -
# ## Retrieve and Process Data
# +
df = pd.read_html('https://github.com/TDAmeritrade/stumpy', match='STUMPED.256')[0]
df = df.rename(columns={'n = 2i': 'n'})
df['GPU-STOMP'] = pd.to_timedelta(df['GPU-STOMP'])
df['STUMP.16'] = pd.to_timedelta(df['STUMP.16'])
df['STUMPED.128'] = pd.to_timedelta(df['STUMPED.128'])
df['STUMPED.256'] = pd.to_timedelta(df['STUMPED.256'])
df['GPU-STUMP.1'] = pd.to_timedelta(df['GPU-STUMP.1'])
df['GPU-STUMP.2'] = pd.to_timedelta(df['GPU-STUMP.2'])
df.head()
# +
dfs = {
'GPU-STOMP': df[['n', 'GPU-STOMP']],
'STUMP.16': df[['n', 'STUMP.16']],
'STUMPED.128': df[['n', 'STUMPED.128']],
'STUMPED.256': df[['n', 'STUMPED.256']],
'GPU-STUMP.1': df[['n', 'GPU-STUMP.1']],
'GPU-STUMP.2': df[['n', 'GPU-STUMP.2']],
}
line_dashes = {
'GPU-STOMP': '10 10',
'STUMP.16': '5 5',
'STUMPED.128': 'solid',
'STUMPED.256': '15 15',
'GPU-STUMP.1': 'solid',
'GPU-STUMP.2': 'solid',
    # '^(\d+(\s+\d+)*)?$'
}
for k in dfs.keys():
dfs[k] = dfs[k].dropna()
# -
# ## Plot Performance Results
# +
p = figure(plot_width=1000)
for i, k in enumerate(dfs.keys()):
p.line(dfs[k].iloc[:, 0],
dfs[k].iloc[:, 1]/pd.Timedelta('1 days'),
line_color=Viridis6[i],
line_width=4,
line_dash=line_dashes[k],
legend_label=k,
)
p.xaxis[0].formatter = NumeralTickFormatter(format="0,0")
p.yaxis[0].formatter = NumeralTickFormatter(format="0,0")
p.xaxis.axis_label = 'Time Series Length (n)'
p.yaxis.axis_label = 'Days'
p.xaxis.axis_label_text_font_size = "16pt"
p.yaxis.axis_label_text_font_size = "16pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
p.legend.location = "top_left"
p.legend.label_text_font_size = "12pt"
p.legend.glyph_width = 75
label = Label(x=80000000, y=5, text='Lower is "Better"', render_mode='css', text_font_size='16pt')
p.add_layout(label)
show(p)
# -
export_png(p, 'performance.png')
| 2,567 |
/model.ipynb
|
fcb8b46367f80e16d7befeeea8c25d08cce1c544
|
[] |
no_license
|
Manojython/ColorGAN
|
https://github.com/Manojython/ColorGAN
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 73,551 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/harvard-ml-courses/a-cs281-demo/blob/master/18_Filter.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="1PH8WirdW0PH" colab_type="code" outputId="4efd3c64-4039-4be5-bf24-b63726c39eec" colab={"base_uri": "https://localhost:8080/", "height": 53}
# !pip install -qU plotly torch daft opt-einsum networkx
# !rm -fr start; git clone --single-branch -b demos2018 -q https://github.com/harvard-ml-courses/cs281-demos start; cp -f start/cs281.py cs281.py
# + id="mjjOL1vuXT5w" colab_type="code" colab={}
import torch
import torch.distributions as ds
import daft
# + id="lDR0yRbIZm1F" colab_type="code" colab={}
# LDS Parameters
Sigma = torch.eye(10).unsqueeze(0)
Sigma2 = torch.eye(20).unsqueeze(0)
A = torch.rand(10, 10)
C = torch.rand(20, 10)
# + id="HCIMjhdkW1V1" colab_type="code" colab={}
# Sample through LDS
x = []
z = [ds.MultivariateNormal(torch.rand(1, 10), Sigma).sample(torch.Size([5])).squeeze(1)]
for t in range(10):
mu = torch.einsum("xz,bz->bx", [C, z[-1]])
x.append(ds.MultivariateNormal(mu, Sigma2).sample())
mu2 = torch.einsum("xz,bz->bx", [A, z[-1]])
z.append(ds.MultivariateNormal(mu2, Sigma).sample())
# + id="imn7nQ35Zj5G" colab_type="code" colab={}
# Non-Linear LDS Parameters
Sigma = torch.eye(10).unsqueeze(0)
Sigma2 = torch.eye(20).unsqueeze(0)
A = torch.rand(10, 10)
C = torch.rand(30, 10)
D = torch.rand(20, 30)
# + id="pZlun0z8XsYi" colab_type="code" colab={}
# Sample through non-linear DS
z = [ds.MultivariateNormal(torch.rand(1, 10), Sigma).sample(torch.Size([5])).squeeze(1)]
for t in range(10):
h = torch.sigmoid(torch.einsum("xz,bz->bx", [C, z[-1]]))
mu = torch.sigmoid(torch.einsum("bx,yx->by", [h, D]))
x.append(ds.MultivariateNormal(mu, Sigma2).sample())
mu2 = torch.einsum("xz,bz->bx", [A, z[-1]])
z.append(ds.MultivariateNormal(mu2, Sigma).sample())
# + id="74MiQbByaGzo" colab_type="code" colab={}
| 2,301 |
/PyImageJ/.ipynb_checkpoints/Rigid registration with pyimagej-checkpoint.ipynb
|
150593cefb379f5541a85fef40e134f8b3e5bae3
|
[] |
no_license
|
CMollier/Notebooks
|
https://github.com/CMollier/Notebooks
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 114,943 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Running a plugin that uses ImageJ1 windows
# Running a plugin that uses ImageJ1 windows requires using ImageJ in GUI mode and requires handling the resulting windows. Each plugin may have unique behavior to pay attention to. E.g., the Rigid Registration plugin requires specific pixel types and operates on the currently selected window.
# +
# The specified fiji install does not seem to ship with rigid registration plugin. I directed it to my local install. Just replace the path with the one to your own.
import imagej
#ij = imagej.init('sc.fiji:fiji')
# Rigid Registration does not have a headless version. You need to specify that ImageJ will NOT be operating in headless mode.
ij = imagej.init('C:/Users/mpinkert/Fiji.app', headless=False)
# -
from skimage import io
import numpy as np
import os
cwd = os.getcwd()
from jnius import autoclass
WindowManager = autoclass('ij.WindowManager')
original_img = io.imread('https://acdbio.com/sites/default/files/styles/sample_image/public/M-IL-8-8-hr_0.jpg')
moving_img = np.mean(original_img, axis=2)
# Let's make the fixed image a translated image so that we can see if the plugin works.
imshape = np.shape(moving_img)
fixed_img = np.zeros(imshape)
fixed_img[40:, 40:] = moving_img[0:(imshape[0]-40), 0:(imshape[1]-40)]
# +
# Rigid Registration only allows a few pixel types, such as 8-bit grayscale
ij.ui().show('fixed image', ij.py.to_java(fixed_img))
ij.py.run_macro("""run("8-bit");""")
# You need to specify that no changes have been made, or else the close window dialogue at the end will ask for confirmation.
img = WindowManager.getCurrentImage()
img.changes = False
ij.ui().show('moving image', ij.py.to_java(moving_img))
ij.py.run_macro("""run("8-bit");""")
img = WindowManager.getCurrentImage()
img.changes = False
# +
# Define the plugin parameters. You can apply the registration to as many open images as you want. Simple keep adding key/value pairs with 'Image name': True, as with the 'moving image': True below
plugin = 'Rigid Registration'
args = {
'initialtransform': [],
'n': 1,
'tolerance': 1.000,
'level': 4,
'stoplevel': 2,
'materialcenterandbbox': [],
'showtransformed': 1,
'template': 'fixed image',
'measure': 'Correlation',
'moving image': True
}
# -
ij.py.run_plugin(plugin, args)
# The window should be open in ImageJ if you are running this cell-by-cell; if not, here you can see the result.
result = WindowManager.getCurrentImage()
ij.py.show(result)
# Get the numpy array out of the image. You must do this before closing the windows.
result_array = ij.py.from_java(result)
result_array
# Close the windows in Fiji.
WindowManager.closeAllWindows()
| 2,966 |
/LOAN PREDICTION.ipynb
|
485f0b390dbcc12272813332250a5fef7961c252
|
[] |
no_license
|
Nilanjan2022/Loan-Prediction
|
https://github.com/Nilanjan2022/Loan-Prediction
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 442,656 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/bjwoo1/python_start/blob/main/%ED%8C%8C%EC%9D%B4%EC%8D%AC%EC%A0%9C%EC%96%B4%EB%AC%B8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="S-bf0wi6WlNm" outputId="b25c62ec-464b-4d11-c0cf-5b649e27fcfb"
food = input('Enter the food you want to eat >> ')
print('The food you entered is', food)
# + colab={"base_uri": "https://localhost:8080/"} id="iU3dhie9cZdX" outputId="751b18c1-138d-424c-fd79-83ec3fa8a3a4"
if food == 'udon':
    print('Take an umbrella.')
    print('Take some money.')
    print('Go to the udon restaurant.')
elif food == 'jajangmyeon':
    print('Go to the Chinese restaurant.')
elif food == 'ramen':  # a condition must always be written after if
    print('Go to the snack bar.')
else:  # no condition is written after else
    print('Eat at home.')
# + id="s0H_ctrgmXXo"
# + colab={"base_uri": "https://localhost:8080/"} id="PRXMAQKMdYjj" outputId="b70b58b6-ce44-471f-f7d4-310ed012ae11"
start = 0
while start < 10:  # loops while the condition is True
    print(start, '> I repeat')
    start = start + 1
# + colab={"base_uri": "https://localhost:8080/"} id="4yzS7t9qfb1U" outputId="4761cd72-1e41-46e6-c217-a0fe328278d8"
for x in range(0, 10):  # 0 to 9; the default step is 1
    print(x, 'I repeat too')
# + colab={"base_uri": "https://localhost:8080/"} id="d7ReNgCgja9W" outputId="2fbcfc76-c496-49dc-ac62-9df30ac1433f"
time = input('Enter the current hour >> ')
if int(time) < 11:
    print('Good morning')
elif int(time) < 15:
    print('Good afternoon')
elif int(time) < 20:
    print('Good evening')
else:
    print('Good night')
# + id="7jMMOHDKlYHk"
month = input('Enter the current month >> ')
if int(month) == 1:
    print("It's winter.")
elif int(month) <= 5:
    print("It's spring.")
elif int(month) <= 8:
    print("It's summer.")
elif int(month) <= 11:
    print("It's autumn.")
else:
    print("It's winter.")
# + id="8S3s_XiRMZLZ"
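# + [markdown]
# The quantities plotted in the next cells (`K`, `Sum_of_squared_distances`, `sils_euclidean`,
# `sils_manhattan`) are assumed to come from a cell along these lines: a minimal sketch that
# runs K-means for a range of k on `X_scale` (the scaled feature matrix used throughout this
# section) and records the inertia and silhouette scores.

# +
from sklearn.cluster import KMeans
from sklearn import metrics

K = range(2, 11)
Sum_of_squared_distances = []
sils_euclidean = []
sils_manhattan = []
for k in K:
    km_k = KMeans(n_clusters=k, random_state=111).fit(X_scale)
    Sum_of_squared_distances.append(km_k.inertia_)
    sils_euclidean.append(metrics.silhouette_score(X_scale, km_k.labels_, metric='euclidean'))
    sils_manhattan.append(metrics.silhouette_score(X_scale, km_k.labels_, metric='manhattan'))
# -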
# +
plt.plot(K, Sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
# +
plt.plot(K, sils_euclidean, 'bx-')
plt.plot(K, sils_manhattan, 'rx-')
plt.title("Silhouette Scores", fontsize=20)
plt.xticks(K)
plt.xlabel("N. of clusters")
plt.ylabel("Score")
# -
# cluster_k = 5
# y_pred = KMeans(n_clusters=cluster_k, random_state=111).fit_predict(X_scale)
# coefs_data.loc[coefs_data['cluster']>0, 'cluster'] = y_pred+1
# coefs_data_raw.loc[coefs_data_raw['cluster']>0, 'cluster'] = y_pred+1
# +
from sklearn.mixture import GaussianMixture as GMM
from sklearn import metrics
from sklearn.model_selection import train_test_split
import numpy as np
def SelBest(arr:list, X:int)->list:
'''
returns the set of X configurations with shorter distance
'''
dx=np.argsort(arr)[:X]
return arr[dx]
#Courtesy of https://stackoverflow.com/questions/26079881/kl-divergence-of-two-gmms.
#Here the difference is that we take the squared root, so it's a proper metric
def gmm_js(gmm_p, gmm_q, n_samples=10**5):
X = gmm_p.sample(n_samples)[0]
log_p_X = gmm_p.score_samples(X)
log_q_X = gmm_q.score_samples(X)
log_mix_X = np.logaddexp(log_p_X, log_q_X)
Y = gmm_q.sample(n_samples)[0]
log_p_Y = gmm_p.score_samples(Y)
log_q_Y = gmm_q.score_samples(Y)
log_mix_Y = np.logaddexp(log_p_Y, log_q_Y)
return np.sqrt((log_p_X.mean() - (log_mix_X.mean() - np.log(2))
+ log_q_Y.mean() - (log_mix_Y.mean() - np.log(2))) / 2)
n_clusters=np.arange(2, 7)
sils=[]
sils_err=[]
bics=[]
bics_err=[]
js_dist=[]
js_err=[]
iterations=20
for n in n_clusters:
print(n)
tmp_sil=[]
tmp_bic=[]
tmp_dist=[]
for itr in range(iterations):
#print(itr)
gmm=GMM(n, n_init=20).fit(X_scale)
labels=gmm.predict(X_scale)
sil=metrics.silhouette_score(X_scale, labels, metric='manhattan')
tmp_sil.append(sil)
tmp_bic.append(gmm.bic(X_scale))
train, test=train_test_split(X_scale, test_size=0.5)
gmm_train=GMM(n, n_init=20).fit(train)
gmm_test=GMM(n, n_init=20).fit(test)
tmp_dist.append(gmm_js(gmm_train, gmm_test))
val=np.mean(SelBest(np.array(tmp_dist), int(iterations/5)))
err=np.std(tmp_dist)
js_dist.append(val)
js_err.append(err)
val=np.mean(SelBest(np.array(tmp_bic), int(iterations/5)))
err=np.std(tmp_bic)
bics.append(val)
bics_err.append(err)
val=np.mean(SelBest(np.array(tmp_sil), int(iterations/5)))
err=np.std(tmp_sil)
sils.append(val)
sils_err.append(err)
# -
plt.errorbar(n_clusters, sils, yerr=sils_err)
plt.title("Silhouette Scores", fontsize=20)
plt.xticks(n_clusters)
plt.xlabel("N. of clusters")
plt.ylabel("Score")
# +
plt.errorbar(n_clusters, js_dist, yerr=js_err)
plt.title("Distance between Train and Test GMMs", fontsize=20)
plt.xticks(n_clusters)
plt.xlabel("N. of clusters")
plt.ylabel("Distance")
plt.show()
# -
plt.errorbar(n_clusters,bics, yerr=bics_err, label='BIC')
plt.title("BIC Scores", fontsize=20)
plt.xticks(n_clusters)
plt.xlabel("N. of clusters")
plt.ylabel("Score")
plt.legend()
plt.errorbar(n_clusters, np.gradient(bics), yerr=bics_err, label='BIC')
plt.title("Gradient of BIC Scores", fontsize=20)
plt.xticks(n_clusters)
plt.xlabel("N. of clusters")
plt.ylabel("grad(BIC)")
plt.legend()
# +
from sklearn.metrics.cluster import contingency_matrix
def align_cluster_index(ref_cluster, map_cluster):
"""
    remap cluster index according to the ref_cluster.
    both inputs must have the same number of unique cluster index values.
"""
ref_values = np.unique(ref_cluster)
map_values = np.unique(map_cluster)
if ref_values.shape[0]!=map_values.shape[0]:
print('error: both inputs must have same number of unique cluster index values.')
return()
cont_mat = contingency_matrix(ref_cluster, map_cluster)
num_rows = len(cont_mat)
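    # Dice-style normalization: 2 * overlap / (ref cluster size + map cluster size), so each
    # entry measures the relative overlap between a ref cluster and a map cluster.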
cont_mat_col_sum = np.matmul(np.ones((num_rows, 1)), np.sum(cont_mat, axis = 0).reshape(1, num_rows))
cont_mat_row_sum = np.matmul(np.sum(cont_mat, axis = 1).reshape(num_rows, 1), np.ones((1, num_rows)))
cont_mat = cont_mat*2/(cont_mat_col_sum + cont_mat_row_sum)
print(cont_mat)
sort_0 = np.argsort(cont_mat, axis = 0)
sort_1 = np.argsort(cont_mat, axis = 1)
    for i_row in range(len(cont_mat)):
        # switch values:
        cluster_tmp = map_cluster.copy()  # copy so both masks below use the pre-swap labels
        map_cluster[cluster_tmp == map_values[sort_1[i_row, -1]]] = ref_values[i_row]
        map_cluster[cluster_tmp == map_values[i_row]] = ref_values[sort_1[i_row, -1]]
    return(map_cluster)
# +
np.set_printoptions(threshold=1000)
cluster_k = 3
############# K means #####################
km = KMeans(n_clusters=cluster_k, n_init=20)
km = km.fit(X_scale)
km_cluster = km.predict(X_scale)
############# GMM #########################
repetition = 5
y_pred = np.zeros((X_scale.shape[0], repetition))+1111
for i in range(repetition):
dpgmm = GMM(n_components=cluster_k, n_init = 5000, covariance_type='full', random_state = i).fit(X_scale)
y_pred[:, i] = dpgmm.predict(X_scale)
print(y_pred)
for i in range(1, repetition):
y_pred_i = align_cluster_index(y_pred[:,0], y_pred[:,i])
y_pred[:,i]=y_pred_i
print(y_pred)
cluster_gmm = np.zeros(X_scale.shape[0])+110
for ik in range(cluster_k):
    ik_count = np.sum(y_pred == ik, axis=1)
    # print(ik_count)
    cluster_gmm[ik_count >= (repetition // 2 + 1)] = ik  # majority vote across the aligned runs
cluster_gmm = y_pred[:, 0]  # note: this keeps the first (aligned) run, overriding the vote above
# -
coefs_data.loc[coefs_data['thresh']==True, 'cluster_km'] = km_cluster+1
coefs_data.loc[coefs_data['thresh']==True, 'cluster_gmm'] = cluster_gmm+1
coefs_summary = coefs_data.groupby('cluster_gmm').mean()
print(coefs_summary.columns)
coefs_summary
coefs_data.to_csv(output_dir+'/out02_coefs_cluster_sklearn.csv')
coefs_data
pd.set_option('display.max_rows', 500)
print(coefs_data.loc[coefs_data['cluster_gmm']==1,:].shape)
coefs_data.loc[coefs_data['cluster_gmm']==1,:].sort_values(by='Rsquare', ascending=False)
print(coefs_data.loc[coefs_data['cluster_gmm']==2,:].shape)
coefs_data.loc[coefs_data['cluster_gmm']==2,:].sort_values(by='Rsquare', ascending=False)
pd.set_option('display.max_rows', 500)
print(coefs_data.loc[coefs_data['cluster_gmm']==3,:].shape)
coefs_data.loc[coefs_data['cluster_gmm']==3,:].sort_values(by='Rsquare', ascending=False)
print(coefs_data.loc[coefs_data['cluster']==4,:].shape)
coefs_data.loc[coefs_data['cluster']==4,:].sort_values(by='Rsquare', ascending=False)
print(coefs_data.loc[coefs_data['cluster']==5,:].shape)
coefs_data.loc[coefs_data['cluster']==5,:].sort_values(by='Rsquare', ascending=False)
# +
# print(coefs_data.loc[coefs_data['cluster']==6,:].shape)
# coefs_data.loc[coefs_data['cluster']==6,:].sort_values(by='Rsquare', ascending=False)
# -
| 8,963 |
/PYTHON300_2.ipynb
|
94911a0f1ec5b88658eb7e3b4f157c997c58b808
|
[] |
no_license
|
4070E017/PYTHONPRATICE
|
https://github.com/4070E017/PYTHONPRATICE
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,462 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from configparser import ConfigParser
from sqlalchemy import create_engine
def config(filename='database.ini', section='postgresql'):
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to postgresql
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
# +
param_dic=config()
connect = "postgresql+psycopg2://%s:%s@%s:%s/%s" % (
param_dic['user'],
param_dic['password'],
param_dic['host'],
param_dic['port'],
param_dic['database']
)
engine=create_engine(connect, echo=False)
connection = engine.connect()
# -
my_query= "select timestamp,close,high,low,open,trades,turnover,volume,vwap from bitmex as bt where bt.timestamp >= date '2018-01-01' "
data= connection.execute(my_query).fetchall()
df=pd.DataFrame(data,columns=['timestamp','close','high','low','open','trades','turnover','volume','vwap'])
df.shape
df.info(verbose=True)
df.head()
df['date']=df['timestamp'].dt.date
df=df.groupby('date').mean()
df.head()
df['vwap'].replace(0, np.nan, inplace=True)
df['vwap'].fillna(method='ffill', inplace=True)
from sklearn.preprocessing import MinMaxScaler
values = df['vwap'].values.reshape(-1,1)
values = values.astype('float32')
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
train_size = int(len(scaled) * 0.7)
test_size = len(scaled) - train_size
train, test = scaled[0:train_size,:], scaled[train_size:len(scaled),:]
print(len(train), len(test))
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back):
a = dataset[i:(i + look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
print(len(dataY))
return np.array(dataX), np.array(dataY)
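# For example, with look_back=1 the series [10, 20, 30, 40] becomes dataX = [[10], [20], [30]] and dataY = [20, 30, 40].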
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX.shape[1]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
trainY
# +
from keras.models import Sequential
from keras.layers import Activation, Dense,Dropout
from keras.layers import LSTM,GRU
model = Sequential()
#model.add(LSTM(256, input_shape=(trainX.shape[1], trainX.shape[2])))
#model.add(Dense(1))
model.add(LSTM(256, return_sequences=True,input_shape=(trainX.shape[1], trainX.shape[2])))
model.add(LSTM(256))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
history = model.fit(trainX, trainY, epochs=300, batch_size=100, validation_data=(testX, testY), verbose=0, shuffle=False)
# -
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
yhat = model.predict(testX)
plt.plot(yhat, label='predict')
plt.plot(testY, label='true')
plt.legend()
plt.show()
yhat.reshape(-1,1)
yhat_inverse = scaler.inverse_transform(yhat.reshape(-1, 1))
testY_inverse = scaler.inverse_transform(testY.reshape(-1, 1))
from sklearn.metrics import mean_squared_error
from math import sqrt
rmse = sqrt(mean_squared_error(testY_inverse, yhat_inverse))
print('Test RMSE: %.3f' % rmse)
plt.plot(yhat_inverse, label='predict')
plt.plot(testY_inverse, label='actual', alpha=0.5)
plt.legend()
plt.show()
predictDates = df.tail(len(testX)).index
testY_reshape = testY_inverse.reshape(len(testY_inverse))
yhat_reshape = yhat_inverse.reshape(len(yhat_inverse))
plt.rc('figure', figsize=(20, 10))
fig, ax = plt.subplots()
ax.plot(predictDates, testY_reshape, c='blue',label= 'Actual Price',linewidth=2)
ax.plot(predictDates, yhat_reshape, c='r',label= 'Predict Price',linewidth=2)
ax.tick_params(axis='both', which='major', labelsize=20)
plt.title('Actual VS Predicted BTC Price',fontsize=20)
plt.xlabel('Date',fontsize=20)
plt.ylabel('Price in USD',fontsize=20)
plt.legend(prop={'size': 20})
plt.show()
import seaborn as sns
sns.heatmap(df.corr(), annot=True, cmap='RdYlGn', linewidths=0.1, vmin=0)
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
print(names)
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
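# For example, with n_in=1 and n_out=1 a 3-variable series is reframed into the columns var1(t-1), var2(t-1), var3(t-1), var1(t), var2(t), var3(t), and the first row (all NaN after the shift) is dropped.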
df.head()
# +
values = df[['vwap'] + ['trades'] + ['volume']].values
values = values.astype('float32')
# -
values
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
scaled.shape[1]
reframed = series_to_supervised(scaled, 1, 1)
reframed.head()
reframed.drop(reframed.columns[[4,5]], axis=1, inplace=True)
print(reframed.head())
values = reframed.values
n_train_hours = int(len(values) * 0.7)
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
multi_model = Sequential()
multi_model.add(LSTM(100, input_shape=(train_X.shape[1], train_X.shape[2])))
multi_model.add(Dense(1))
multi_model.compile(loss='mae', optimizer='adam')
multi_history = multi_model.fit(train_X, train_y, epochs=300, batch_size=100, validation_data=(test_X, test_y), verbose=0, shuffle=False)
plt.plot(multi_history.history['loss'], label='multi_train')
plt.plot(multi_history.history['val_loss'], label='multi_test')
plt.legend()
plt.show()
yhat = multi_model.predict(test_X)
plt.plot(yhat, label='predict')
plt.plot(test_y, label='true')
plt.legend()
plt.show()
test_X.shape[2]
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast
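# (the scaler was fit on three columns: vwap, trades and volume, so the forecast is re-attached to the
# other scaled inputs before inverse_transform and only the first column, vwap, is kept)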
inv_yhat = np.concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
predictDates
plt.plot(predictDates, inv_y, label= 'Actual Price')
plt.plot(predictDates, inv_yhat, label= 'Multi Predict Price')
plt.plot(predictDates, yhat_reshape, label= 'Predict Price')
plt.legend()
plt.show()
# +
#sns.set(style="darkgrid")
import seaborn as sns; sns.set()
df.loc['2019':'2018']['high'].resample(rule='T').mean().plot()
# -
data1=df.loc['2017-07-02']
data1['high'].plot(style="-")
data1['high'].resample('BM').mean().plot(style=":")
df=df.fillna(method='ffill')
df.corr()
import numpy
from numpy import array
import matplotlib.pyplot as plt
from pandas import read_csv
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM,GRU
from sklearn.preprocessing import MinMaxScaler,RobustScaler,StandardScaler
from sklearn.metrics import mean_squared_error
from pandas import Series
data=df['close']
def split_sequence(sequence, n_steps_in, n_steps_out):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps_in
out_end_ix = end_ix + n_steps_out
# check if we are beyond the sequence
if out_end_ix > len(sequence):
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# +
numpy.random.seed(0)
# load the dataset
dataframe = data
dataset = dataframe.values
dataset = dataset.astype('float64').reshape(-1, 1)
# normalize the dataset
scaler = MinMaxScaler()
#scaler=RobustScaler()
#scaler=StandardScaler()
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
n_steps_in=3
n_steps_out=1
trainX, trainY =split_sequence(train.flatten(), n_steps_in, n_steps_out)
testX, testY = split_sequence(test.flatten(), n_steps_in, n_steps_out)
# -
trainY
trainX.shape,trainY.shape
testX.shape,testY.shape
n_features = 1
trainX = trainX.reshape((trainX.shape[0], trainX.shape[1], n_features))
testX = testX.reshape((testX.shape[0], testX.shape[1], n_features))
trainX.shape,trainY.shape,testX.shape,testY.shape
# +
from keras.layers import Activation, Dense,Dropout
model = Sequential()
model.add(LSTM(256, return_sequences=True,input_shape=(n_steps_in, n_features)))
model.add(LSTM(256))
model.add(Dense(1))
import keras
from keras import optimizers
#keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False, clipnorm=1)
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, verbose=1,shuffle=False,batch_size=50)
# +
# make predictions
trainPredict = model.predict(trainX)
# -
testPredict = model.predict(testX)
trainPredict.shape,testPredict.shape
def split_y_sequence(sequence, n_steps_in, n_steps_out):
y = list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps_in
out_end_ix = end_ix + n_steps_out
# check if we are beyond the sequence
if out_end_ix > len(sequence):
break
# gather input and output parts of the pattern
seq_y = sequence[end_ix:out_end_ix]
y.append(seq_y)
return array(y)
trainPredict.shape,trainY.shape,testPredict.shape,testY.shape
# +
#trainpre=split_y_sequence(trainPredict,n_steps_in=3,n_steps_out=1)
# +
#testpre=split_y_sequence(testPredict,n_steps_in=3,n_steps_out=1)
# -
# +
#trp=trainpre.reshape(len(trainpre),2)
#tep=testpre.reshape(len(testpre),2)
# -
testPredict.shape,testY.shape
# +
#trp.shape,trainY.shape
# -
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform(trainY)
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform(testY)
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY, testPredict))
print('Test Score: %.2f RMSE' % (testScore))
predictions = numpy.empty_like(dataset)
predictions[:, :] = numpy.nan
predictions[n_steps_in:len(trainPredict)+n_steps_in, :] = trainPredict
predictions[len(trainPredict)+(n_steps_in*2):len(dataset)+2, :] = testPredict
#data=pd.DataFrame(numpy.concatenate((trainPredict[0:len(trainPredict)-look_back-1],testPredict[0:len(testPredict)-look_back-1])),columns=["predicted"])
#print('one',data.count())
#print('two',dataframe.count())
predictionsDF=pd.DataFrame(predictions,columns=["predicted"],index=dataframe.index)
ans=pd.concat([dataframe,predictionsDF],axis=1)
print( ans,[n_steps_in,trainScore,testScore])
| 12,037 |
/25_Feb_practical_2.ipynb
|
0021c32f118d6d2d690ad4167cc2f5d5b2a73455
|
[] |
no_license
|
asel-datascience/ODS2020
|
https://github.com/asel-datascience/ODS2020
| 6 | 8 | null | 2020-01-28T02:39:06 | 2020-01-28T02:27:18 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 96,082 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/asel-datascience/ODS2020/blob/master/25_Feb_practical_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hOjLprxS6ssc" colab_type="code" colab={}
import matplotlib
from matplotlib import pyplot as plt
# + id="obRKwKKJ9QRx" colab_type="code" outputId="4a89c71a-7642-40ef-b6cc-3d0d88ee96c3" colab={"base_uri": "https://localhost:8080/", "height": 284}
x=[1,2,3]
y=[4,5,1]
plt.plot(x,y)
# + id="29kTORxH9h5Q" colab_type="code" colab={}
# + colab_type="code" outputId="0f8868a6-ef99-4daf-b7d2-6f2e1d79390d" id="JtqvgtI2-FHI" colab={"base_uri": "https://localhost:8080/", "height": 284}
x=[1,2,3]
y=[4,5,1]
plt.plot(x,y,"r--")
# + id="XC-G5zTn-MYo" colab_type="code" outputId="557e31fe-3432-41ae-c1a2-ed6452317cd8" colab={"base_uri": "https://localhost:8080/", "height": 284}
plt.plot(x,y,"g*-")
# + id="iBRSAJVp-R-g" colab_type="code" outputId="8e32f655-94f9-4bff-cac1-36a8aae340a8" colab={"base_uri": "https://localhost:8080/", "height": 300}
plt.plot([5,2,7],[2,16,4])
plt.title("Info")
# + id="rOvvHgbV_CBY" colab_type="code" outputId="ed2e87a2-9ba9-46ae-90c7-602e01551ced" colab={"base_uri": "https://localhost:8080/", "height": 298}
plt.plot([5,2,7],[2,16,4])
plt.ylabel("Y axis")
plt.xlabel("X axis")
plt.show()
# + id="bEPynwXE_R6n" colab_type="code" outputId="6b060959-c740-4d52-8717-d1c8a01d8b04" colab={"base_uri": "https://localhost:8080/", "height": 284}
plt.bar([0.25,1.25,2.25,3.25,4.25],[50,40,70,80,20],label="BMW",width=.5)
plt.legend()
# + id="TcFpSsC4B_vh" colab_type="code" outputId="5329de94-d7d5-4eee-9beb-b4aa2d9ded63" colab={"base_uri": "https://localhost:8080/", "height": 306}
# + id="xLdc3A6vCNmP" colab_type="code" colab={}
| 2,084 |
/2 - Gene Expression Classification.ipynb
|
4d6f2cc9ba5800b1be03020cf0288c486459c70b
|
[
"MIT"
] |
permissive
|
kakshak07/High-Dimensional-Visualization-Techniques-and-analysis
|
https://github.com/kakshak07/High-Dimensional-Visualization-Techniques-and-analysis
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 340,875 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gene Expression Monitoring Analysis
# ## Data Preparation
# +
# Import all the libraries that we shall be using
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
# -
# Let's start by taking a look at our target, the ALL/AML label.
# Import labels (for the whole dataset, both training and testing)
y = pd.read_csv('../input/actual.csv')
print(y.shape)
y.head()
# In the combined training and testing sets there are 72 patients, each of whom is labelled either "ALL" or "AML" depending on the type of leukemia they have. Here's the breakdown:
y['cancer'].value_counts()
# We actually need our labels to be numeric, so let's just do that now.
# Recode label to numeric
y = y.replace({'ALL':0,'AML':1})
labels = ['ALL', 'AML'] # for plotting convenience later on
# Now we move on to the features, which are provided for the training and testing datasets separately.
# +
# Import training data
df_train = pd.read_csv('../input/data_set_ALL_AML_train.csv')
print(df_train.shape)
# Import testing data
df_test = pd.read_csv('../input/data_set_ALL_AML_independent.csv')
print(df_test.shape)
# -
df_train.head()
df_test.head()
# +
# Transform all the call values to numbers (not used in this version)
# df_train.replace(['A','P','M'],['1','2','3'], inplace=True)
# df_test.replace(['A','P','M'],['1','2','3'], inplace=True)
# Remove "call" columns from training and testing data
train_to_keep = [col for col in df_train.columns if "call" not in col]
test_to_keep = [col for col in df_test.columns if "call" not in col]
X_train_tr = df_train[train_to_keep]
X_test_tr = df_test[test_to_keep]
# +
train_columns_titles = ['Gene Description', 'Gene Accession Number', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',
'26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38']
X_train_tr = X_train_tr.reindex(columns=train_columns_titles)
# +
test_columns_titles = ['Gene Description', 'Gene Accession Number','39', '40', '41', '42', '43', '44', '45', '46',
'47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59',
'60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72']
X_test_tr = X_test_tr.reindex(columns=test_columns_titles)
# -
# Now we can simply transpose the columns and rows so that genes become features and each patient's observations occupies a single row.
# +
X_train = X_train_tr.T
X_test = X_test_tr.T
print(X_train.shape)
X_train.head()
# -
# This is still messy as the first two rows are more or less duplicates of one another and we haven't yet created the column names. Let's simply turn the second row into the column names and delete the first row.
# +
# Clean up the column names for training and testing data
X_train.columns = X_train.iloc[1]
X_train = X_train.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
# Clean up the column names for Testing data
X_test.columns = X_test.iloc[1]
X_test = X_test.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
print(X_train.shape)
print(X_test.shape)
X_train.head()
# +
# Split into train and test (we first need to reset the index as the indexes of two dataframes need to be the same before you combine them).
# Subset the first 38 patient's cancer types
X_train = X_train.reset_index(drop=True)
y_train = y[y.patient <= 38].reset_index(drop=True)
# Subset the rest for testing
X_test = X_test.reset_index(drop=True)
y_test = y[y.patient > 38].reset_index(drop=True)
# -
# Let's now take a look at some summary statistics:
X_train.describe()
# Clearly there is some variation in the scales across the different features. Many machine learning models work much better with data that's on the same scale, so let's create a scaled version of the dataset.
# +
# Convert from integer to float
X_train_fl = X_train.astype('float64')
X_test_fl = X_test.astype('float64')
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train_scl = scaler.fit_transform(X_train_fl)
X_test_scl = scaler.transform(X_test_fl) # note that we transform rather than fit_transform
# -
# With 7129 features, it's also worth considering whether we might be able to reduce the dimensionality of the dataset. One very common approach to this is principal components analysis (PCA). Let's start by leaving the number of desired components as an open question:
pca = PCA()
pca.fit_transform(X_train)
# Let's set a threshold for explained variance of 90% and see how many features are required to meet that threshold. (Here we are using the code from [this kernel](https://www.kaggle.com/rstogi896/geneclassification-using-gridsearchcv-and-svm).)
# +
total = sum(pca.explained_variance_)
k = 0
current_variance = 0
while current_variance/total < 0.90:
current_variance += pca.explained_variance_[k]
k = k + 1
print(k, " features explain around 90% of the variance. From 7129 features to ", k, ", not too bad.", sep='')
pca = PCA(n_components=k)
pca.fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
var_exp = pca.explained_variance_ratio_.cumsum()
var_exp = var_exp*100
plt.bar(range(k), var_exp);
# -
# We can't plot something in 22 dimensions, so let's just see what the PCA looks like when we pick the top three components. (Here we are using code taken from [this kernel](https://www.kaggle.com/kanav0183/pca-analysis-for-geneclassification).)
# +
pca3 = PCA(n_components=3).fit(X_train)
X_train_reduced = pca3.transform(X_train)
plt.clf()
fig = plt.figure(1, figsize=(10,6 ))
ax = Axes3D(fig, elev=-150, azim=110,)
ax.scatter(X_train_reduced[:, 0], X_train_reduced[:, 1], X_train_reduced[:, 2], c = y_train.iloc[:,1], cmap = plt.cm.Paired, linewidths=10)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
# -
fig = plt.figure(1, figsize = (10, 6))
plt.scatter(X_train_reduced[:, 0], X_train_reduced[:, 1], c = y_train.iloc[:,1], cmap = plt.cm.Paired, linewidths=10)
plt.annotate('Note the Brown Cluster', xy = (30000,-2000))
plt.title("2D Transformation of the Above Graph ")
# ## Model Building
# Having prepared the dataset, it's now finally time to try out some models.
# ### Baseline
print("Simply predicting everything as acute lymphoblastic leukemia (ALL) results in an accuracy of ", round(1 - np.mean(y_test.iloc[:,1]), 3), ".", sep = '')
# ### K-Means Clustering
# First we shall try an unsupervised clustering approach using the scaled data.
# +
kmeans = KMeans(n_clusters=2, random_state=0).fit(X_train_scl)
km_pred = kmeans.predict(X_test_scl)
print('K-means accuracy:', round(accuracy_score(y_test.iloc[:,1], km_pred), 3))
cm_km = confusion_matrix(y_test.iloc[:,1], km_pred)
ax = plt.subplot()
sns.heatmap(cm_km, annot=True, ax = ax, fmt='g', cmap='Greens')
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('K-means Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# This K-means approach is better than the baseline, but we should be able to do better with some kind of supervised learning model.
# ### Naive Bayes
# For our first supervised model, we shall use a very straightforward naive bayes approach.
# +
# Create a Gaussian classifier
nb_model = GaussianNB()
nb_model.fit(X_train, y_train.iloc[:,1])
nb_pred = nb_model.predict(X_test)
print('Naive Bayes accuracy:', round(accuracy_score(y_test.iloc[:,1], nb_pred), 3))
cm_nb = confusion_matrix(y_test.iloc[:,1], nb_pred)
ax = plt.subplot()
sns.heatmap(cm_nb, annot=True, ax = ax, fmt='g', cmap='Greens')
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Naive Bayes Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# The naive bayes model is pretty good, just three incorrect classifications.
# ### Logistic Regression
# Another very standard approach is logistic regression. Here we will be using grid search cross-validation tuning to try and determine the best hyperparameters. We don't need to scale the data for logistic regression, nor are we using the PCA version of the dataset.
# +
log_grid = {'C': [1e-03, 1e-2, 1e-1, 1, 10],
'penalty': ['l1', 'l2']}
log_estimator = LogisticRegression(solver='liblinear')
log_model = GridSearchCV(estimator=log_estimator,
param_grid=log_grid,
cv=3,
scoring='accuracy')
log_model.fit(X_train, y_train.iloc[:,1])
print("Best Parameters:\n", log_model.best_params_)
# Select best log model
best_log = log_model.best_estimator_
# Make predictions using the optimised parameters
log_pred = best_log.predict(X_test)
print('Logistic Regression accuracy:', round(accuracy_score(y_test.iloc[:,1], log_pred), 3))
cm_log = confusion_matrix(y_test.iloc[:,1], log_pred)
ax = plt.subplot()
sns.heatmap(cm_log, annot=True, ax = ax, fmt='g', cmap='Greens')
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Logistic Regression Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# This logistic regression model manages perfect classification.
# ### Support Vector Machine
#
# Here we will try another traditional approach, a support vector machine (SVM) classifier. For the SVM, we use the PCA version of the dataset. Again we use grid search cross-validation to tune the model.
# +
# Parameter grid
svm_param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001, 0.00001, 10], "kernel": ["linear", "rbf", "poly"], "decision_function_shape" : ["ovo", "ovr"]}
# Create SVM grid search classifier
svm_grid = GridSearchCV(SVC(), svm_param_grid, cv=3)
# Train the classifier
svm_grid.fit(X_train_pca, y_train.iloc[:,1])
print("Best Parameters:\n", svm_grid.best_params_)
# Select best svc
best_svc = svm_grid.best_estimator_
# Make predictions using the optimised parameters
svm_pred = best_svc.predict(X_test_pca)
print('SVM accuracy:', round(accuracy_score(y_test.iloc[:,1], svm_pred), 3))
cm_svm = confusion_matrix(y_test.iloc[:,1], svm_pred)
ax = plt.subplot()
sns.heatmap(cm_svm, annot=True, ax = ax, fmt='g', cmap='Greens')
# Labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('SVM Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# This SVM model is making just a couple of classification errors.
# ### Random Forest
# We now move on to tree-based approaches, starting with the very popular random forest. We don't need scaled data for this, so again we won't use the scaled version of the dataset, just a grid search for tuning the hyperparameters.
# +
# Hyperparameters search grid
rf_param_grid = {'bootstrap': [False, True],
'n_estimators': [60, 70, 80, 90, 100],
'max_features': [0.6, 0.65, 0.7, 0.75, 0.8],
'min_samples_leaf': [8, 10, 12, 14],
'min_samples_split': [3, 5, 7]
}
# Instantiate random forest classifier
rf_estimator = RandomForestClassifier(random_state=0)
# Create the GridSearchCV object
rf_model = GridSearchCV(estimator=rf_estimator, param_grid=rf_param_grid, cv=3, scoring='accuracy')
# Fine-tune the hyperparameters
rf_model.fit(X_train, y_train.iloc[:,1])
print("Best Parameters:\n", rf_model.best_params_)
# Get the best model
rf_model_best = rf_model.best_estimator_
# Make predictions using the optimised parameters
rf_pred = rf_model_best.predict(X_test)
print('Random Forest accuracy:', round(accuracy_score(y_test.iloc[:,1], rf_pred), 3))
cm_rf = confusion_matrix(y_test.iloc[:,1], rf_pred)
ax = plt.subplot()
sns.heatmap(cm_rf, annot=True, ax = ax, fmt='g', cmap='Greens')
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Random Forest Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# Random forest almost matches the SVM performance.
# ### XG Boost
# Nowadays, gradient boosting models such as XG Boost (XGB) are extremely popular. Here we shall experiment with three alternative versions: PCA with grid search, PCA without grid search, and the original data without either PCA or grid search.
# #### XGB — PCA with Grid Search
# +
xgb_grid_params = {'max_depth': [3, 4, 5, 6, 7, 8, 10, 12],
'min_child_weight': [1, 2, 4, 6, 8, 10, 12, 15],
'n_estimators': [40, 50, 60, 70, 80, 90, 100, 110, 120, 130],
'learning_rate': [0.001, 0.01, 0.05, 0.1, 0.2, 0.3]}
fixed_params = {'random_state': 0,
'n_jobs': -1}
xgb_model = GridSearchCV(xgb.XGBClassifier(**fixed_params),
param_grid = xgb_grid_params,
scoring = 'accuracy',
cv = 3)
xgb_model.fit(X_train_pca, y_train.iloc[:,1])
print("Best Parameters:\n", xgb_model.best_params_)
# Get the best model
xgb_model_best = xgb_model.best_estimator_
# Make predictions using the optimised parameters
xgb_pred = xgb_model_best.predict(X_test_pca)
print('XGB (PCA with Grid Search) accuracy:', round(accuracy_score(y_test.iloc[:,1], xgb_pred), 3))
cm_xgb = confusion_matrix(y_test.iloc[:,1], xgb_pred)
ax = plt.subplot()
sns.heatmap(cm_xgb, annot=True, ax = ax, fmt='g', cmap='Greens')
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('XGB (PCA with Grid Search) Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# XGB with PCA and grid search isn't particularly good.
# #### XGB — PCA with no Grid Search
# +
xgb2_model = xgb.XGBClassifier()
xgb2_model.fit(X_train_pca, y_train.iloc[:,1])
xgb2_pred = xgb2_model.predict(X_test_pca)
print('Accuracy: ', round(accuracy_score(y_test.iloc[:,1], xgb2_pred), 3))
cm_xgb2 = confusion_matrix(y_test.iloc[:,1], xgb2_pred)
ax = plt.subplot()
sns.heatmap(cm_xgb2, annot=True, ax = ax, fmt='g', cmap='Greens')
# Labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('XGB (PCA without Grid Search) Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# Without the grid search, this is barely any better. However, it seems that the grid search may possibly be resulting in some overfitting.
# #### XGB — no PCA or Grid Search
# +
xgb3_model = xgb.XGBClassifier()
xgb3_model.fit(X_train, y_train.iloc[:,1])
xgb3_pred = xgb3_model.predict(X_test)
print('XGB (no PCA or Grid Search) accuracy:', round(accuracy_score(y_test.iloc[:,1], xgb3_pred), 3))
cm_xgb3 = confusion_matrix(y_test.iloc[:,1], xgb3_pred)
ax = plt.subplot()
sns.heatmap(cm_xgb3, annot=True, ax = ax, fmt='g', cmap='Greens')
# Labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('XGB (no PCA or Grid Search) Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# OK, that's more like it. It seems that the PCA was actually the wrong approach for the XGB model.
# ### Neural Network
# Finally we shall build a neural network using Keras (with TensorFlow as a backend). This is only a "shallow" learning model with one hidden layer; adding several extra layers with so few training datapoints would just lead to overfitting.
# Create model architecture
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(7129,)))
model.add(Dense(1, activation='sigmoid'))
# Compile model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# +
# Create training/validation sets
partial_X_train = X_train_scl[:30]
X_val = X_train_scl[30:]
y_train_label = y_train.iloc[:,1]
partial_y_train = y_train_label[:30]
y_val = y_train_label[30:]
# +
# Set up early stopping
es = EarlyStopping(monitor='val_loss', verbose=1, patience=3)
# Fit model
history = model.fit(partial_X_train,
partial_y_train,
epochs=50,
batch_size=4,
validation_data=(X_val, y_val),
callbacks=[es])
# +
# Make predictions
nn_pred = model.predict_classes(X_test_scl)
print('Neural Network accuracy: ', round(accuracy_score(y_test.iloc[:,1], nn_pred), 3))
cm_nn = confusion_matrix(y_test.iloc[:,1], nn_pred)
ax = plt.subplot()
sns.heatmap(cm_nn, annot=True, ax = ax, fmt='g', cmap='Greens')
# Labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Neural Network Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels, rotation=360);
# -
# The neural network isn't as good as some of the other models.
| 18,052 |
/notebooks/09-investigate-ground-truths.ipynb
|
fadf6120d9b5b8d5ebde4a27674b96f74e468528
|
[] |
no_license
|
KeithWM/dstl_kaggle
|
https://github.com/KeithWM/dstl_kaggle
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 7,940 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# This notebook plots ground truth bounding boxes imported via the network architecture, at least using the custom Python data handling layers.
# +
import os
import sys
import pprint
import numpy as np
import matplotlib
matplotlib.use('Agg')
# %matplotlib inline
from matplotlib import pyplot as plt
import time
# +
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
add_path('/home/ubuntu/src/py-faster-rcnn-windowed_input/caffe-fast-rcnn/python')
add_path('/home/ubuntu/src/py-faster-rcnn-windowed_input/lib')
import caffe
from datasets.factory import get_imdb, list_imdbs
from fast_rcnn.test import test_net, plot_all_bboxes
from fast_rcnn.train import get_training_roidb, train_net, SolverWrapper
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
import roi_data_layer.minibatch as minibatch
print "Loaded caffe version {:s} from {:s}.".format(caffe.__version__, caffe.__path__[0])
# -
class_names = {0: 'buildings',
1: 'misc. manmade structures',
2: 'road',
3: 'track',
4: 'trees',
5: 'crops',
6: 'waterway',
7: 'standing water',
8: 'vehicle large',
9: 'vehicle small'}
def get_color(class_name):
import colorsys
h = (hash(class_name) % np.pi) / np.pi
# v = (hash(class_name) % 10)/20. + .5
N_v = 3
v = .5/(N_v-1)*np.floor((hash(class_name) % (N_v*np.pi))/np.pi) + .5
return colorsys.hsv_to_rgb(h, .8, v)
# +
# configure plotting
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['figure.figsize'] = (15, 15)
caffe.set_mode_gpu()
# +
N_classes = len(class_names) + 1
classes = (0, 1, 4, 7, 8, 9)
arch = 'VGG16'
appendix = '' # This will codify which classes to train (if not all, in which case this string should be empty)
infix = '.' # for directories
if classes is not None:
appendix = '_'+''.join(['{:d}'.format(c) for c in classes])
infix = '{:d}'.format(len(classes))+'_classes'
N_classes = len(classes) + 1
print infix, appendix
# -
this_dir = os.getcwd()
model_dir = os.path.join(this_dir, '..', 'models', arch, infix)
solver_file = os.path.join(model_dir, 'solver.prototxt')
def combined_roidb(imdb_names):
def get_roidb(imdb_name):
print imdb_name
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
imdb = datasets.imdb.imdb(imdb_names)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
cfg_from_file('../experiments/cfgs/faster_rcnn_end2end_dstl.yml')
# cfg.PIXEL_MEANS = np.array([[[102.34775165, 93.19367343, 84.36433397]]])
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
imdb_train, roidb_train = combined_roidb('dstl'+appendix+'_train')
imdb_val, roidb_val = combined_roidb('dstl'+appendix+'_val')
def draw_bboxes(blobs):
gt_boxes, data, im_info = blobs.values()
N_boxes = gt_boxes.shape[0]
print N_boxes
if N_boxes > 0:
fig, ax = plt.subplots(1, 1)
im = (data[0, ...].transpose((1, 2, 0)) + cfg.PIXEL_MEANS).astype(np.uint8)
plt.imshow(im)
for gt_box in gt_boxes:
if classes is None:
class_name = class_names[int(gt_box[-1]) - 1]
else:
class_name = class_names[classes[int(gt_box[-1]) - 1]]
ax.add_patch(
plt.Rectangle((gt_box[0], gt_box[1]),
gt_box[2] - gt_box[0],
gt_box[3] - gt_box[1], fill=False,
edgecolor=get_color(class_name), linewidth=3)
)
ax.text(gt_box[0], gt_box[1], '{}'.format(class_name), color=get_color(class_name), fontsize=18, verticalalignment='bottom')
plt.show()
time.sleep(2.)
# Retrieve the 'blobs' from the input-data layer
for roidb_entry in roidb_val:
print roidb_entry['image']
# fetch multiple batches/tiles per image,
for i in range(2):
blobs = minibatch.get_minibatch([roidb_entry], N_classes)
draw_bboxes(blobs)
blobs.keys()
print blobs['data'].shape
print blobs['gt_boxes'].shape
print blobs['gt_boxes'].min(axis=0)
print blobs['gt_boxes'].max(axis=0)
print blobs['im_info']
cfg.TRAIN.SCALES
# ##
# Note that `elif` and `else` can only be used if an `if` comes before them.<br>
# Only the indented code belongs to the `if`, `elif`, or `else`. An `elif` must come directly after the indented code of the `if`, and the same holds between `elif` and `else`. <br>
# <b>An indented block is introduced by a colon at the end of the preceding line.</b>
# <pre>
# `if` `condition1`:
#     `code1`
# `elif` `condition2`:
#     `code2`
# `else`:
#     `code3`
# </pre>
a = 4
b = 14
if a>b:
print('a is bigger')
elif a<b:
print('b is bigger')
else:
print('a is equal to b.')
# Multiple `elif` checks are also possible. <b>If several `elif` conditions are true, only the first one is executed.</b><br> A Boolean can also be used as the condition.
con1 = False
con2 = True
con3 = True
if con1:
print("con1!")
elif con2:
print("con2!")
elif con3:
print("con3!")
else:
print("No con :(")
# # Loops <a class="anchor" id="Schleifen"></a>
# ## For Loops <a class="anchor" id="For-Schleifen"></a>
# A for loop iterates over container objects, such as lists.
for element in [1,2,3,4,5]:
print(element)
# With `range` you can easily define containers of integers. The first value is the start, the second is the first value that is not in the container, and the third is the step size.
for i in range(-2,5,2):
print(i)
# If `range` is given only one argument, it is interpreted as the end value, with start 0 and step size 1.
for i in range(10):
print(i)
# ## While Loops <a class="anchor" id="While-Schleifen"></a>
# A while loop runs as long as a condition holds, i.e. as long as a Boolean is True.
i = 0
while i<10:
i = i+1
print(i)
# <b>The condition should always depend on what happens inside the loop body; otherwise you are guaranteed to get an infinite loop.</b>
# # Functions I <a class="anchor" id="Funktionen_I"></a>
# Functions are defined with the keyword `def`. They receive arguments and do something with them. A function can also return something; for this the keyword `return` is used.
def summe(summand1,summand2):
result = summand1+summand2
return result
# The variables used inside the function exist only within the function, so the following line raises a NameError
print(summand1)
# A function is called by passing the arguments in parentheses after the function's name
c = summe(1,2)
print(c)
| 7,515 |
/c5-Sequence_Models/week2/Emojify+-+v2.ipynb
|
c4b62dd692fb354333fbaee3154d95d1515df7a3
|
[] |
no_license
|
sniggel/coursera-deeplearning.ai
|
https://github.com/sniggel/coursera-deeplearning.ai
| 0 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 69,280 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ---
# **License**
#
# stats_dist_rosenbrock
#
# Mon Jan 25 20:56:00 2020\
# Copyright 2021\
# Sandro Dias Pinto Vitenti <[email protected]>
#
# ---
# ---
#
# stats_dist_rosenbrock\
# Copyright (C) 2021 Sandro Dias Pinto Vitenti <[email protected]>
#
#
# numcosmo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# numcosmo is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---
# +
import sys
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LogFormatterMathtext
import numpy as np
from scipy.stats import binned_statistic
import matplotlib.ticker as mtick
from IPython.display import HTML, display
import tabulate
from numcosmo_py import Ncm
from numcosmo_py.plotting.tools import confidence_ellipse
from numcosmo_py.plotting.tools import plot_m2lnp
from numcosmo_py.plotting.tools import set_rc_params_article
# %matplotlib inline
# -
Ncm.cfg_init()
Ncm.cfg_set_log_handler(lambda msg: sys.stdout.write(msg) and sys.stdout.flush())
# +
dim = 2
nps = 160
nps_test = 100000
split_fraction = 0.6
rng = Ncm.RNG.seeded_new(None, 123)
sigma1 = math.sqrt(10.0)
sigma2 = 1.0 / sigma1
mu1 = 1.0
def m2lnL_val(va):
return (mu1 - va[0]) ** 2 / sigma1**2 + (va[1] - va[0] ** 2) ** 2 / sigma2**2
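# m2lnL_val implements the Rosenbrock-type target
#   -2 ln pi(x1, x2) = (mu1 - x1)^2 / sigma1^2 + (x2 - x1^2)^2 / sigma2^2,
# with sigma1 = sqrt(10) and sigma2 = 1/sqrt(10), so the density concentrates on the narrow curved ridge x2 ≈ x1^2.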
# +
interps = []
interps_desc = []
kernel0 = Ncm.StatsDistKernelST.new(dim, 1.0)
interp0 = Ncm.StatsDistKDE.new(kernel0, Ncm.StatsDistCV.NONE)
interps.append(interp0)
interps_desc.append("Interp-KDE:Cauchy")
kernel1 = Ncm.StatsDistKernelST.new(dim, 3.0)
interp1 = Ncm.StatsDistKDE.new(kernel1, Ncm.StatsDistCV.NONE)
interps.append(interp1)
interps_desc.append("Interp-KDE:ST3")
kernel2 = Ncm.StatsDistKernelGauss.new(dim)
interp2 = Ncm.StatsDistKDE.new(kernel2, Ncm.StatsDistCV.NONE)
interps.append(interp2)
interps_desc.append("Interp-KDE:Gauss")
kernel3 = Ncm.StatsDistKernelST.new(dim, 1.0)
interp3 = Ncm.StatsDistVKDE.new(kernel3, Ncm.StatsDistCV.NONE)
interps.append(interp3)
interps_desc.append("Interp-VKDE:Cauchy")
kernel4 = Ncm.StatsDistKernelST.new(dim, 3.0)
interp4 = Ncm.StatsDistVKDE.new(kernel4, Ncm.StatsDistCV.NONE)
interps.append(interp4)
interps_desc.append("Interp-VKDE:ST3")
kernel5 = Ncm.StatsDistKernelGauss.new(dim)
interp5 = Ncm.StatsDistVKDE.new(kernel5, Ncm.StatsDistCV.NONE)
interps.append(interp5)
interps_desc.append("Interp-VKDE:Gauss")
# +
for interp in interps:
interp.reset()
theta_train = [] # Training set variables
m2lnp_train = [] # Training set m2lnp
for i in range(nps):
x1 = rng.gaussian_gen(mu1, sigma1)
x2 = rng.gaussian_gen(x1 * x1, sigma2)
theta_i = [x1, x2]
m2lnp_i = m2lnL_val(theta_i)
theta_train.append(theta_i)
m2lnp_train.append(m2lnp_i)
theta_v_i = Ncm.Vector.new_array(theta_i)
for interp in interps:
interp.add_obs(theta_v_i)
theta_train = np.array(theta_train)
m2lnp_train = np.array(m2lnp_train)
m2lnp_train_v = Ncm.Vector.new_array(m2lnp_train)
theta_test = [] # Test set variables
m2lnp_test = [] # Test set m2lnp
for i in range(nps_test):
x1 = rng.gaussian_gen(mu1, sigma1)
x2 = rng.gaussian_gen(x1 * x1, sigma2)
theta_i = [x1, x2]
m2lnp_i = m2lnL_val(theta_i)
theta_test.append(theta_i)
m2lnp_test.append(m2lnp_i)
theta_v_i = Ncm.Vector.new_array(theta_i)
theta_test = np.array(theta_test)
m2lnp_test = np.array(m2lnp_test)
# +
ml = max([len(interp_desc) for interp_desc in interps_desc])
for interp, interp_desc in zip(interps, interps_desc):
interp.set_cv_type(Ncm.StatsDistCV.SPLIT)
interp.set_split_frac(split_fraction)
interp.set_print_fit(False)
interp.prepare_interp(m2lnp_train_v)
calib_os = interp.get_over_smooth()
print(
f"Calibrate {interp_desc:<{ml}} interpolation "
f"with os = {calib_os:15.8g} and rnorm = {interp.get_rnorm():15.8g}"
)
# +
m2lnp_interps = []
for interp in interps:
m2lnp_interp = []
for theta in theta_test:
m2lnp_interp.append(interp.eval_m2lnp(Ncm.Vector.new_array(theta)))
m2lnp_interp = np.array(m2lnp_interp)
m2lnp_interps.append(m2lnp_interp)
# +
set_rc_params_article(ncol=2, nrows=2)
plotn = 250
vmin = 1.0e-8
gs = mpl.gridspec.GridSpec(3, 3, wspace=0.0, hspace=0.4, height_ratios=[1, 1, 2])
fig = plt.figure()
axa = []
axa.append(fig.add_subplot(gs[0, 0]))
axa.append(fig.add_subplot(gs[0, 1], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[0, 2], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[1, 0], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[1, 1], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[1, 2], sharex=axa[0], sharey=axa[0]))
axT = plt.subplot(gs[2, :], sharex=axa[0], sharey=axa[0])
x1_a = np.linspace(-9, 9, plotn)
x2_a = np.linspace(-4, 80, plotn)
for interp, interp_desc, ax0 in zip(interps, interps_desc, axa):
z = np.array(
[
interp.eval_m2lnp(Ncm.Vector.new_array([x1, x2]))
for x2 in x2_a
for x1 in x1_a
]
)
plot_m2lnp(x1_a, x2_a, z, ax0, plotn=plotn, vmin=vmin)
for i in range(0, interp.get_n_kernels()):
y_i, cov_i, n_i, w_i = interp.get_Ki(i)
mu = y_i.dup_array()
cov = np.array([[cov_i.get(i, j) for j in range(2)] for i in range(2)])
confidence_ellipse(mu, cov, ax0, n_std=4, edgecolor="red", lw=0.1)
ax0.set_title(interp_desc)
ax0.set_xlabel("$x_1$")
for i, ax in enumerate(axa):
if i == 0 or i == 3:
ax.set_ylabel("$x_2$")
else:
plt.setp(ax.get_yticklabels(), visible=False)
zT = np.array([m2lnL_val([x1, x2]) for x2 in x2_a for x1 in x1_a])
img = plot_m2lnp(x1_a, x2_a, zT, axT, plotn=plotn, vmin=vmin)
axT.set_title("True Rosenbrock")
axT.set_xlabel("$x_1$")
axT.set_ylabel("$x_2$")
fig.colorbar(img, format=LogFormatterMathtext())
plt.savefig("rosenbrock_kernel_interp.pdf", bbox_inches="tight")
pass
# +
set_rc_params_article(ncol=2, nrows=1)
plotn = 250
vmin = 1.0e-8
fig = plt.figure()
ax = plt.gca()
x1_a = np.linspace(-9, 9, plotn)
x2_a = np.linspace(-4, 80, plotn)
zT = np.array([m2lnL_val([x1, x2]) for x2 in x2_a for x1 in x1_a])
img = plot_m2lnp(x1_a, x2_a, zT, ax, plotn=plotn, vmin=vmin)
ax.set_title("True Rosenbrock")
ax.set_xlabel("$x_1$")
ax.set_ylabel("$x_2$")
fig.colorbar(img, format=LogFormatterMathtext())
plt.savefig("rosenbrock_exact.pdf", bbox_inches="tight")
pass
# +
set_rc_params_article(ncol=2, nrows=1)
plotn = 250
vmin = 1.0e-8
gs = mpl.gridspec.GridSpec(2, 3, wspace=0.0, hspace=0.4, height_ratios=[1, 1])
fig = plt.figure()
axa = []
axa.append(fig.add_subplot(gs[0, 0]))
axa.append(fig.add_subplot(gs[0, 1], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[0, 2], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[1, 0], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[1, 1], sharex=axa[0], sharey=axa[0]))
axa.append(fig.add_subplot(gs[1, 2], sharex=axa[0], sharey=axa[0]))
x1_a = np.linspace(-9, 9, plotn)
x2_a = np.linspace(-4, 80, plotn)
for interp, interp_desc, ax0 in zip(interps, interps_desc, axa):
z = np.array(
[
interp.eval_m2lnp(Ncm.Vector.new_array([x1, x2]))
for x2 in x2_a
for x1 in x1_a
]
)
plot_m2lnp(x1_a, x2_a, z, ax0, plotn=plotn, vmin=vmin)
for i in range(0, interp.get_n_kernels()):
y_i, cov_i, n_i, w_i = interp.get_Ki(i)
mu = y_i.dup_array()
cov = np.array([[cov_i.get(i, j) for j in range(2)] for i in range(2)])
confidence_ellipse(mu, cov, ax0, n_std=4, edgecolor="red", lw=0.1)
ax0.set_title(interp_desc)
ax0.set_xlabel("$x_1$")
for i, ax in enumerate(axa):
if i == 0 or i == 3:
ax.set_ylabel("$x_2$")
else:
plt.setp(ax.get_yticklabels(), visible=False)
fig.colorbar(img, format=LogFormatterMathtext())
plt.savefig("rosenbrock_kernel_interp_only.pdf", bbox_inches="tight")
pass
# +
table = [
[
"Interpolation",
r"|\tilde{\pi}/\pi-1| < 20%",
r"mean $\alpha$",
"Split Fraction",
"Over-Smooth",
]
]
log_diff_array = []
log_prob_array = []
for interp, interp_desc, m2lnp_interp in zip(interps, interps_desc, m2lnp_interps):
p_test = np.exp(-0.5 * m2lnp_test)
p_interp = np.exp(-0.5 * m2lnp_interp)
log_diff = -0.5 * (m2lnp_interp - m2lnp_test)
rel_diff = np.abs(np.expm1(log_diff))
log_prob = -0.5 * (
(m2lnp_interp[0::2] - m2lnp_test[0::2])
- (m2lnp_interp[1::2] - m2lnp_test[1::2])
)
log_prob = np.clip(log_prob, np.log(1.0e-14), 0.0)
prob = np.exp(log_prob)
log_diff_array.append(np.clip(log_diff, np.log(1.0e-3), np.log(1.0e3)))
log_prob_array.append(log_prob)
qp = [0.05, 0.5]
q_rel_diff = np.quantile(rel_diff, qp) * 100.0
q_prob = np.quantile(prob, qp) * 100.0
line = [interp_desc]
line.append(f"{sum(rel_diff<0.3)*100.0/len(rel_diff):.0f}%")
line.append(f"{100.0*np.mean(prob):.0f}%")
line.append(f"{split_fraction * 100.0:.0f}%")
line.append(f"{interp.get_over_smooth():.2f}")
table.append(line)
display(HTML(tabulate.tabulate(table, tablefmt="html")))
# +
set_rc_params_article(ncol=1, nrows=2)
gs = mpl.gridspec.GridSpec(2, 1, hspace=0.0)
fig = plt.figure()
axa = []
axa.append(fig.add_subplot(gs[0, 0]))
axa.append(fig.add_subplot(gs[1, 0], sharex=axa[0]))
p_test_2 = p_test[::2]
index_array_prob = np.argsort(p_test_2)
x_prob = np.log(p_test_2[index_array_prob])
index_array_rel_diff = np.argsort(p_test)
x_rel_diff = np.log(p_test[index_array_rel_diff])
lstyles = [
("k", "-", "o"),
("k", "--", "v"),
("k", ":", "x"),
("b", "-", "o"),
("b", "--", "v"),
("b", ":", "x"),
]
bins = np.logspace(-4, 0, 7)
for desc, log_diff, log_prob, lstyle in zip(
interps_desc, log_diff_array, log_prob_array, lstyles
):
s, edges, _ = binned_statistic(
np.exp(x_rel_diff),
np.abs(np.expm1(log_diff[index_array_rel_diff])),
statistic=lambda x: 100.0 * sum(x < 0.20) / len(x),
bins=bins,
)
axa[0].hlines(
s,
edges[:-1],
edges[1:],
color=lstyle[0],
linestyle=lstyle[1],
label=desc,
lw=0.5,
)
s, edges, _ = binned_statistic(
np.exp(x_prob),
np.exp(log_prob[index_array_prob]),
statistic=lambda x: 100.0 * sum(x) / len(x),
bins=bins,
)
axa[1].hlines(
s,
edges[:-1],
edges[1:],
color=lstyle[0],
linestyle=lstyle[1],
label=desc,
lw=0.5,
)
plt.setp(axa[0].get_xticklabels(), visible=False)
axa[0].yaxis.set_major_formatter(mtick.PercentFormatter())
axa[1].yaxis.set_major_formatter(mtick.PercentFormatter())
axa[0].set_xscale("log")
axa[1].set_xscale("log")
axa[0].legend(loc="upper left")
axa[0].set_ylabel("$|\\tilde\\pi/\\pi-1| < 0.2$")
axa[1].set_ylabel("mean $\\alpha$")
axa[1].set_xlabel("$\\pi$")
plt.savefig("rosenbrock_kernel_err_acpr.pdf", bbox_inches="tight")
pass
b sometimes',
'I am very disappointed','It is the best day in my life',
'I think I will end up alone','My life is so boring','Good job',
'Great so awesome'])
print(X.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(type(X_train))
# -
# Run the next cell to train your model and learn the softmax parameters (W,b).
pred, W, b = model(X_train, Y_train, word_to_vec_map)
print(pred)
# **Expected Output** (on a subset of iterations):
#
# <table>
# <tr>
# <td>
# **Epoch: 0**
# </td>
# <td>
# cost = 1.95204988128
# </td>
# <td>
# Accuracy: 0.348484848485
# </td>
# </tr>
#
#
# <tr>
# <td>
# **Epoch: 100**
# </td>
# <td>
# cost = 0.0797181872601
# </td>
# <td>
# Accuracy: 0.931818181818
# </td>
# </tr>
#
# <tr>
# <td>
# **Epoch: 200**
# </td>
# <td>
# cost = 0.0445636924368
# </td>
# <td>
# Accuracy: 0.954545454545
# </td>
# </tr>
#
# <tr>
# <td>
# **Epoch: 300**
# </td>
# <td>
# cost = 0.0343226737879
# </td>
# <td>
# Accuracy: 0.969696969697
# </td>
# </tr>
# </table>
# Great! Your model has pretty high accuracy on the training set. Let's now see how it does on the test set.
# ### 1.4 - Examining test set performance
#
print("Training set:")
pred_train = predict(X_train, Y_train, W, b, word_to_vec_map)
print('Test set:')
pred_test = predict(X_test, Y_test, W, b, word_to_vec_map)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Train set accuracy**
# </td>
# <td>
# 97.7
# </td>
# </tr>
# <tr>
# <td>
# **Test set accuracy**
# </td>
# <td>
# 85.7
# </td>
# </tr>
# </table>
# Random guessing would have had 20% accuracy given that there are 5 classes. This is pretty good performance after training on only 127 examples.
#
# In the training set, the algorithm saw the sentence "*I love you*" with the label ❤️. You can check, however, that the word "adore" does not appear in the training set. Nonetheless, let's see what happens if you write "*I adore you*."
#
#
# +
X_my_sentences = np.array(["i adore you", "i love you", "funny lol", "lets play with a ball", "food is ready", "not feeling happy"])
Y_my_labels = np.array([[0], [0], [2], [1], [4],[3]])
pred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)
print_predictions(X_my_sentences, pred)
# -
# Amazing! Because *adore* has a similar embedding as *love*, the algorithm has generalized correctly even to a word it has never seen before. Words such as *heart*, *dear*, *beloved* or *adore* have embedding vectors similar to *love*, and so might work too---feel free to modify the inputs above and try out a variety of input sentences. How well does it work?
#
# Note though that it doesn't get "not feeling happy" correct. This algorithm ignores word ordering, so is not good at understanding phrases like "not happy."
#
# Printing the confusion matrix can also help understand which classes are more difficult for your model. A confusion matrix shows how often an example whose label is one class ("actual" class) is mislabeled by the algorithm with a different class ("predicted" class).
#
#
#
print(Y_test.shape)
print(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))
print(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))
plot_confusion_matrix(Y_test, pred_test)
# <font color='blue'>
# **What you should remember from this part**:
# - Even with a 127 training examples, you can get a reasonably good model for Emojifying. This is due to the generalization power word vectors gives you.
# - Emojify-V1 will perform poorly on sentences such as *"This movie is not good and not enjoyable"* because it doesn't understand combinations of words--it just averages all the words' embedding vectors together, without paying attention to the ordering of words. You will build a better algorithm in the next part.
#
# ## 2 - Emojifier-V2: Using LSTMs in Keras:
#
# Let's build an LSTM model that takes as input word sequences. This model will be able to take word ordering into account. Emojifier-V2 will continue to use pre-trained word embeddings to represent words, but will feed them into an LSTM, whose job it is to predict the most appropriate emoji.
#
# Run the following cell to load the Keras packages.
import numpy as np
np.random.seed(0)
from keras.models import Model
from keras.layers import Dense, Input, Dropout, LSTM, Activation
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.initializers import glorot_uniform
np.random.seed(1)
# ### 2.1 - Overview of the model
#
# Here is the Emojifier-v2 you will implement:
#
# <img src="images/emojifier-v2.png" style="width:700px;height:400px;"> <br>
# <caption><center> **Figure 3**: Emojifier-V2. A 2-layer LSTM sequence classifier. </center></caption>
#
#
# ### 2.2 Keras and mini-batching
#
# In this exercise, we want to train Keras using mini-batches. However, most deep learning frameworks require that all sequences in the same mini-batch have the same length. This is what allows vectorization to work: If you had a 3-word sentence and a 4-word sentence, then the computations needed for them are different (one takes 3 steps of an LSTM, one takes 4 steps) so it's just not possible to do them both at the same time.
#
# The common solution to this is to use padding. Specifically, set a maximum sequence length, and pad all sequences to the same length. For example, if the maximum sequence length is 20, we could pad every sentence with "0"s so that each input sentence is of length 20. Thus, a sentence "i love you" would be represented as $(e_{i}, e_{love}, e_{you}, \vec{0}, \vec{0}, \ldots, \vec{0})$. In this example, any sentence longer than 20 words would have to be truncated. One simple way to choose the maximum sequence length is to just pick the length of the longest sentence in the training set.
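#
# As a quick illustration (an added sketch, not part of the graded exercise), Keras ships a helper for exactly this kind of zero-padding; the index lists below are made up for the example:

# +
# Illustrative only: pad two made-up index sequences to a common length of 5 with trailing zeros.
demo_sequences = [[155345, 225122], [220930, 286375, 69714]]
print(sequence.pad_sequences(demo_sequences, maxlen=5, padding='post', value=0))
# each row is padded with zeros at the end up to length 5
# -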
#
# ### 2.3 - The Embedding layer
#
# In Keras, the embedding matrix is represented as a "layer", and maps positive integers (indices corresponding to words) into dense vectors of fixed size (the embedding vectors). It can be trained or initialized with a pretrained embedding. In this part, you will learn how to create an [Embedding()](https://keras.io/layers/embeddings/) layer in Keras and initialize it with the GloVe 50-dimensional vectors loaded earlier in the notebook. Because our training set is quite small, we will not update the word embeddings but will instead leave their values fixed. But in the code below, we'll show you how Keras allows you to either train this layer or leave it fixed.
#
# The `Embedding()` layer takes an integer matrix of size (batch size, max input length) as input. This corresponds to sentences converted into lists of indices (integers), as shown in the figure below.
#
# <img src="images/embedding1.png" style="width:700px;height:250px;">
# <caption><center> **Figure 4**: Embedding layer. This example shows the propagation of two examples through the embedding layer. Both have been zero-padded to a length of `max_len=5`. The final dimension of the representation is `(2,max_len,50)` because the word embeddings we are using are 50 dimensional. </center></caption>
#
# The largest integer (i.e. word index) in the input should be no larger than the vocabulary size. The layer outputs an array of shape (batch size, max input length, dimension of word vectors).
#
# The first step is to convert all your training sentences into lists of indices, and then zero-pad all these lists so that their length is the length of the longest sentence.
#
# **Exercise**: Implement the function below to convert X (array of sentences as strings) into an array of indices corresponding to words in the sentences. The output shape should be such that it can be given to `Embedding()` (described in Figure 4).
# +
# GRADED FUNCTION: sentences_to_indices
def sentences_to_indices(X, word_to_index, max_len):
"""
Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.
The output shape should be such that it can be given to `Embedding()` (described in Figure 4).
Arguments:
X -- array of sentences (strings), of shape (m, 1)
word_to_index -- a dictionary containing the each word mapped to its index
max_len -- maximum number of words in a sentence. You can assume every sentence in X is no longer than this.
Returns:
X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)
"""
m = X.shape[0] # number of training examples
### START CODE HERE ###
# Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)
X_indices = np.zeros((m, max_len))
for i in range(m): # loop over training examples
# Convert the ith training sentence in lower case and split is into words. You should get a list of words.
sentence_words = X[i].lower().split()
# Initialize j to 0
j = 0
# Loop over the words of sentence_words
for w in sentence_words:
# Set the (i,j)th entry of X_indices to the index of the correct word.
X_indices[i, j] = word_to_index[w]
# Increment j to j + 1
j = j + 1
### END CODE HERE ###
return X_indices
# -
# Run the following cell to check what `sentences_to_indices()` does, and check your results.
X1 = np.array(["funny lol", "lets play baseball", "food is ready for you"])
X1_indices = sentences_to_indices(X1,word_to_index, max_len = 5)
print("X1 =", X1)
print("X1_indices =", X1_indices)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **X1 =**
# </td>
# <td>
# ['funny lol' 'lets play baseball' 'food is ready for you']
# </td>
# </tr>
# <tr>
# <td>
# **X1_indices =**
# </td>
# <td>
# [[ 155345. 225122. 0. 0. 0.] <br>
# [ 220930. 286375. 69714. 0. 0.] <br>
# [ 151204. 192973. 302254. 151349. 394475.]]
# </td>
# </tr>
# </table>
# Let's build the `Embedding()` layer in Keras, using pre-trained word vectors. After this layer is built, you will pass the output of `sentences_to_indices()` to it as an input, and the `Embedding()` layer will return the word embeddings for a sentence.
#
# **Exercise**: Implement `pretrained_embedding_layer()`. You will need to carry out the following steps:
# 1. Initialize the embedding matrix as a numpy array of zeroes with the correct shape.
# 2. Fill in the embedding matrix with all the word embeddings extracted from `word_to_vec_map`.
# 3. Define Keras embedding layer. Use [Embedding()](https://keras.io/layers/embeddings/). Be sure to make this layer non-trainable, by setting `trainable = False` when calling `Embedding()`. If you were to set `trainable = True`, then it will allow the optimization algorithm to modify the values of the word embeddings.
# 4. Set the embedding weights to be equal to the embedding matrix
# +
# GRADED FUNCTION: pretrained_embedding_layer
def pretrained_embedding_layer(word_to_vec_map, word_to_index):
"""
Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors.
Arguments:
word_to_vec_map -- dictionary mapping words to their GloVe vector representation.
word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)
Returns:
embedding_layer -- pretrained layer Keras instance
"""
vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)
emb_dim = word_to_vec_map["cucumber"].shape[0] # define dimensionality of your GloVe word vectors (= 50)
### START CODE HERE ###
# Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)
emb_matrix = np.zeros((vocab_len, emb_dim))
# Set each row "index" of the embedding matrix to be the word vector representation of the "index"th word of the vocabulary
for word, index in word_to_index.items():
emb_matrix[index, :] = word_to_vec_map[word]
# Define Keras embedding layer with the correct output/input sizes, make it non-trainable. Use Embedding(...). Make sure to set trainable=False.
embedding_layer = Embedding(vocab_len, emb_dim, trainable=False)
### END CODE HERE ###
# Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the "None".
embedding_layer.build((None,))
# Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.
embedding_layer.set_weights([emb_matrix])
return embedding_layer
# -
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
print("weights[0][1][3] =", embedding_layer.get_weights()[0][1][3])
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **weights[0][1][3] =**
# </td>
# <td>
# -0.3403
# </td>
# </tr>
# </table>
# ### 2.4 - Building the Emojifier-V2
#
# Let's now build the Emojifier-V2 model. You will do so using the embedding layer you have built, and feed its output to an LSTM network.
#
# <img src="images/emojifier-v2.png" style="width:700px;height:400px;"> <br>
# <caption><center> **Figure 3**: Emojifier-v2. A 2-layer LSTM sequence classifier. </center></caption>
#
#
# **Exercise:** Implement `Emojify_V2()`, which builds a Keras graph of the architecture shown in Figure 3. The model takes as input an array of sentences of shape (`m`, `max_len`, ) defined by `input_shape`. It should output a softmax probability vector of shape (`m`, `C = 5`). You may need `Input(shape = ..., dtype = '...')`, [LSTM()](https://keras.io/layers/recurrent/#lstm), [Dropout()](https://keras.io/layers/core/#dropout), [Dense()](https://keras.io/layers/core/#dense), and [Activation()](https://keras.io/activations/).
# +
# GRADED FUNCTION: Emojify_V2
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
"""
Function creating the Emojify-v2 model's graph.
Arguments:
input_shape -- shape of the input, usually (max_len,)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)
Returns:
model -- a model instance in Keras
"""
### START CODE HERE ###
# Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).
sentence_indices = Input(input_shape, dtype='int32')
# Create the embedding layer pretrained with GloVe Vectors (≈1 line)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
# Propagate sentence_indices through your embedding layer, you get back the embeddings
embeddings = embedding_layer(sentence_indices)
# Propagate the embeddings through an LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a batch of sequences.
X = LSTM(128, return_sequences=True)(embeddings)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X trough another LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a single hidden state, not a batch of sequences.
X = LSTM(128, return_sequences=False)(X)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.
X = Dense(5)(X)
# Add a softmax activation
X = Activation('softmax')(X)
# Create Model instance which converts sentence_indices into X.
model = Model(inputs=sentence_indices, outputs=X)
### END CODE HERE ###
return model
# -
# Run the following cell to create your model and check its summary. Because all sentences in the dataset are less than 10 words, we chose `max_len = 10`. You should see your architecture, it uses "20,223,927" parameters, of which 20,000,050 (the word embeddings) are non-trainable, and the remaining 223,877 are. Because our vocabulary size has 400,001 words (with valid indices from 0 to 400,000) there are 400,001\*50 = 20,000,050 non-trainable parameters.
model = Emojify_V2((maxLen,), word_to_vec_map, word_to_index)
model.summary()
# As usual, after creating your model in Keras, you need to compile it and define the loss, optimizer and metrics you want to use. Compile your model using `categorical_crossentropy` loss, `adam` optimizer and `['accuracy']` metrics:
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# It's time to train your model. Your Emojifier-V2 `model` takes as input an array of shape (`m`, `max_len`) and outputs probability vectors of shape (`m`, `number of classes`). We thus have to convert X_train (array of sentences as strings) to X_train_indices (array of sentences as list of word indices), and Y_train (labels as indices) to Y_train_oh (labels as one-hot vectors).
X_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)
Y_train_oh = convert_to_one_hot(Y_train, C = 5)
# Fit the Keras model on `X_train_indices` and `Y_train_oh`. We will use `epochs = 50` and `batch_size = 32`.
model.fit(X_train_indices, Y_train_oh, epochs = 50, batch_size = 32, shuffle=True)
# Your model should achieve close to **100% accuracy** on the training set. The exact accuracy you get may be a little different. Run the following cell to evaluate your model on the test set.
X_test_indices = sentences_to_indices(X_test, word_to_index, max_len = maxLen)
Y_test_oh = convert_to_one_hot(Y_test, C = 5)
loss, acc = model.evaluate(X_test_indices, Y_test_oh)
print()
print("Test accuracy = ", acc)
# You should get a test accuracy between 80% and 95%. Run the cell below to see the mislabelled examples.
# This code allows you to see the mislabelled examples
C = 5
y_test_oh = np.eye(C)[Y_test.reshape(-1)]
X_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)
pred = model.predict(X_test_indices)
for i in range(len(X_test)):
    num = np.argmax(pred[i])
    if num != Y_test[i]:
        print('Expected emoji:'+ label_to_emoji(Y_test[i]) + ' prediction: '+ X_test[i] + label_to_emoji(num).strip())
# Now you can try it on your own example. Write your own sentence below.
# Change the sentence below to see your prediction. Make sure all the words are in the Glove embeddings.
x_test = np.array(['not feeling happy'])
X_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)
print(x_test[0] +' '+ label_to_emoji(np.argmax(model.predict(X_test_indices))))
# Previously, the Emojify-V1 model did not correctly label "not feeling happy," but our implementation of Emojify-V2 got it right. (Keras' outputs are slightly random each time, so you may not have obtained the same result.) The current model still isn't very robust at understanding negation (like "not happy") because the training set is small and so doesn't have a lot of examples of negation. But if the training set were larger, the LSTM model would be much better than the Emojify-V1 model at understanding such complex sentences.
#
# ### Congratulations!
#
# You have completed this notebook! ❤️❤️❤️
#
# <font color='blue'>
# **What you should remember**:
# - If you have an NLP task where the training set is small, using word embeddings can help your algorithm significantly. Word embeddings allow your model to work on words in the test set that may not even have appeared in your training set.
# - Training sequence models in Keras (and in most other deep learning frameworks) requires a few important details:
# - To use mini-batches, the sequences need to be padded so that all the examples in a mini-batch have the same length.
# - An `Embedding()` layer can be initialized with pretrained values. These values can be either fixed or trained further on your dataset. If however your labeled dataset is small, it's usually not worth trying to train a large pre-trained set of embeddings.
# - `LSTM()` has a flag called `return_sequences` to decide whether to return every hidden state or only the last one.
# - You can use `Dropout()` right after `LSTM()` to regularize your network.
#
# Congratulations on finishing this assignment and building an Emojifier. We hope you're happy with what you've accomplished in this notebook!
#
# # 😀😀😀😀😀😀
#
#
#
# ## Acknowledgments
#
# Thanks to Alison Darcy and the Woebot team for their advice on the creation of this assignment. Woebot is a chatbot friend that is ready to speak with you 24/7. As part of Woebot's technology, it uses word embeddings to understand the emotions of what you say. You can play with it by going to http://woebot.io
#
# <img src="images/woebot.png" style="width:600px;height:300px;">
#
#
#
| 33,577 |
/Pyber/.ipynb_checkpoints/Pyber Notebook-checkpoint.ipynb
|
900722219193bed649c79f3b0c8bcb46483f7979
|
[] |
no_license
|
jsubbie/ride_share_exercise
|
https://github.com/jsubbie/ride_share_exercise
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 137,696 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ahmedhassan97/regression-using-one-variable/blob/master/project1_linear_regression_py.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="beWJ_mhYz4Tb" colab_type="code" colab={}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + id="9T2WZXNx0NGO" colab_type="code" colab={}
path = 'data.csv'
data = pd.read_csv(path, header=None, names=['Population',
'Profit'])
# + id="wiyxIUwl0iRv" colab_type="code" outputId="3daff734-9d14-4518-a858-2df47a8458f8" colab={"base_uri": "https://localhost:8080/", "height": 759}
#show data details
print('data = \n' ,data.head(10) )
print('**************************************')
print('data.describe = \n',data.describe())
print('**************************************')
#draw data
data.plot(kind='scatter', x='Population', y='Profit', figsize=(5,5))
# + id="QrnXygZR1NuA" colab_type="code" outputId="e2ac311e-275e-4668-cc2f-cab0f535461b" colab={"base_uri": "https://localhost:8080/", "height": 238}
# adding a new column called ones before the data
data.insert(0, 'Ones', 1)
print('new data = \n' ,data.head(10) )
print('**************************************')
# + id="oQwzAazPzerO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 384} outputId="13f9188c-4b95-4a72-f072-4e471ab10fff"
# separate X (training data) from y (target variable)
#print number of column (97*3)
cols = data.shape[1]
print(cols)
X = data.iloc[:,0:cols-1]
y = data.iloc[:,cols-1:cols]
#print('new data = \n' ,X.head(10) )
#print('new data = \n' ,y.head(10) )
####################################################################################
# convert from data frames to numpy matrices
X = np.matrix(X.values)
y = np.matrix(y.values)
theta = np.matrix(np.array([0,0]))
#print('X \n',X)
#print('X.shape = ' , X.shape)
#print('theta \n',theta)
#print('theta.shape = ' , theta.shape)
#print('y \n',y)
#print('y.shape = ' , y.shape)
#print('**************************************')
##################################################################################################
# cost function
def computeCost(X, y, theta):
z = np.power(((X * theta.T) - y), 2)
#print('z \n',z)
#print(z.shape)
#print('m ' ,len(X))
return np.sum(z) / (2 * len(X))
##print('computeCost(X, y, theta) = ' , computeCost(X, y, theta))
#print('**************************************')
###################################################################################################
# GD function
def gradientDescent(X, y, theta, alpha, iters):
temp = np.matrix(np.zeros(theta.shape))
parameters = int(theta.ravel().shape[1])
#print("temp is ",temp)
#print("parameters is ",parameters)
cost = np.zeros(iters)
for i in range(iters):
error = (X * theta.T) - y
for j in range(parameters):
term = np.multiply(error, X[:,j])
temp[0,j] = theta[0,j] - ((alpha / len(X)) * np.sum(term))
theta = temp
cost[i] = computeCost(X, y, theta)
return theta, cost
##################################################################################################
# initialize variables for learning rate and iterations
alpha = 0.01
iters = 10000
# perform gradient descent to "fit" the model parameters
g, cost = gradientDescent(X, y, theta, alpha, iters)
#print('g = ' , g)
#print('cost = ' , cost[0:50] )
#print('computeCost = ' , computeCost(X, y, g))
#print('**************************************')
###########################################################################
# get best fit line
x = np.linspace(data.Population.min(), data.Population.max(),100)
#print('x \n',x)
#print('g \n',g)
f = g[0, 0] + (g[0, 1] * x)
#print('f \n',f)
###################################################################################
# draw the line
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(x, f, 'r', label='Prediction')
ax.scatter(data.Population, data.Profit, label='Traning Data')
ax.legend()
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size')
# + id="J4JIy3G62J0X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 367} outputId="2fadcc26-75b1-42c3-c36a-8c1a2cfb27a5"
# draw error graph
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(np.arange(iters), cost, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
# + id="Ih4Qe_Wc08YR" colab_type="code" colab={}
# Total Fare ($) Per City
total_fare_per_city_type = pyber.groupby(["Type"]).sum()["Fare"]
# Total Number of Rides Per City_Type
total_rides_per_city_type = pyber.groupby(["Type"]).count()["Fare"]
# +
# Chart for - % of Total Fares by City Type
# Labels for the sections of our pie chart
labels = ["Urban", "Suburban", "Rural"]
# The values of each section of the pie chart
sizes = [40078, 20335, 4255]
# The colors of each section of the pie chart
colors = ["gold", "lightskyblue", "lightcoral"]
# Tells matplotlib to separate the "Urban" slice from the others
explode = (0.1, 0, 0)
# -
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%.f%%', shadow=True, startangle=225)
plt.title("% of Total Fare by City Type")
plt.show()
# +
# Chart for - % of Total Rides by City Type
# Labels for the sections of our pie chart
labels = ["Urban", "Suburban", "Rural"]
# The values of each section of the pie chart
sizes = [1625, 657, 125]
# The colors of each section of the pie chart
colors = ["gold", "lightskyblue", "lightcoral"]
# Tells matplotlib to separate the "Urban" slice from the others
explode = (0.1, 0, 0)
# -
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%.f%%', shadow=True, startangle=225)
plt.title("% of Total Rides by City Type")
plt.show()
# +
# Chart for - % of Total Drivers by City Type
# Labels for the sections of our pie chart
labels = ["Urban", "Suburban", "Rural"]
# The values of each section of the pie chart
sizes = [64501, 9730, 727]
# The colors of each section of the pie chart
colors = ["gold", "lightskyblue", "lightcoral"]
# Tells matplotlib to separate the "Urban" slice from the others
explode = (0.1, 0, 0)
# -
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%.f%%', shadow=True, startangle=225)
plt.title("% of Total Drivers by City Type")
plt.show()
| 6,690 |
/notebooks/vasis/04_design.ipynb
|
4ee77609680960fc23bb63f83ea3efee86b31448
|
[] |
no_license
|
hrbolek/learning
|
https://github.com/hrbolek/learning
| 12 | 21 | null | 2022-09-12T11:39:48 | 2022-05-24T08:55:54 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 10,880 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Design / patterns
# ## Introduction
# Defining rules that hold in general and that, if followed, will make the system you are developing excellent in functionality, maintainability, performance and other attributes is an effort that has been going on ever since informatics became a discipline of its own, and nobody has managed to solve it definitively. The goal is to create a system that is not constrained in its future development. It is important to realize that many information systems (and the same holds for programs) are "alive": they change over time and new functionality keeps being added. The later integration of new functionality must not, however, produce an incoherent system that at some point turns out to be unmaintainable = unextendable = too expensive. Such a requirement is closely related to open-system architectures, where a newly introduced system has to connect to existing systems without restricting the introduction of future systems, systems with functionality nobody has even heard of yet.
#
# It is important to build on the experience of experts, many of whom went through development hell and made many mistakes, and to avoid those mistakes yourself. For this reason, both at school and in practice, we can talk about certain principles whose validity may appear universal.
#
# Although Dijkstra spoke out against object-oriented programming several times and quite strongly, an object and its representation in an information system is still one of the problems being worked on, and object-oriented programming therefore still has a strong influence on the development of information systems.
# > **Required video**
# >
# > [Design Patterns in Python by Peter Ullrich 28min](https://www.youtube.com/watch?v=bsyjSW46TDg)
# It may seem that data persistence is a trivial problem, one moreover solved by the definition of the normal forms. In an information system, however (and certainly in applications), persistent data are not the whole story.
#
# > Persistent data are typically represented by records in a database (table) or in a file. We call them persistent because their existence does not depend on a running application.
#
# An example is user interaction over a map with drawn objects. How can, and how should, the link be realized between the events generated by moving the mouse cursor in the application window (or over a canvas in a web application) and the persistent objects (the drawn features)? The responsibilities could be split roughly as follows (a sketch in code follows the list below):
#
# - drawn feature
#   - draw
#   - createIndex
#
# - feature index
#   - feature
#   - contains
#
# - projection
#   - toMap
#   - fromMap
#
# - tool
#   - onClick
#   - onDblClick
#   - onMouseDown
#   - onMouseUp
#   - onMouseMove
#
# - event
#   - window coordinates
#   - map coordinates
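# A minimal Python sketch of how these responsibilities could be split (added for illustration only; the class and method names follow the list above, everything else is invented):
# +
class Projection:
    """Converts between window and map coordinates (hypothetical linear mapping)."""
    def __init__(self, scale=1.0, offset=(0.0, 0.0)):
        self.scale, self.offset = scale, offset

    def to_map(self, wx, wy):
        return (wx * self.scale + self.offset[0], wy * self.scale + self.offset[1])

    def from_map(self, mx, my):
        return ((mx - self.offset[0]) / self.scale, (my - self.offset[1]) / self.scale)


class Feature:
    """A drawn feature that knows how to build its own index record."""
    def __init__(self, points):
        self.points = points

    def create_index(self):
        xs = [p[0] for p in self.points]
        ys = [p[1] for p in self.points]
        return (min(xs), min(ys), max(xs), max(ys))   # bounding box


class FeatureIndex:
    """Index over features; answers the question 'which features contain this map point?'."""
    def __init__(self, features):
        self.boxes = [(f, f.create_index()) for f in features]

    def contains(self, mx, my):
        return [f for f, (x0, y0, x1, y1) in self.boxes if x0 <= mx <= x1 and y0 <= my <= y1]


class Tool:
    """Receives window events and turns them into queries over the persistent features."""
    def __init__(self, projection, index):
        self.projection, self.index = projection, index

    def on_click(self, window_xy):                    # the event carries window coordinates
        mx, my = self.projection.to_map(*window_xy)
        return self.index.contains(mx, my)


tool = Tool(Projection(), FeatureIndex([Feature([(0, 0), (2, 2)])]))
print(tool.on_click((1, 1)))                          # -> the feature whose bounding box contains the point
# -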
# ## GRASP
# > **Required video**
# >
# > [Conceptual architecture GRASP 13min](https://www.youtube.com/watch?v=pIJbp5Q3jhQ)
#
# The following part of the text is strongly inspired by a series of articles available [here](https://zdrojak.cz/clanky/grasp-1-uvod-a-protected-variations/).
# GRASP is an acronym of the words
# - General
# - Responsibility
# - Assignment
# - Software
# - Patterns
# In his book "Applying UML and Patterns: An Introduction to Object-Oriented Analysis and Design and the Unified Process", Craig Larman presented a set of principles intended as a generalization of experience gained from developing information systems.
# ### Protected variations
# This principle targets the fact that a system is subject to changing needs, and the design should be laid out so that these new needs do not lead to changes in many parts of the system.
#
# Much depends on the designer's ability to anticipate and isolate functionality. An example of good practice is the seven-layer ISO-OSI model, where each layer has its own responsibility for a piece of functionality. The functionality is described by interfaces, and any element satisfying those interfaces can replace the layer; a small sketch of this idea follows below.
#
# https://zdrojak.cz/clanky/grasp-1-uvod-a-protected-variations/
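# A small Python sketch of protecting clients against change behind a stable interface (added for illustration; all names are invented):
# +
from abc import ABC, abstractmethod

class Storage(ABC):
    """The stable interface the rest of the system depends on."""
    @abstractmethod
    def save(self, key, value): ...

    @abstractmethod
    def load(self, key): ...


class InMemoryStorage(Storage):
    def __init__(self):
        self._data = {}

    def save(self, key, value):
        self._data[key] = value

    def load(self, key):
        return self._data.get(key)


class FileStorage(Storage):
    """A different implementation can replace InMemoryStorage without touching client code."""
    def __init__(self, path):
        self.path = path

    def save(self, key, value):
        with open(self.path, 'a') as f:
            f.write("{}={}\n".format(key, value))

    def load(self, key):
        value = None
        with open(self.path) as f:
            for line in f:
                k, _, v = line.rstrip("\n").partition("=")
                if k == key:
                    value = v
        return value


def client_code(storage):
    storage.save("answer", 42)
    return storage.load("answer")

print(client_code(InMemoryStorage()))   # the client does not care which implementation is plugged in
# -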
# ### High cohesion
# High cohesion and low coupling are related to each other.
#
# High cohesion is the idea that the individual pieces of functionality an element provides should be related to one another. The degree of relatedness (cohesion) should be as high as possible. This principle follows on from Protected Variations: if something in the system has to be changed, the degree of cohesion, together with the degree of coupling, determines which elements are affected by the requested change and therefore have to be modified.
#
# In general, elements that undergo changes are a source of defects and thus of unreliability of the information system.
#
# https://zdrojak.cz/clanky/grasp-2-high-cohesion/
# ### Low coupling
# No two elements of the system should provide related functionality. The opposite approach leads to a situation where a single change request forces modifications in several elements of the system.
#
# https://zdrojak.cz/clanky/grasp-3-low-coupling/
# ### Pure fabrication
# The Pure Fabrication principle points out that a class (object) with no counterpart in the real world may sometimes be needed.
#
# ### Polymorphism
# Polymorphism is one of the fundamental concepts of object-oriented programming, alongside:
#
# - encapsulation
# - polymorphism
# - inheritance
#
# If behavior varies by the type of object (class), assign the responsibility for that behavior, using polymorphic methods, to the classes on which the behavior depends; a minimal example follows below.
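# A minimal illustration of assigning type-dependent behavior to polymorphic methods (added; the shapes are invented):
# +
class Shape:
    def area(self):
        raise NotImplementedError


class Circle(Shape):
    def __init__(self, r):
        self.r = r

    def area(self):
        return 3.141592653589793 * self.r ** 2


class Square(Shape):
    def __init__(self, a):
        self.a = a

    def area(self):
        return self.a ** 2


# The caller never branches on the concrete type; each class is responsible for its own behavior.
print(sum(shape.area() for shape in [Circle(1.0), Square(2.0)]))
# -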
# ### Indirection
# "Most problems can be solved by adding another level of indirection."
# Here, however, the complementary maxim must also be mentioned: "Most performance problems can be solved by removing some level of indirection."
#
# The ISO-OSI model could have significantly fewer layers. If you analyze frontend (web client) - backend (server) communication over the HTTP protocol, you will conclude that five layers are enough. The existence of layers 5, 6 and 7 can be seen as a consequence of resolving certain indirections in the design.
# ### Information expert
# Assign a responsibility to the information expert - the element that has the information needed to fulfill that responsibility.
#
# This principle is related to encapsulation: a data structure (object - class) has all the tools needed to carry out every operation on itself. A short sketch follows below.
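# A short sketch of the Information Expert principle (added; names invented): the object that holds the data computes its own result.
# +
class OrderLine:
    def __init__(self, price, quantity):
        self.price, self.quantity = price, quantity


class Order:
    """Information expert: the order owns its lines, so it computes its own total."""
    def __init__(self, lines):
        self.lines = lines

    def total(self):
        return sum(line.price * line.quantity for line in self.lines)


print(Order([OrderLine(10.0, 2), OrderLine(3.5, 4)]).total())   # -> 34.0
# -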
# ### Creator
# Assign class B the responsibility for creating instances of class A if one or more of the following holds:
#
# - B aggregates A objects
# - B contains A objects
# - B keeps records of A objects
# - B closely collaborates with A objects
# - B has the initialization data for A (B is the information expert for initializing A)
#
# In an API endpoint, collections are managed and the responsibility for the items is given to the collection (CRUD operations); a minimal sketch follows below.
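# A minimal sketch of the Creator principle (added; names invented): the collection that aggregates the items is the one that creates them.
# +
class Item:
    def __init__(self, name):
        self.name = name


class Catalog:
    """Creator: the catalog aggregates and contains Item objects, so it creates them."""
    def __init__(self):
        self.items = []

    def create_item(self, name):
        item = Item(name)           # B creates instances of A...
        self.items.append(item)     # ...because B aggregates / contains A
        return item


catalog = Catalog()
catalog.create_item("book")
print(len(catalog.items))           # -> 1
# -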
# ### Controller
# In ASP.NET, the controller is the element that receives events arriving over the HTTP protocol and performs the corresponding operations on the data structures. There is a slight overlap here with the Creator principle.
#
| 6,984 |
/python-basic/.ipynb_checkpoints/static web crawling-checkpoint.ipynb
|
50361d81a11e460bdb6e77bd17c93c85d20b7acd
|
[] |
no_license
|
honux77/lectures
|
https://github.com/honux77/lectures
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,924 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Web crawling
import requests
result = requests.get('http://www.naver.com')
result
'주진형' in result.text
from bs4 import BeautifulSoup
# Parse the page and select the real-time search keyword links from the Naver front page
bs = BeautifulSoup(result.text, 'html.parser')
elements = bs.select('div.section_navbar div.PM_CL_realtimeKeyword_rolling a')
# Each link holds a rank (span.ah_r) and a keyword (span.ah_k)
rank = elements[0].select_one('span.ah_r').text
text = elements[0].select_one('span.ah_k').text
print(rank, text)
for elem in elements:
rank = elem.select_one('span.ah_r').text
text = elem.select_one('span.ah_k').text
print(rank, text)
| 777 |
/Dimension Reduction.ipynb
|
aad975ee4480a1f20649bb31023c6c8b35443af4
|
[] |
no_license
|
clesleycode/machine-learning-series-python
|
https://github.com/clesleycode/machine-learning-series-python
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,312 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# ==========================================
# 3D voxel / volumetric plot with rgb colors
# ==========================================
#
# Demonstrates using ``ax.voxels`` to visualize parts of a color space
#
#
# +
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def midpoints(x):
sl = ()
for i in range(x.ndim):
x = (x[sl + np.index_exp[:-1]] + x[sl + np.index_exp[1:]]) / 2.0
sl += np.index_exp[:]
return x
# prepare some coordinates, and attach rgb values to each
r, g, b = np.indices((17, 17, 17)) / 16.0
rc = midpoints(r)
gc = midpoints(g)
bc = midpoints(b)
# define a sphere about [0.5, 0.5, 0.5]
sphere = (rc - 0.5)**2 + (gc - 0.5)**2 + (bc - 0.5)**2 < 0.5**2
# combine the color components
colors = np.zeros(sphere.shape + (3,))
colors[..., 0] = rc
colors[..., 1] = gc
colors[..., 2] = bc
# and plot everything
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.voxels(r, g, b, sphere,
facecolors=colors,
edgecolors=np.clip(2*colors - 0.5, 0, 1), # brighter
linewidth=0.5)
ax.set(xlabel='r', ylabel='g', zlabel='b')
plt.show()
| 1,437 |
/sqs/.ipynb_checkpoints/SQS-checkpoint.ipynb
|
6030e89300b001165a7f72e936392051a5d9dd80
|
[] |
no_license
|
arunmastermind/AWS-examples-using-BOTO3
|
https://github.com/arunmastermind/AWS-examples-using-BOTO3
| 0 | 2 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 7,501 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import subprocess
import optparse
import threading
from neo4jrestclient.client import GraphDatabase
from getpass import getpass
from time import sleep
def main():
'''
VDNS was written to be included in the MercenaryHuntFramework and on Mercenary-Linux
It can however be run as a standalone application. This application requires that bro-cut
be installed on the host. Default location that it looks for bro-cut is /usr/local/bro/bin/bro-cut
If your installation path for bro-cut is different, modify the sourcefile accordingly.
'''
# Handle command-line arguments
PARSER = optparse.OptionParser()
PARSER.add_option('--logfile', default=None, help='Logfile to read from. Default: %default')
(options, args) = PARSER.parse_args() #changed Feb2017 Throwing error for unknown var OPTIONS
gdb = GraphDatabase('http://localhost:7474/browser/', username='neo4j', password='neo4j')
# Create a BRO log file reader and pull from the logfile
full_query = "cat {0} | /usr/local/bro/bin/bro-cut uid id.orig_h id.orig_p\
id.resp_h id.resp_p query answers qtype_name ".format(options.logfile)
# ___ Fails the first time even after the NUll node is added____
dnsquery = gdb.labels.create("DNS_COMMS") #create label
queries = gdb.labels.create("DNS_QUERIES") #create label
answers = gdb.labels.create("DNS_ANSWERS") #create label
qtypes = gdb.labels.create("DNS_QTYPES") #create label
dns_Sips = gdb.labels.create("DNS_SOURCE_IPS") #create label
dns_Dips = gdb.labels.create("DNS_DEST_IPS") #create label
print ("[+] Creating Labels...")
sleep(5)
nval = gdb.node.create(query="NULL") #create null node
nval.labels.add('DNS_QUERIES') #initialize node label w/ null node
nval2 = gdb.node.create(uid = 'NULL', s_ip = 'NULL', d_ip = 'NULL', s_port = 'NULL', d_port = 'NULL', qtype = 'NULL', query = 'NULL', answer="NULL") #create node
nval2.labels.add('DNS_COMMS')#initialize node label w/ null node
nval3 = gdb.node.create(qtype="NULL") #create node
nval3.labels.add('DNS_QTYPES') #initialize node label w/ null node
nval4 = gdb.node.create(answer="NULL") #create node
nval4.labels.add('DNS_ANSWERS') #initialize node label w/ null node
nval5 = gdb.node.create(s_ip="NULL") #create node
nval5.labels.add('DNS_SOURCE_IPS') #initialize node label w/ null node
nval6 = gdb.node.create(d_ip='NULL')
nval6.labels.add('DNS_DEST_IPS')
p = subprocess.Popen(full_query, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cnt = 0 #Counter to Track Number of Entries based on threads created to ingest them
for line in p.stdout.readlines():
lline = line.split()
for val in range(len(lline)):
if ',' in lline[val]:
lline2 = lline[val].split(',')
else:
lline2 = lline[val]
if (val == 0):
v1 = lline2
elif (val ==1): #Source IP
v2 = lline2
                srclist = gdb.labels.get('DNS_SOURCE_IPS') # handle on query transaction for nodes w/ matching label
check = srclist.get(s_ip=lline2) #check nodes for existence of value of lline2
if (len(check) == 0): # If no query matches a query already under label
hsrc = gdb.nodes.create(s_ip=lline2)
dns_Sips.add(hsrc)
elif (val == 2):
v3 = lline2
elif (val == 3): #Dest IP
v4 = lline2
                dstlist = gdb.labels.get('DNS_DEST_IPS') # handle on query transaction for nodes w/ matching label
check = dstlist.get(d_ip=lline2) #check nodes for existence of value of lline2
if (len(check) == 0): # If no query matches a query already under label
hdst = gdb.nodes.create(d_ip=lline2)
dns_Dips.add(hdst)
elif (val == 4): #Dest Port
v5 = lline2
elif (val == 5): #Query
v6 = lline2
querylist = gdb.labels.get('DNS_QUERIES') #get handle on query transaction for all nodes for the DNS_QUERY LABEL
check = querylist.get(query=lline2)
if (len(check) == 0): # If no query matches a query already under DNS_QUERIES label
hquery = gdb.nodes.create(query=lline2)
queries.add(hquery)
elif (val == 6): #Answers
v7 = lline2
                anslist = gdb.labels.get('DNS_ANSWERS') #get handle on query transaction for all nodes with DNS_ANSWERS label
check = anslist.get(answer=lline2)
if (len(check) == 0): #If no query matches a query already under DNS_ANSWERS label
hanswer = gdb.nodes.create(answer=lline2)
answers.add(hanswer)
elif (val == 7): #QueryType
v8 = lline2
qtlist = gdb.labels.get('DNS_QTYPES') #get handle on query transaction for all nodes with DNS_QUERY label
check = qtlist.get(qtype=lline2)
if (len(check) == 0): #If no query matches a query already under DNS_QTYPES label
hqtype = gdb.nodes.create(qtype=lline2) #Create Node and return handle to Node
qtypes.add(hqtype) #Use Handle to Label DNS_QTYPES to add node using the handle to the node
else:
pass
q = gdb.nodes.create(uid=v1, s_ip=v2, s_port=v3, d_ip=v4, d_port=v5, query=v6, answer=v7, qtype=v8)
new_thread = threading.Thread(dnsquery.add(q))
new_thread.start()
new_thread.join
cnt += 1
print ("[+] {0} DNS Log Entries Injested".format(cnt))
if __name__ == '__main__':
#parse args from cli
if len(sys.argv) < 2:
print ("Error: Too Few Arguments")
print ("<command> --help")
sys.exit()
main()
| 6,247 |
/analysis/r_nba-Bayes_1_Model.ipynb
|
ab75e27e6deb3e07f1212cd976b4ea3ef8c1d51e
|
[] |
no_license
|
blin17/reddit_nba_predictor
|
https://github.com/blin17/reddit_nba_predictor
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 92,844 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
'''
Analysis of /r/nba Posts
@author: Brian Lin
'''
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import re
# +
def return_csv_files(path):
pattern = r'^.*\.csv$'
return [f for f in os.listdir(path) if bool(re.match(pattern,f))]
path = '../data/backlog/'
csv_files = return_csv_files(path)
df = pd.DataFrame()
for csv in csv_files:
csv_df = pd.read_csv(path + csv)
df = pd.concat([df,csv_df])
df['created'] = pd.to_datetime(df.created, unit = 's')
df.index = range(0,len(df))
# -
'''
Randomly shuffle the dataframe and generates a training, cross-validation, and test set
'''
random_df = df.reindex(np.random.permutation(df.index))
training_df = random_df.iloc[0:12000]
cross_validation_df = random_df.iloc[12000:15000]
test_df = random_df.iloc[15000:]
def front_page(score,n=150):
if score >= n:
return 1
else:
return 0
training_Y = training_df.score.apply(front_page)
cross_validation_Y = cross_validation_df.score.apply(front_page)
test_Y = test_df.score.apply(front_page)
# +
common_words = pd.read_csv('../lib/common_words.txt', header = None)[0].values
def remove_common_words(title, common_words):
tw = map(lambda d: d.strip(".,:()[]!?\"").lower(),title.split())
return [w for w in tw if w not in common_words]
def generate_words_and_scores(titles, scores, common_words):
title_words = []
score_words = []
for index in range(len(titles)):
title_word_list = remove_common_words(titles.iloc[index], common_words)
title_words.extend(title_word_list)
score_words.extend([scores.iloc[index]] * len(title_word_list))
title_words_s = pd.Series(title_words)
words_scores_df = pd.concat([title_words_s, pd.Series(score_words)], axis = 1)
words_scores_df.columns = ['word', 'score']
return words_scores_df
titles = training_df.title
scores = training_df.score
word_df = generate_words_and_scores(titles,scores, common_words)
word_df_agg = word_df.groupby('word').agg({'score':['mean','std','median','count']})
# -
words_sorted_median = word_df_agg[word_df_agg[('score','count')] > 10].sort_values(by=[('score','median')], ascending=False)
top200 = np.array(words_sorted_median[:200].index)
bottom200 = np.array(words_sorted_median[-200:].index)
top_bottom200 = np.array(pd.concat([words_sorted_median[:200],words_sorted_median[-200:]]).index)
# +
#Generates the feature word vectors
def create_feature_row(title, word_vec):
feature_row = np.zeros((len(word_vec),))
for word in title:
if word in word_vec:
word_index = np.where(word_vec==word)[0][0]
feature_row[word_index] = 1
return feature_row
def generate_feature_vector(title_s, top_words, common_words):
l = []
for title in title_s:
title_r = remove_common_words(title, common_words)
l.append(create_feature_row(title_r, top_words))
return l
def sanitize_titles(title_s):
l = []
for title in title_s:
title_r = remove_common_words(title, common_words)
l.append(title_r)
return l
# -
def prediction_score(predicted_p, Y, threshold=0.5):
    # turn predicted probabilities into hard 0/1 labels
    predicted_Y = np.array([1 if x > threshold else 0 for x in predicted_p])
    true_pos = 0
    false_pos = 0
    true_neg = 0
    false_neg = 0
    for i in range(len(predicted_Y)):
        if predicted_Y[i] == 1 and Y[i] == 1:
            true_pos += 1
        elif predicted_Y[i] == 1 and Y[i] == 0:
            false_pos += 1
        elif predicted_Y[i] == 0 and Y[i] == 1:
            false_neg += 1          # missed front-page post
        elif predicted_Y[i] == 0 and Y[i] == 0:
            true_neg += 1
    wrong = float(np.sum(np.abs(predicted_Y - Y)))
    total = float(len(predicted_Y))
    ans = dict()
    ans['accuracy'] = 1 - (wrong / total)
    ans['true_pos'] = true_pos
    ans['true_neg'] = true_neg
    ans['false_pos'] = false_pos
    ans['false_neg'] = false_neg
    # guard against division by zero when a class is never predicted or never present
    if true_pos + false_pos > 0:
        ans['precision'] = float(true_pos) / float(true_pos + false_pos)
    if true_pos + false_neg > 0:
        ans['recall'] = float(true_pos) / float(true_pos + false_neg)
    if 'precision' in ans and 'recall' in ans and (ans['precision'] + ans['recall']) > 0:
        ans['f1_score'] = 2 * ans['precision'] * ans['recall'] / (ans['precision'] + ans['recall'])
    return ans
# +
def generate_bayes_prob(words, X, Y):
pxy1 = {}
pxy0 = {}
for index in range(len(words)):
word = words[index]
pxy1[word] = float(((X.iloc[:,index] == 1) & (Y == 1)).sum() +1) / float((Y.values == 1).sum() +2)
pxy0[word] = float(((X.iloc[:,index] == 1) & (Y == 0)).sum() +1) / float((Y.values == 0).sum() +2)
return pxy1, pxy0
def calculate_prob(sanitized_title_list,debug=False):
global bayes_pxy1,bayes_pxy0, py1, py0,pno1, pno0
pyx1 = 1.0
pyx0 = 1.0
for word in sanitized_title_list:
if word in bayes_pxy1:
pyx1 *= bayes_pxy1[word]
pyx0 *= bayes_pxy0[word]
if debug:
print 'yes'
else:
# applies a factor if word not seen before
#pyx1 *= pno1
#pyx0 *= pno0
if debug:
print 'no'
if debug:
print word, pyx1, pyx0
return pyx1*py1, pyx0*py0
# -
#word_vector = np.array(word_df_agg.index)
word_vector = top_bottom200
title_features_bayes_df = pd.DataFrame(generate_feature_vector(titles,word_vector,common_words), index = training_df.index)
sum(title_features_bayes_df.sum(axis =1) == 0)
bayes_pxy1,bayes_pxy0 = generate_bayes_prob(word_vector, title_features_bayes_df, training_Y)
Y = training_Y
py1 = float(sum(Y.values == 1)) / len(Y)
py0 = float(sum(Y.values == 0)) / len(Y)
pno1 = 1/float(sum(Y.values == 1))
pno0 = 1/float(sum(Y.values == 0))
sanitized_titles = sanitize_titles(training_df.title)
class_probs = []
for i in range(len(sanitized_titles)):
title = sanitized_titles[i]
class_probs.append(calculate_prob(title))
probs = pd.DataFrame(class_probs)
sum(probs[0] > probs[1])
probs.index = training_df.index
pd.concat([training_df[probs[0] > probs[1]][['title','score']], probs], axis=1, join= 'inner').head()
# +
'''
Analysis to determine whether the time of day affects how likely a post is to make the front page.
Posting at the right time (roughly between 16:00 and 20:00) appears to help.
'''
def fp_per_hour(domain= None, fp=True):
# returns percent of posts that make front page per hour
s0 = None
s1 = None
if domain == None:
s0 = (training_df[(Y.values == 0)].created.apply(lambda d: d.hour))
s1 = (training_df[(Y.values == 1)].created.apply(lambda d: d.hour))
sall = (training_df.created.apply(lambda d: d.hour))
else:
s0 = (training_df[(Y.values == 0) & (training_df.domain == domain)].created.apply(lambda d: d.hour))
s1 = (training_df[(Y.values == 1) & (training_df.domain == domain)].created.apply(lambda d: d.hour))
sall = (training_df[training_df.domain == domain].created.apply(lambda d: d.hour))
s0 = (s0.groupby(s0).size())
s1 = (s1.groupby(s1).size())
sall = sall.groupby(sall).size()
if fp:
return s1/sall
else:
return sall
fig = plt.figure()
ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8 = (fig.add_subplot(241), fig.add_subplot(242), fig.add_subplot(243), fig.add_subplot(244),
fig.add_subplot(245), fig.add_subplot(246), fig.add_subplot(247), fig.add_subplot(248))
fig.set_figheight(4)
fig.set_figwidth(12)
ax1.plot(fp_per_hour(None, False))
ax1.set_title('# Posts')
ax5.plot(fp_per_hour())
ax5.set_title('% Posts making FP')
ax2.plot(fp_per_hour('self.nba', False))
ax2.set_title('# self.nba posts')
ax6.plot(fp_per_hour('self.nba'))
ax6.set_title('% Posts making FP')
ax3.plot(fp_per_hour('twitter.com', False))
ax3.set_title('# Twitter posts')
ax7.plot(fp_per_hour('twitter.com'))
ax7.set_title('% Twitter Posts making FP')
ax4.plot(fp_per_hour('youtube.com', False))
ax4.set_title('# Youtube posts')
ax8.plot(fp_per_hour('youtube.com'))
ax8.set_title('% Youtube Posts making FP')
fig.tight_layout()
# -
Y_predicted3 = probs.apply(lambda d: 1 if d[0] > d[1] else 0, axis = 1)
predicted_Y3 = np.array(Y_predicted3)
p_score = prediction_score(predicted_Y3, Y.values)
p_score
training_df[(predicted_Y3 == 1) & (Y.values == 1)][['title','score','domain','created']].domain.value_counts()[:15].plot(kind='barh')
| 8,965 |
/dog_app.ipynb
|
bdfcbe4f42d8047e665f4d186d2e93457f6cfc17
|
[] |
no_license
|
AishaHakami/CNN-Project-Dog-Breed-Classifier
|
https://github.com/AishaHakami/CNN-Project-Dog-Breed-Classifier
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,824,132 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Random player with _Football_
# Example of a random player for the [_Football_](https://github.com/google-research/football) environment, scenario `academy_empty_goal_close`.
#
# Local execution: requires installation following these [instructions](https://github.com/jgromero/eci2019-DRL/blob/master/ejercicio/Instrucciones%20Entorno%20Football.pdf).
#
# <!--
# <a href="http://www.youtube.com/watch?feature=player_embedded&v=F8DcgFDT9sc" target="_blank"><img src="http://img.youtube.com/vi/F8DcgFDT9sc/0.jpg"
# alt="IMAGE ALT TEXT HERE" width="580" border="3" /></a>
# -->
# ## List the available scenarios
from gfootball.env import scenario_builder
scenario_builder.all_scenarios()
# ## Create the environment
# +
import gfootball.env as football_env
env = football_env.create_environment(
env_name='academy_empty_goal_close',
    stacked=False,               # state only, not pixels
    representation='simple115',  # state only, not pixels
    with_checkpoints=True,       # intermediate rewards, not only when scoring
    render=True)                 # render graphically
# -
# ## Explore the virtual environment
#
# First, let's explore how this environment works.
# Each state is a tuple of 115 elements.
#
# | Information        | Structure       | Explanation
# | -------------------|-----------------| ----------------------
# | Ball position      | (x, y, z)       |
# | Ball direction     | (x, y, z)       |
# | Ball ownership     | array(3)        | (1, 0, 0): nobody, (0, 1, 0): home team, (0, 0, 1): away team
# | Active player      | array(11)       | one-hot encoding of the active home player
# | Home positions     | 11 x array(2)   | 11 positions (x, y), one per home player
# | Home movement      | 11 x array(2)   | 11 movement vectors (x, y), one per home player
# | Away positions     | 11 x array(2)   | 11 positions (x, y), one per away player
# | Away movement      | 11 x array(2)   | 11 movement vectors (x, y), one per away player
# | Game mode          | array(7)        | encoding of the game mode: {NormalMode, KickOffMode, GoalKickMode, FreeKickMode, CornerMode, ThrowInMode, PenaltyMode}
#
# In the `academy_empty_goal_close` scenario only **51 elements are active**.
# The agent can perform 21 actions. (An illustrative helper for slicing this observation vector is shown right below.)
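# The following helper (added for illustration; the offsets simply follow the order of the table above and are an assumption, so the actual `simple115` layout may differ) slices an observation into named parts.
# +
import numpy as np

def split_observation(obs):
    obs = np.asarray(obs).ravel()
    assert obs.size == 115
    return {
        'ball_position':  obs[0:3],
        'ball_direction': obs[3:6],
        'ball_ownership': obs[6:9],
        'active_player':  obs[9:20],
        'home_positions': obs[20:42].reshape(11, 2),
        'home_movement':  obs[42:64].reshape(11, 2),
        'away_positions': obs[64:86].reshape(11, 2),
        'away_movement':  obs[86:108].reshape(11, 2),
        'game_mode':      obs[108:115],
    }

# Example usage: split_observation(env.reset()) returns the named slices of the current state.
# -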
from gfootball.env import football_action_set
print(env.observation_space)
print(env.action_space)
football_action_set.action_set_dict['default']
# ## Random agent
# Implementation of a random agent that plays for 10 episodes.
# +
# for i in range(1, 10):
# env.reset()
# acc_reward = 0
# while True:
# action = env.action_space.sample()
# observation, reward, done, info = env.step(action)
# acc_reward += reward
# if done:
# break
# print("Recomensa episodio {:d}: {:.2f}".format(i, acc_reward))
# env.close()
# -
# To disable _logging_:
import gfootball.env as football_env
from gfootball.env import football_action_set
env = football_env.create_environment(
env_name='academy_empty_goal_close',
stacked=False, # solo estado, no pixeles
representation='simple115', # solo estado, no pixeles
with_checkpoints=True, # recompensas intermedias, no solo al marcar
render=True)
football_action_set.action_set_dict['default']
import sys
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
# %matplotlib inline
import logging, os
logging.disable(logging.WARNING)
from dqn_agent import Agent
agent = Agent(state_size=115, action_size=21, seed=0)
# +
def dqn(n_episodes=300, batch_length=50, eps_start=0.01, eps_end=0.01, eps_decay=0.99):
"""Deep Q-Learning.
Params
======
        n_episodes (int): maximum number of training episodes
        batch_length (int): window length used to average scores and decide when to save a checkpoint
        eps_start (float): initial value of epsilon
        eps_end (float): final value of epsilon
        eps_decay (float): multiplicative decay factor (per episode) for epsilon
"""
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
    scores = []                                  # score of each episode
    scores_window = deque(maxlen=batch_length)   # scores of the last batch_length episodes
    mean_scores = []
    eps = eps_start                              # initialize epsilon
    promedio = 0                                 # running mean of the score window
for i_episode in range(1, n_episodes+1):
state = env.reset()
acc_reward = 0
while True:
            # choose action At with an epsilon-greedy policy
            action = agent.act(state, eps)
            # apply At and obtain Rt+1, St+1
            observation, reward, done, info = env.step(action)
            # store <St, At, Rt+1, St+1>
            agent.memory.add(state, action, reward, observation, done)
            # train & update
            agent.step(state, action, reward, observation, done)
            acc_reward += reward
            state = observation                  # move on to the next state
            if done:
                break
        scores_window.append(acc_reward)         # save the last score
        scores.append(acc_reward)                # save the last score
        eps = max(eps_end, eps_decay*eps)        # decay epsilon
if len(scores_window)==batch_length:
promedio = np.mean(scores_window)
mean_scores.append(promedio)
print('\rEpisodio {}\tPuntuacion media (ultimos {:d}): {:.2f}'.format(i_episode, i_episode-int(i_episode/batch_length)*batch_length, np.mean(scores_window)), end="")
if i_episode % batch_length == 0:
print('\rEpisodio {}\tPuntuacion media ({:d} anteriores): {:.2f}'.format(i_episode, batch_length, np.mean(scores_window)))
if len(scores_window)==batch_length and promedio >= max(mean_scores):
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
# if np.mean(scores_window)>=1.90:
# print('\nProblema resuelto en {:d} episodios!\tPuntuacion media (ultimos {:d}): {:.2f}'.format(i_episode-50, 50, np.mean(scores_window)))
# torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') # guardar pesos de agente entrenado
# break
return scores
scores = dqn()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Puntuacion')
plt.xlabel('Episodio #')
plt.show()
# -
# ## Trained agent
# Running the trained agent for 50 evaluation episodes.
# +
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
scores = [] # puntuaciones de cada episodio
for i_episode in range(1, 51):
state = env.reset()
acc_reward = 0
while True:
        # choose action At greedily (epsilon = 0)
        action = agent.act(state, 0.0)
        # apply At and obtain Rt+1, St+1
        observation, reward, done, info = env.step(action)
        state = observation                      # move on to the next state
        acc_reward += reward
        if done:
            break
    scores.append(acc_reward)                    # save the last score
print('\rEpisodio {}\tPuntuacion media: {:.2f}'.format(i_episode, np.mean(scores)), end="")
env.close()
print('\npuntuación media final: %f' %np.mean(scores))
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores, '.')
plt.ylabel('Puntuacion')
plt.xlabel('Episodio #')
plt.show()
| 7,975 |
/07_clustering_pca/07_pca_solved.ipynb
|
ab20c7f984ba799980c6530d7a0fded816194d44
|
[] |
no_license
|
makhalanobis/machine-learning-with-love
|
https://github.com/makhalanobis/machine-learning-with-love
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,563,072 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Introduction to Machine Learning
#
# ## Seminar #7
#
# ### Екатерина Кондратьева
#
# [email protected]
#
# ## Unsupervised learning: clustering. Dimensionality reduction with PCA.
# -
# ## 1. Unsupervised learning: clustering.
# Cluster analysis is a multivariate statistical procedure that takes data describing a sample of objects and arranges the objects into relatively homogeneous groups. Clustering belongs to statistical data processing and to the broad class of unsupervised learning problems.
#
# Sources:
# 1. Lecture: https://ru.coursera.org/lecture/unsupervised-learning/primier-klastierizatsiia-tiekstov-po-tiemie-bVVzw
# 2. https://ru.coursera.org/lecture/python-for-data-science/mietod-glavnykh-komponient-principal-component-analysis-X8bem
# 3. https://ru.coursera.org/lecture/unsupervised-learning/otbor-priznakov-na-osnovie-modieliei-qnzXA
# 4. https://www.hse.ru/mirror/pubs/share/215285956
#linear algebra
import numpy as np
#data structures
import pandas as pd
#ml models
import scipy as sp
import sklearn
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.svm import SVR
#plots
import matplotlib.pyplot as plt
# %matplotlib inline
#beautiful plots
import seaborn as sns
#linear regression
import statsmodels.api as sm
#off the warnings
import warnings
warnings.filterwarnings("ignore")
from sklearn.datasets import load_breast_cancer
from sklearn.neighbors import KNeighborsClassifier #KNN
from sklearn.linear_model import LogisticRegression #Logistic Regression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# ### From previous classes: classification on the iris dataset
# +
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
clf = KNeighborsClassifier(n_neighbors=30,
metric='chebyshev',
p=2)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
# -
clf = KNeighborsClassifier(n_neighbors=30,
metric='chebyshev',
p=2)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
# +
pair=[0, 1]
X = X_train[:, [0, 1]]
y = y_train
n_classes = 3
plot_colors = "ryb"
plot_step = 0.005
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
clf = KNeighborsClassifier(n_neighbors=30,
                           metric='chebyshev',
                           p=2).fit(X, y)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
#print(clf.score(X_test[:, [0, 1]], y_test))  # ---- check this later
Z = Z.reshape(xx.shape)
plt.figure(figsize=(15, 10))
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
# -
# # Why did we lose accuracy?
iris = load_iris()
X = iris.data
y = iris.target
len(y)
len(y)
# +
import pydotplus
from IPython.display import Image
from sklearn import tree
pair = [0, 1]
X = X_train[:, pair]
y = y_train
clf = DecisionTreeClassifier(random_state=42, max_depth=3).fit(X, y)
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=['petal length',
'petal width'],
class_names=iris.target_names,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
print(clf.score(X_test[:, pair], y_test))
Image(graph.create_png())
# -
# ### What if we do not know how many classes our sample contains?
# The k-means algorithm: https://ru.wikipedia.org/wiki/%D0%9C%D0%B5%D1%82%D0%BE%D0%B4_k-%D1%81%D1%80%D0%B5%D0%B4%D0%BD%D0%B8%D1%85
#
# Implementation: https://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_iris.html
#
# +
# Code source: Gaël Varoquaux, Modified for documentation by Jaques Grobler, License: BSD 3 clause
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
import matplotlib.style
plt.style.use('ggplot')
np.random.seed(42)
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = [('k_means_iris_8', KMeans(n_clusters=8)),
('k_means_iris_3', KMeans(n_clusters=3)),
('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,
init='random'))]
fignum = 1
titles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']
for name, est in estimators:
fig = plt.figure(fignum, figsize=(8, 8))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=60, azim=120)
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2],
c=labels.astype(np.float), edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title(titles[fignum - 1])
ax.dist = 12
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(8, 8))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=60, azim=120)
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean(),
X[y == label, 2].mean() + 2, name,
horizontalalignment='center',
bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title('Ground Truth')
ax.dist = 12
fig.show()
# -
# How did we know to look for 3-8 clusters with `kmeans`?
#
#
# Sheer guesswork! We simply guessed how many to pick.
# Silhouette Coefficient: https://en.wikipedia.org/wiki/Silhouette_(clustering)
from sklearn.metrics import silhouette_score
silhouette_score
est = KMeans(n_clusters=3)
est.fit(X)
labels = est.labels_
# silhouette_score expects the data matrix and the cluster labels
silhouette_score(X, labels)
# How will the score change with the class size?
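# A small added sketch (not in the original notebook): sweep the number of clusters and score each KMeans fit with the silhouette coefficient computed on the features themselves; the peak hints at a reasonable cluster count for the iris data.
# +
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
from sklearn.metrics import silhouette_score
X_sil = load_iris().data
for k in range(2, 9):
    km_k = KMeans(n_clusters=k, random_state=42).fit(X_sil)
    print(k, round(silhouette_score(X_sil, km_k.labels_), 3))
# -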
# ### What if we don't want to do a brute-force search? How can we estimate the intrinsic dimensionality of the sample?
# Paper from NIPS 2004: https://papers.nips.cc/paper/2577-maximum-likelihood-estimation-of-intrinsic-dimension.pdf
# +
"""
Implementation of 'Maximum Likelihood Estimation of Intrinsic Dimension' by Elizaveta Levina and Peter J. Bickel
how to use
----------
The goal is to estimate intrinsic dimensionality of data, the estimation of dimensionality is scale dependent
(depending on how much you zoom into the data distribution you can find different dimesionality), so they
propose to average it over different scales, the interval of the scales [k1, k2] are the only parameters of the algorithm.
This code also provides a way to repeat the estimation with bootstrapping to estimate uncertainty.
Here is one example with swiss roll :
from sklearn.datasets import make_swiss_roll
X, _ = make_swiss_roll(1000)
k1 = 10 # start of interval(included)
k2 = 20 # end of interval(included)
intdim_k_repeated = repeated(intrinsic_dim_scale_interval,
X,
mode='bootstrap',
nb_iter=500, # nb_iter for bootstrapping
verbose=1,
k1=k1, k2=k2)
intdim_k_repeated = np.array(intdim_k_repeated)
# the shape of intdim_k_repeated is (nb_iter, size_of_interval) where
# nb_iter is number of bootstrap iterations (here 500) and size_of_interval
# is (k2 - k1 + 1).
# Plotting the histogram of intrinsic dimensionality estimations repeated over
# nb_iter experiments
plt.hist(intdim_k_repeated.mean(axis=1))
"""
# from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
def intrinsic_dim_sample_wise(X, k=5):
neighb = NearestNeighbors(n_neighbors=k+1).fit(X)
dist, ind = neighb.kneighbors(X) # distances between the samples and points
dist = dist[:, 1:] # the distance between the first points to first points (as basis ) equals zero
# the first non trivial point
dist = dist[:, 0:k]# including points k-1
assert dist.shape == (X.shape[0], k) # requirments are there is no equal points
assert np.all(dist > 0)
d = np.log(dist[:, k - 1: k] / dist[:, 0:k-1]) # dinstanec betveen the bayeasan statistics
d = d.sum(axis=1) / (k - 2)
d = 1. / d
intdim_sample = d
return intdim_sample
def intrinsic_dim_scale_interval(X, k1=10, k2=20):
X = pd.DataFrame(X).drop_duplicates().values # remove duplicates in case you use bootstrapping
intdim_k = []
for k in range(k1, k2 + 1): # in order to reduse the noise by eliminating of the nearest neibours
m = intrinsic_dim_sample_wise(X, k).mean()
intdim_k.append(m)
return intdim_k
def repeated(func, X, nb_iter=100, random_state=None, mode='bootstrap', **func_kw):
if random_state is None:
rng = np.random
else:
rng = np.random.RandomState(random_state)
nb_examples = X.shape[0]
results = []
iters = range(nb_iter)
for i in iters:
if mode == 'bootstrap':# and each point we want to resample with repeating points to reduse the errors
#232 111 133
Xr = X[rng.randint(0, nb_examples, size=nb_examples)]
elif mode == 'shuffle':
ind = np.arange(nb_examples)
rng.shuffle(ind)
Xr = X[ind]
elif mode == 'same':
Xr = X
else:
raise ValueError('unknown mode : {}'.format(mode))
results.append(func(Xr, **func_kw))
return results
# +
k1 = 2 # start of interval(included)
k2 = 6 # end of interval(included)
nb_iter = 50 # more iterations more accuracy
# intrinsic_dim_scale_interval gives better estimation
X = iris.data
intdim_k_repeated = repeated(intrinsic_dim_scale_interval,
X,
mode='bootstrap',
nb_iter=nb_iter, # nb_iter for bootstrapping
k1=k1, k2=k2)
intdim_k_repeated = np.array(intdim_k_repeated)
print (np.shape(intdim_k_repeated))
# -
plt.plot(range(k1,k2+1), np.mean(intdim_k_repeated, axis=0),'b')  # the mean value
# For clarity, also plot the one-standard-deviation bands
plt.plot(range(k1,k2+1), np.mean(intdim_k_repeated, axis=0)+ np.std(intdim_k_repeated, axis=0),'r')
plt.plot(range(k1,k2+1), np.mean(intdim_k_repeated, axis=0)- np.std(intdim_k_repeated, axis=0),'y')
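# Added sanity check (relies on the helper functions defined above and scikit-learn's make_swiss_roll): the swiss roll is a 2-D manifold embedded in 3-D, so the Levina-Bickel estimate should come out close to 2.
# +
from sklearn.datasets import make_swiss_roll
X_roll, _ = make_swiss_roll(1000, random_state=42)
roll_est = np.array(repeated(intrinsic_dim_scale_interval, X_roll,
                             mode='bootstrap', nb_iter=20, k1=10, k2=20))
print('estimated intrinsic dimension of a swiss roll:', roll_est.mean())
# -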
# # 2. Dimensionality reduction with PCA
# Lecture (in Russian):
# https://ru.coursera.org/lecture/unsupervised-learning/mietod-ghlavnykh-komponient-rieshieniie-e72bH
import math
sample_size = 50
sample_dimensionality = 2
# +
current_state = np.random.get_state()
np.random.seed(0)
standard_data = np.random.normal(0, 1,
                                 (sample_size, sample_dimensionality))  # normally distributed data
initial_mean = np.random.normal(0, 1, sample_dimensionality)  # mean drawn from a normal distribution
transformation_matrix = np.random.normal(0, 1,
                                         (sample_dimensionality, sample_dimensionality))  # transformation matrix with normally distributed entries
np.random.set_state(current_state)
data = np.dot(standard_data, transformation_matrix) + initial_mean
print("\nInitial data:\n", standard_data)
print("\nInitial mean:\n", initial_mean)
print("\nTransformation:\n", transformation_matrix)
print("\nData (transformed):\n", data)
# -
sample_mean = np.mean(data, axis = 0)
centered_data = data - sample_mean
sample_covariance = np.dot(centered_data.transpose(), centered_data) / sample_size  # sample covariance
lambdas, eigen_vectors = np.linalg.eigh(sample_covariance)  # principal components via eigen-decomposition
print("\nEigenvector\n", eigen_vectors[:, 0])
print("\nVariances:\n", lambdas)
print("\nEstimation of covariance matrix:\n", eigen_vectors)
cted_mean = initial_mean # ожидаемое среднеее
expacted_covariance_matrix = np.dot(transformation_matrix.transpose(), transformation_matrix) # ожидаемая ковариация
expected_lambdas, expected_eigen_vectors = np.linalg.eigh(expacted_covariance_matrix)
print("\nExpected data mean:\n", expected_mean)
print("\nExpected covariance:\n", expacted_covariance_matrix)
print("\nExpected principal components:\n", expected_lambdas)
print("\nExpected variances:\n", expected_eigen_vectors)
# +
figure_handle = plt.figure(figsize=(12, 12))
subplot_handle = figure_handle.add_subplot(111)
root_lambdas = np.array([math.sqrt(x) for x in lambdas])  # convert variances to standard deviations
expected_root_lambdas = np.array([math.sqrt(x) for x in expected_lambdas])
plot_range = 3 * np.max(root_lambdas)  # three times the largest standard deviation sets the plot limits
subplot_handle.set(xlim = [sample_mean[0] - plot_range, sample_mean[0] + plot_range],
ylim = [sample_mean[1] - plot_range, sample_mean[1] + plot_range],
title='Random Vectors', xlabel='First Component', ylabel='Second Component')
subplot_handle.scatter(data[:, 0], data[:, 1], s=3)
for component in range(0,2):
subplot_handle.plot([sample_mean[0] - eigen_vectors[0, component] * root_lambdas[component],
sample_mean[0] + eigen_vectors[0, component] * root_lambdas[component]],
[sample_mean[1] - eigen_vectors[1, component] * root_lambdas[component],
sample_mean[1] + eigen_vectors[1, component] * root_lambdas[component]],
'ro-', markersize=15, linewidth=5)
plt.show()
# -
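# Added cross-check (assumes scikit-learn and the variables `data`, `eigen_vectors`, `lambdas` from the cell above): sklearn's PCA should recover the same principal directions and variances, up to sign flips, ordering, and the n vs n-1 normalisation of the covariance.
# +
from sklearn.decomposition import PCA
pca_check = PCA(n_components=2).fit(data)
print("sklearn components (rows):\n", pca_check.components_)
print("sklearn explained variances:\n", pca_check.explained_variance_)
print("manual eigenvectors (columns):\n", eigen_vectors)
print("manual variances:\n", lambdas)
# -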
# ### Let's see how this works on the iris dataset:
# +
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(8, 7))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=60, azim=120)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral,
edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
pca = decomposition.PCA(n_components=3)
pca.fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
clf = KNeighborsClassifier(n_neighbors=30,
metric='chebyshev',
p=2)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
# -
# ### PCA на картинках, датасет `Handwritten digits`:
# +
from sklearn.datasets import load_digits
digits = load_digits()
print(digits.data.shape)
import matplotlib.pyplot as plt
plt.gray()
plt.matshow(digits.images[0])
plt.show()
plt.gray()
plt.matshow(digits.data[0].reshape(8, 8))
plt.show()
# -
np.shape(digits['data'])
# +
data=digits['data']
sample_size, sample_dim = np.shape(data)
sample_mean = np.mean(data, axis = 0)
centered_data = data - sample_mean
sample_covariance = np.dot(centered_data.transpose(), centered_data) / sample_size
lambdas, eigen_vectors = np.linalg.eigh(sample_covariance)
print(sample_size, sample_dim)
lambdas = lambdas[-1::-1]
eigen_vectors = eigen_vectors[:, -1::-1]
print("\nVariances:\n", lambdas)
# -
# PCA visualization on the Handwritten Digits dataset:
#
# https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html#sphx-glr-auto-examples-cluster-plot-kmeans-digits-py
# "сколько" дисперсии объясняет каждая компонента
plt.plot(range(sample_dim), np.cumsum(lambdas) / np.sum(lambdas), "-*b", label = 'cumsum')
plt.plot(range(sample_dim), [1]*sample_dim, "r")
plt.plot(range(sample_dim), [0.99]*sample_dim, "m")
plt.plot(range(sample_dim), [0.95]*sample_dim, "g")
plt.legend(['cumsum', '1', '0.99', '0.95'])
print(np.cumsum(lambdas) / np.sum(lambdas))
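# Added follow-up sketch: pick the smallest number of components whose cumulative explained-variance ratio reaches a chosen threshold (here 95%), using the eigenvalues computed above.
# +
explained_ratio = np.cumsum(lambdas) / np.sum(lambdas)
n_comp_95 = int(np.searchsorted(explained_ratio, 0.95) + 1)
print("components needed for 95% of the variance:", n_comp_95)
# -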
from sklearn.linear_model import LogisticRegression
log_reg=LogisticRegression(random_state=42)
data.shape
plt.imshow(data[3,:].reshape(8,8))
# +
from sklearn.model_selection import StratifiedKFold
X = pd.DataFrame(data)
y = pd.DataFrame(digits.target)
skf = StratifiedKFold(n_splits=10, random_state=42)
skf.get_n_splits(X, y)
#print(skf)
scores=[]
for train_index, test_index in skf.split(X, y):
#print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
log_reg.fit(X_train, y_train)
print(log_reg.score(X_test, y_test))
scores.append(log_reg.score(X_test, y_test))
# -
np.mean(scores),np.std(scores)
pca=decomposition.PCA(n_components=30)
pca
from sklearn import manifold
isomap = manifold.Isomap(n_components=30)
# +
# Compare the accuracy gain from nonlinear dimensionality reduction methods
scores1=[]
for train_index, test_index in skf.split(X, y):
#print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
X_train=isomap.fit_transform(X_train)
log_reg.fit(X_train, y_train)
X_test=isomap.transform(X_test)
print(log_reg.score(X_test, y_test))
scores1.append(log_reg.score(X_test, y_test))
# -
np.mean(scores1),np.std(scores1)
from scipy.stats import ttest_ind
ttest_ind(scores, scores1)
# +
k1 = 4 # start of interval(included)
k2 = 12 # end of interval(included)
nb_iter = 50 # more iterations more accuracy
# intrinsic_dim_scale_interval gives better estimation
X = data
intdim_k_repeated = repeated(intrinsic_dim_scale_interval,
X,
mode='bootstrap',
nb_iter=nb_iter, # nb_iter for bootstrapping
k1=k1, k2=k2)
intdim_k_repeated = np.array(intdim_k_repeated)
print (np.shape(intdim_k_repeated))
# -
plt.plot(range(k1,k2+1), np.mean(intdim_k_repeated, axis=0),'b')  # the mean value
# For clarity, also plot the one-standard-deviation bands
plt.plot(range(k1,k2+1), np.mean(intdim_k_repeated, axis=0)+ np.std(intdim_k_repeated, axis=0),'r')
plt.plot(range(k1,k2+1), np.mean(intdim_k_repeated, axis=0)- np.std(intdim_k_repeated, axis=0),'y')
# +
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(82 * '_')
# #############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
# -
# # 3. Geometric dimensionality reduction methods: manifold learning
# Visualization of nonlinear dimensionality reduction methods on the Handwritten digits dataset:
# https://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
# +
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
plt.figure(figsize=[8,10])
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
# -
# ## Clustering text documents:
#
# https://scikit-learn.org/stable/auto_examples/text/plot_document_clustering.html
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(subset='all',
shuffle=True, random_state=42)
# +
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset "
"using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', alternate_sign=False,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
alternate_sign=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
# #############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
# -
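# Added usage sketch (relies on the `vectorizer`, `opts`, `lsa` and `km` objects fitted in the cell above; the example document is made up): assign a previously unseen document to one of the clusters.
# +
new_doc = ["NASA launched a new satellite to study the atmosphere of Mars"]
new_vec = vectorizer.transform(new_doc)
if opts.n_components:
    new_vec = lsa.transform(new_vec)  # apply the same LSA reduction as during training
print("predicted cluster for the new document:", km.predict(new_vec)[0])
# -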
# ## Task 1.
# Apply PCA to the `sklearn.datasets.fetch_olivetti_faces` dataset and visualize the components.
#
# https://scikit-learn.org/0.19/datasets/olivetti_faces.html.
# +
import numpy as np
from sklearn.datasets import fetch_olivetti_faces
from numpy.random import RandomState
rng = RandomState(0)
data = fetch_olivetti_faces(shuffle=True, random_state=rng).data
target = fetch_olivetti_faces(shuffle=True, random_state=rng).target
image_shape = (64, 64)
print(data)
# -
data.shape
plt.gray()
plt.matshow(data[12].reshape(64, 64))
plt.show()
plt.gray()
plt.matshow(data[24].reshape(64, 64))
plt.show()
plt.gray()
plt.matshow(data[40].reshape(64, 64))
plt.show()
target.max()
np.where(target==1)
# +
X = pd.DataFrame(data)
y = pd.DataFrame(target)
skf = StratifiedKFold(n_splits=10, random_state=42)
skf.get_n_splits(X, y)
#print(skf)
scores=[]
for train_index, test_index in skf.split(X, y):
#print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
log_reg.fit(X_train, y_train)
print(log_reg.score(X_test, y_test))
scores.append(log_reg.score(X_test, y_test))
# +
sample_size, sample_dim = np.shape(data)
sample_mean = np.mean(data, axis = 0)
centered_data = data - sample_mean
sample_covariance = np.dot(centered_data.transpose(), centered_data) / sample_size
lambdas, eigen_vectors = np.linalg.eigh(sample_covariance)
print(sample_size, sample_dim)
lambdas = lambdas[-1::-1]
eigen_vectors = eigen_vectors[:, -1::-1]
print("\nVariances:\n", lambdas)
# -
eigen_vectors[1,:].shape
plt.matshow(eigen_vectors[:,1].reshape(64, 64))
plt.show()
plt.matshow(eigen_vectors[:,100].reshape(64, 64))
plt.show()
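# Added visualisation sketch: show the leading eigenfaces (columns of `eigen_vectors`) as a grid; each 64x64 "ghost face" is one direction of variation in the face data.
# +
fig, axes = plt.subplots(4, 4, figsize=(8, 8))
for idx, ax in enumerate(axes.ravel()):
    ax.imshow(eigen_vectors[:, idx].reshape(64, 64), cmap='gray')
    ax.set_title('PC %d' % (idx + 1), fontsize=8)
    ax.axis('off')
plt.show()
# -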
# "сколько" дисперсии объясняет каждая компонента
plt.figure(figsize=[20,10])
plt.plot(range(sample_dim), np.cumsum(lambdas) / np.sum(lambdas), "-*b", label = 'cumsum')
plt.plot(range(sample_dim), [1]*sample_dim, "r")
plt.plot(range(sample_dim), [0.99]*sample_dim, "m")
plt.plot(range(sample_dim), [0.95]*sample_dim, "g")
plt.legend(['cumsum', '1', '0.99', '0.95'])
print(np.cumsum(lambdas) / np.sum(lambdas))
pca=decomposition.PCA(n_components=300)
pca
# +
X = pd.DataFrame(data)
y = pd.DataFrame(target)
skf = StratifiedKFold(n_splits=10, random_state=42)
skf.get_n_splits(X, y)
scores1=[]
for train_index, test_index in skf.split(X, y):
#print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
X_train=pca.fit_transform(X_train)
log_reg.fit(X_train, y_train)
X_test=pca.transform(X_test)
print(log_reg.score(X_test, y_test))
scores1.append(log_reg.score(X_test, y_test))
# -
ttest_ind(scores, scores1)
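# Added sketch (uses `data` and the 300-component `pca` fitted in the cross-validation loop above): compress one face and reconstruct it, to see how much detail 300 components retain.
# +
face = data[12].reshape(1, -1)
face_rec = pca.inverse_transform(pca.transform(face))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3))
ax1.imshow(face.reshape(64, 64), cmap='gray'); ax1.set_title('original'); ax1.axis('off')
ax2.imshow(face_rec.reshape(64, 64), cmap='gray'); ax2.set_title('300-component PCA'); ax2.axis('off')
plt.show()
# -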
| 41,013 |
/benchmark_fitting_methods.ipynb
|
8a25bcd8cb091cbc237b75cb397db78417da2b63
|
[] |
no_license
|
yxlinaqua/saboca_project
|
https://github.com/yxlinaqua/saboca_project
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,133,792 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="2XyEWy6KZkV4"
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="U9n0YSCFa41F" outputId="57cab3fa-5342-410d-abb0-444f3a284a90"
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(DEVICE)
# + colab={} colab_type="code" id="27CwcYg7ZkWC"
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.dropout = nn.Dropout(0.1)
self.to(DEVICE)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = self.dropout(x)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = self.fc3(x)
return x
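# +
# Added sanity check (not in the original notebook): run a dummy CIFAR-10-sized batch through Net
# to confirm that the classifier head produces logits of shape (batch, 10).
net_check = Net()
with torch.no_grad():
    dummy = torch.zeros(4, 3, 32, 32, device=DEVICE)
    print(net_check(dummy).shape)  # expected: torch.Size([4, 10])
# -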
# + colab={} colab_type="code" id="PPwAB38HZkWK"
class Network(nn.Module):
"""CNN."""
def __init__(self, activation_fn, zero_padding,dropout_2d, dropout):
"""CNN Builder."""
super(Network, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=zero_padding),
# nn.BatchNorm2d(32),
activation_fn(),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=zero_padding),
activation_fn(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=dropout_2d),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=zero_padding),
# nn.BatchNorm2d(128),
activation_fn(),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=zero_padding),
activation_fn(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
activation_fn(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_layer = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(4096, 1024),
activation_fn(),
nn.Linear(1024, 512),
activation_fn(),
nn.Dropout(p=dropout),
nn.Linear(512, 10)
)
self.to(DEVICE)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
# fc layer
x = self.fc_layer(x)
return x
# + colab={} colab_type="code" id="j52wUMTYZkWf"
class CNN(nn.Module):
"""CNN."""
def __init__(self, activation_fn, zero_padding,dropout_2d, dropout):
"""CNN Builder."""
super(CNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=zero_padding),
# nn.BatchNorm2d(32, track_running_stats = False),
activation_fn(),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=zero_padding),
activation_fn(),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=zero_padding),
# nn.BatchNorm2d(128, track_running_stats = False),
activation_fn(),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=zero_padding),
activation_fn(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=dropout_2d),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=zero_padding),
# nn.BatchNorm2d(256, track_running_stats = False),
activation_fn(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=zero_padding),
activation_fn(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_layer = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(4096, 1024),
activation_fn(),
nn.Linear(1024, 512),
activation_fn(),
nn.Dropout(p=dropout),
nn.Linear(512, 10)
)
self.to(DEVICE)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
# fc layer
x = self.fc_layer(x)
return x
# + colab={} colab_type="code" id="Rmr0WOwIZkWo"
def deactivate_batchnorm(m):
if isinstance(m, nn.BatchNorm2d):
m.reset_parameters()
m.eval()
with torch.no_grad():
m.weight.fill_(1.0)
m.bias.zero_()
# +
# transform = transforms.Compose(
# [transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
trainset = torchvision.datasets.CIFAR10(root='/home/swasti/Documents/sem6/VR/Assignment3/Feature-Learning/Part_b/data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='/home/swasti/Documents/sem6/VR/Assignment3/Feature-Learning/Part_b/data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
print(len(trainset))
# +
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# + colab={} colab_type="code" id="aoS9C712ZkW8"
activation_fn = nn.Sigmoid # nn.Sigmoid or nn.Tanh or nn.ReLU
zero_padding = 1 # 0 for no padding
batch_norm = False # False for no batch_norm
dropout_2d = 0.05
dropout = 0.1
m = 0.9 # momentum
# + colab={} colab_type="code" id="4M4b8QdHZkW_"
net = Network(activation_fn, zero_padding, dropout_2d, dropout)
# if(batch_norm==False):
# net.apply(deactivate_batchnorm)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)#, momentum = m)
# + colab={"base_uri": "https://localhost:8080/", "height": 555} colab_type="code" id="BjbUy1u3ZkXD" outputId="8121b926-2c79-45b1-a05b-26591ed89796"
for epoch in range(2): # loop over the dataset multiple times
since = time.time()
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs = inputs.to(DEVICE)
labels = labels.to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
# + colab={} colab_type="code" id="h7T5930hZkXJ"
# PATH = './cifar_CNN_sig.pth'
# torch.save(net, PATH)
# + colab={"base_uri": "https://localhost:8080/", "height": 155} colab_type="code" id="Bat8QtMtZkXS" outputId="4fad3c17-da9f-43e0-c12b-7b311272e05f"
dataiter = iter(testloader)
images, labels = next(dataiter)
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
# + colab={} colab_type="code" id="hVjFqxs5ZkXt"
# net = torch.load(PATH)
net.eval()
# + colab={} colab_type="code" id="tBk7zetbZkX0"
# outputs = net(images)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="2jVPz1laZkX5" outputId="f3080f33-dec5-44e7-bc32-ae72f7c76552"
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
images = images.to(DEVICE)
labels = labels.to(DEVICE)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
# + colab={"base_uri": "https://localhost:8080/", "height": 191} colab_type="code" id="ZXCyS3SCZkX9" outputId="e6647099-b78c-43ed-a741-160e2101d5d8"
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images = images.to(DEVICE)
labels = labels.to(DEVICE)
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
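# +
# Added sketch: a full confusion matrix gives more detail than per-class accuracy; this reuses
# `net`, `testloader` and DEVICE from the cells above.
conf_mat = np.zeros((10, 10), dtype=int)
with torch.no_grad():
    for images, labels in testloader:
        preds = net(images.to(DEVICE)).argmax(dim=1).cpu()
        for t, p in zip(labels, preds):
            conf_mat[t.item(), p.item()] += 1
print(conf_mat)
# -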
# + colab={} colab_type="code" id="7xDukyRuZkYB"
| 10,860 |
/ETL_Code/ETL_Codev3.ipynb
|
62891982c2f77cbeb6ee9f97e23e61304e1d4740
|
[] |
no_license
|
DeniseMcK9210/Equity-In-Hollywood
|
https://github.com/DeniseMcK9210/Equity-In-Hollywood
| 0 | 3 | null | 2019-10-16T01:04:56 | 2019-10-16T01:04:27 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 142,181 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: PythonData
# language: python
# name: pythondata
# ---
# import dependencies
import pandas as pd
from sqlalchemy import create_engine
import psycopg2
import numpy as np
# import Bechdel_test CSV file and convert to pandas Dataframe
bechdel_csv = "./Datasets/movies.csv"
bechdel_df_1 = pd.read_csv(bechdel_csv)
bechdel_df_1.head()
# Drop unnecessary columns (test, clean_test, budget, domgross, intgross, code, period code, decade code)
bechdel_df_2 = bechdel_df_1[['year', 'imdb', 'title', 'binary', 'budget_2013$', 'domgross_2013$', 'intgross_2013$']]
bechdel_df_2.head()
# Drop Nan values
bechdel_df_2.dropna()
pd.options.mode.chained_assignment = None
# remove first two characters from tconst to convert to imdbid
#bechdel_df_2['imdb'] = bechdel_df_2['imdb'].str[2:]
#[i[1:] for i in bechdel_df_2['imdb']]
bechdel_df_2['imdb'] = bechdel_df_2['imdb'].astype(dtype=np.str)
#bechdel_df_2['imdb'] = bechdel_df_2['imdb'].str[-9:]
#bechdel_df_2.info()
#bechdel_df_2['imdb'] = bechdel_df_2['imdb'].str.strip('tt')
bechdel_df_2['imdb'] = bechdel_df_2['imdb'].str[-7:]
#bechdel_df_2.imdb.replace('tt', '', regex=True)
#bechdel_df_2['imdb'].apply(lambda x: x.replace('tt', '')[-7:])
#bechdel_df_2['imdb'] = bechdel_df_2['imdb'].str.replace(r'tt', '')
#.astype(np.dtype=int)
#bechdel_df_2.info()
bechdel_df_2.head()
bechdel_df_2.columns = ['year', 'imdbid', 'title', 'binary', 'budget_2013$', 'domgross_2013$', 'intgross_2013$']
#bechdel_df_2.reset_index(drop=True, inplace=True)
bechdel_df_2.head()
# +
## DO NOT RERUN THIS CELL
# convert title_basics.tsv to imdb_genres.csv
#dfs = pd.read_csv('./Datasets/title_basics.tsv', sep='\t', chunksize=50)
#for df in dfs:
# df.to_csv('imdb_movie_genres.csv', sep=',', mode='a')
# -
# convert imdb_genres.csv to DataFrame
genre_csv = "./Datasets/imdb_genres.csv"
genre_df_1 = pd.read_csv(genre_csv,na_values=['\\N'])
genre_df_1.dropna()
#genre_df_1 = pd.read_csv(genre_csv)
genre_df_1.head()
# Drop unnecessary columns (originalTitle, isAdult, endYear, runtimeMinutes)
genre_df_2 = genre_df_1[['tconst', 'titleType', 'primaryTitle', 'startYear', 'genres']]
genre_df_2.head()
# Filter titleType for movie
genre_df_2 = genre_df_2[genre_df_2['titleType']== 'movie']
genre_df_2.dropna(how='any', inplace=True)
genre_df_2.head()
# +
# Filter data for years between 1970 and 2013
#genre_df_2['startYear'] = pd.to_numeric(genre_df_2.startYear)
#genre_df_2['startYear'] = gendf_2['startYear'].astype(int)
#bechdelapi_df_1 = pd.read_csv(bechdelapi_csv,na_values=['\\N'])
#genre_df_2['genres'].replace(r'\s+|\\n', ' ', regex=True, inplace=True)
genre_df_2["startYear"] = genre_df_2["startYear"].astype(dtype=np.int64)
#genre_df_2.['startYear'] = pd.to_datetime(df['startYear'], format='%y')
genre_df_2.info()
#genre_df_2['startYear'] = pd.to_datetime(genre_df_2['startYear'], format='%Y')
genre_df_2 = genre_df_2.loc[(genre_df_2['startYear'] >= 1970) & (genre_df_2['startYear'] <= 2013)]
#genre_df_2.dropna(subset=['genres'], inplace=True)
genre_df_2.head()
#genre_df_2[genre_df_2['startYear']=='*']
#genre_df_2["startYear"] = pd.to_numeric(genre_df_2["startYear"])
# -
# remove first two characters from tconst to convert to imdbid
genre_df_2['tconst'] = genre_df_2['tconst'].str[2:]
genre_df_2.head()
# drop unnecessary columns
genre_df_3 = genre_df_2[['tconst', 'primaryTitle', 'genres']]
# rename columns for consistency
genre_df_3.columns = ['imdbid', 'title', 'genres']
genre_df_3.reset_index(drop=True, inplace=True)
#clean_genre_df = genre_df_3.rename(columns={'tconst': 'imdbid', 'genres': 'genres'}, inplace=True)
genre_df_3.head()
# import bechdeltest_api CSV file and convert to pandas Dataframe
bechdelapi_csv = "./Datasets/bechdeltest_api.csv"
bechdelapi_df_1 = pd.read_csv(bechdelapi_csv)
bechdelapi_df_1.head()
# +
# Drop unnecessary columns and keep only imdbid, rating and title
#bechdelapi_df_2 = bechdelapi_df_1[['imdbid', 'rating', 'title', 'year']]
bechdelapi_df_2 = bechdelapi_df_1[['imdbid', 'rating', 'title']]
# Remove missing values
bechdelapi_df_2.dropna()
#bechdelapi_df_2.columns['imdbid', 'bechdel_rating']
bechdelapi_df_2.rename(columns={'imdbid':'imdbid',
'rating':'bechdel_rating', 'title': 'title'},
inplace=True)
bechdelapi_df_2.head()
# +
## DO NOT RERUN THIS CELL
# convert title_ratings.tsv to imdb_ratings.csv
#dfs = pd.read_csv('./Datasets/title_ratings.tsv', sep='\t', chunksize=50)
#for df in dfs:
# df.to_csv('imdb_ratings.csv', sep=',', mode='a')
# -
# convert imdb_genres.csv to DataFrame
rating_csv = "./Datasets/imdb_ratings.csv"
rating_df_1 = pd.read_csv(rating_csv,na_values=['\\N'])
rating_df_1.dropna()
#genre_df_1 = pd.read_csv(genre_csv)
rating_df_1.head()
# +
# Drop unnecessary columns (Unnamed: 0)
rating_df_2 = rating_df_1[['tconst', 'averageRating', 'numVotes']]
# Remove NaN values
rating_df_2.dropna(how='any', inplace=True)
rating_df_2.head()
# +
rating_df_2['tconst'] = rating_df_2['tconst'].astype(dtype=np.str)
rating_df_2['tconst'] = rating_df_2['tconst'].str[2:]
rating_df_2.head()
# -
rating_df_2.rename(columns={'tconst':'imdbid',
'averageRating':'averageRating', 'numVotes': 'numVotes'},
inplace=True)
rating_df_2.head()
bechdel_df_2 = bechdel_df_2[~bechdel_df_2['imdbid'].astype(str).str.contains('t')]
# +
genre_df_3["imdbid"] = genre_df_3["imdbid"].astype(dtype=np.int64)
genre_df_3.info()
bechdelapi_df_2["imdbid"] = bechdelapi_df_2["imdbid"].astype(dtype=np.int64)
#bechdelapi_df_2.info()
bechdel_df_2["imdbid"] = bechdel_df_2["imdbid"].astype(dtype=np.int64)
rating_df_2.info()
rating_df_2["imdbid"] = rating_df_2["imdbid"].astype(str)
rating_df_2 = rating_df_2[~rating_df_2['imdbid'].str.contains('onst')]
rating_df_2["imdbid"] = rating_df_2["imdbid"].astype(dtype=np.int64)
#bechdel_df_2.info()
# +
## merge (1)rating_df_2, (2)bechdelapi_df_2, (3)genre_df_3, (4)bechdel_df_2 on 'imdbid'
# merge 1 and 2
merge1_df = pd.concat([bechdelapi_df_2.set_index('imdbid'),bechdel_df_2.set_index('imdbid')], axis=1, join='inner')
#first_mergedf = pd.merge(rating_df_2, bechdelapi_df_2, on='imdbid', how='inner')
merge1_df.reset_index(inplace=True)
#merge1_df.info()
#merge2_df = pd.concat([rating_df_2.set_index('tconst'), genre_df_3.set_index('tconst')], axis=1, join='inner')
final_mergedf = pd.concat([merge1_df.set_index('imdbid'), genre_df_3.set_index('imdbid')], axis=1, join='inner')
final_mergedf.reset_index(inplace=True)
#test_mergedf = pd.concat([rating_df_2.set_index('imdbid'), genre_df_3.set_index('imdbid')], axis=1, join='inner')
#genre_df_3.head()
#test_mergedf.info()
test_merge = pd.concat([final_mergedf.set_index('imdbid'), rating_df_2.set_index('imdbid')], axis=1, join='inner')
#test_merge.head()
#test_merge.drop(test_merge.columns[[3, 8]], axis=1, inplace=True)
test_merge = test_merge.loc[:,~test_merge.columns.duplicated()]
test_merge.dropna(how='any', inplace=True)
test_merge.info()
# -
all_csv = "./Datasets/allTests.csv"
allTests_df_1 = pd.read_csv(all_csv)
allTests_df_1.head()
# Create Engine and connection to Database
engine = create_engine('postgres://postgres:PASSWORD@localhost:5432/Bechdel_Test')
conn = engine.connect()
# Verify tables
engine.table_names()
# Use pandas to load bechdel test csv into converted DataFrame into database
test_merge.to_sql(name='bechdel_test', con=engine, if_exists='append', index=False)
# Confirm data has been added by querying table
pd.read_sql_query('select * from bechdel_test', con=engine).head()
# +
## Run sample queries to answer questions
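# Added example (hypothetical: assumes the merged movie data was loaded into a table named
# bechdel_test with the columns shown above): average IMDb rating and movie count by Bechdel outcome.
sample_query = """
    SELECT "binary", AVG("averageRating") AS avg_imdb_rating, COUNT(*) AS n_movies
    FROM bechdel_test
    GROUP BY "binary";
"""
# Uncomment once the table has been populated:
# pd.read_sql_query(sample_query, con=engine)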
| 7,804 |
/E14 InmulSajin(김영범).ipynb
|
4caf0011faefb67b316f952298b13d096e0d950d
|
[] |
no_license
|
Eddy-Kim-Age/Aiffel_Code
|
https://github.com/Eddy-Kim-Age/Aiffel_Code
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 623,660 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Alneqmq-lKIo"
# # Unit 27. 파일
# + colab={"base_uri": "https://localhost:8080/"} id="14uIqDWOk-Ft" outputId="8863e9d5-b77e-4299-edb8-007f09132a83"
# !ls -l sample_data/
# + [markdown] id="rSA9xdBEmnRG"
# ## 27.1 Writing and reading strings
# + id="QsD51wLhmY7s"
# File modes: read ('r'), write ('w'), append ('a')
file = open('hello.txt','w') # write mode overwrites the existing file
file.write('Hello.world!')
file.close()
# + colab={"base_uri": "https://localhost:8080/"} id="je6hzRatnEI4" outputId="8e11ac69-c1fd-4210-abc8-fedebfc4ae8e"
# cat is a unix command (use `type` on Windows)
# !cat hello.txt
# + colab={"base_uri": "https://localhost:8080/"} id="0EDxArLbnLjT" outputId="6082bbfc-fd01-4a2c-867c-bd05f37d63f0"
file = open('hello.txt') # read; 'r' mode is the default
s = file.read()
print(s)
file.close()
# + [markdown] id="eZ2YhP98pncK"
# ### 파이썬 스타일
# + colab={"base_uri": "https://localhost:8080/"} id="eBWnNBEopWAA" outputId="6fdbd7c6-9a22-4428-9fc6-417308a87785"
with open('hello.txt') as file: # file.close() is called automatically when the with block ends
s = file.read()
print(s)
# + [markdown] id="Ku5OZ8U8p_i8"
# ## 27.2 Reading and writing multiple lines
# + id="5LvqldExpmJr"
with open('hello.txt','w') as file:
for i in range(3):
file.write(f'Hello, world! {i+1}\n')
# + colab={"base_uri": "https://localhost:8080/"} id="9Za06Yg_q26H" outputId="626c0697-d4b3-4b71-ebf9-1c3ca809d1fd"
# !cat hello.txt
# + id="9mC6dPOiq4ja"
lines = ['안녕하세요.\n', '파이썬\n', '코딩도장입니다.\n']
with open('hello.txt','w') as file:
file.writelines(lines)
# + colab={"base_uri": "https://localhost:8080/"} id="jGiBaEBrrshr" outputId="ec6d30be-a144-4a70-cfcc-a1592318ec42"
# !cat hello.txt
# + id="2KdBXVdOrvsB"
s = """안녕하세요.
파이썬
코딩도장입니다."""
with open('hello.txt','w') as file:
file.write(s)
# + colab={"base_uri": "https://localhost:8080/"} id="LTnabcjEs5wT" outputId="60350fdd-b3b4-4e54-ad59-03fc3dddb1c3"
# !cat hello.txt
# + colab={"base_uri": "https://localhost:8080/"} id="wHviRAq-s7xs" outputId="c75dea76-6b91-4bea-d7d2-356902efa61a"
with open('hello.txt') as file:
s = file.read() # file.read(size), default = EOF
print(s)
# + colab={"base_uri": "https://localhost:8080/"} id="3pS7y11vtEZF" outputId="0116d0c8-c46a-40f0-f318-76b67f59816d"
with open('hello.txt') as file:
    s = file.readline() # readline() reads one line at a time
print(s)
# + colab={"base_uri": "https://localhost:8080/"} id="xbvcSba3tVnC" outputId="a50e6287-0586-4fb9-f4c9-097e6ef46f53"
with open('hello.txt') as file:
line = None
while line != '':
line = file.readline()
print(line.strip('\n'))
# + colab={"base_uri": "https://localhost:8080/"} id="qFLIsakit3th" outputId="3880a442-ab76-49f3-c55e-6f87397b158e"
with open('hello.txt') as file:
    for line in file: # any object that defines __iter__ (check dir()) can be used in a for loop
print(line.strip('\n'))
# + colab={"base_uri": "https://localhost:8080/"} id="6hLdX8wBxIxW" outputId="592f3d65-1603-42ef-da40-eba8d99355d4"
dir(file) # list the object's attributes and methods
# + colab={"base_uri": "https://localhost:8080/"} id="s6sh_2IXxTON" outputId="715a8b12-1faa-44bd-d010-299eded09a4e"
x = {'a':10,'b':20}
dir(x)
# + colab={"base_uri": "https://localhost:8080/"} id="oiCLSDQbxvWx" outputId="d7835e7b-c661-491a-fb80-9e1a829de2d1"
for a in x: # same as: for a in x.keys()
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="UdR4-Caxx0Dn" outputId="d419a049-debe-4134-abdc-b2c519975a22"
with open('hello.txt') as file:
    s = file.readlines() # returns a list with one element per line
print(s)
# + [markdown] id="pbIFUGfCzITl"
# ### append
# + id="5ltS-oNqydkX"
with open('hello.txt','a') as file:
file.write('Append\n')
# + colab={"base_uri": "https://localhost:8080/"} id="MzlUE5Poy62a" outputId="7bbdfa46-c7ee-4286-a781-b0c4747c8017"
# !cat hello.txt
# + [markdown] id="AA0q9N8gzc-5"
# ### binary mode
# + colab={"base_uri": "https://localhost:8080/"} id="vz_zeJiyzWMV" outputId="a1c69dc0-5e11-4a64-e358-81efae1f2fc8"
with open('flower.jpg','rb') as bin: # binary modes: rb, wb, ab
x = bin.read(10)
print(x)
# + [markdown] id="Ym_pHEzx05E3"
# ## 27.3 Reading and writing Python objects
# + id="__mZ_p5n0JZs"
# pickle is used to save Python objects to a file
import pickle
scores = {'kor':90,'eng':90,'mat':80}
with open('binary.pkl','wb') as file:
    pickle.dump(scores, file) # dump(): pickling method that writes a Python object to a file
# + colab={"base_uri": "https://localhost:8080/"} id="bIssEN9a1oSd" outputId="b4a5887e-12cb-46f3-f899-07587efa16fa"
# !cat binary.pkl
# + colab={"base_uri": "https://localhost:8080/"} id="QGj3dVLi1zQf" outputId="d71edd3d-fcad-4d5c-a5b9-fa9f7badc4a9"
with open('binary.pkl','rb') as file:
    new_scores = pickle.load(file) # load(): unpickling method that reads a Python object back from a file
new_scores
| 4,844 |
/Analytics/Analytics.ipynb
|
18be88b6750e7bfa8b948ea935217166824b0e45
|
[] |
no_license
|
takuge/RealEstate
|
https://github.com/takuge/RealEstate
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 775,320 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pragneshrana/Algo_Trial/blob/master/EM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="j5ibtUzCHI10" colab_type="text"
# #EM Algo.
# If data points are given without labels, let's assume that they still form clusters.
#
# #Objective
# The main objective of a clustering algorithm is to minimize the intra-cluster variance and maximize the inter-cluster variance.
#
# #Why EM is required?
# For the fuel data set it was assumed that fuels behave similarly under the same physical conditions. To look for such behaviour in the data, K-means clustering was applied first; a silhouette score of about 0.55 suggests that the data do form clusters, but its low value indicates that the points are scattered and the clusters are far from clean.
#
# The full variability of the data points can't be captured by the K-means algorithm, since it assigns points purely by distance to the cluster centroids and **makes hard assignments of the data points.** <br>
#
# **What K-means does?** <br>
# The cost function in K-means clustering is given as, <br>
# $$ J = \sum_{n=1}^{N} \sum_{k=1}^{K} r_{nk} ||x_n - \mu_k||^2$$ <br>
#
# where, <br>
# $r_{nk} \in \{0,1\}$ <br>
# $x_n$ is a data point <br>
# $\mu_k$ is cluster centroid <br>
# By iteratively minimizing the squared two-norm between each point and its centroid (the centroid is just the mean of all the points inside that cluster), the variance is minimized. <br>
#
# Let's find out variance of the specific cluster(Interclass variacne) given that certain data points belongs to specific cluster. <br>
# The points associated with a specific cluster i are denoted $D_i$, where i denotes the $i^{th}$ cluster. <br>
#
# $$Var(X|X \in D_i) \\
# = E[ ||X-E(X)||^2 |X \in D_i] \\
# =\frac{1}{n_i} \sum_{X \in D_i} ||X-E(X)||^2 \\
# $$
#
# Now, since a point $X \in D_i$ is associated with its own cluster (and with the other clusters) only with some probability, the overall quantity is obtained by taking the expectation of the conditional variance,
# so,<br>
#
# $$E_i[Var(X|X \in D_i)]\\
# =\sum_i p(X \in D_i) * \frac{1}{n_i} \sum_{X \in D_i} ||X-E(X)||^2 \\
# = \sum_{i=1}^{k}\frac{n_i}{n} \frac{1}{n_i} \sum_{X \in D_i} ||X-E(X)||^2 \\
# = \frac{1}{n} \sum_{i=1}^{k} \sum_{X \in D_i} ||X-E(X)||^2
# $$ <br>
# Ref: https://stats.stackexchange.com/questions/198239/k-means-clustering-minimizes-conditional-variance <br>
#
# K-means actually minimizes this overall conditional variance, but with hard assignments of data points to clusters. K-means can be seen as a special (limiting) case of the EM algorithm in which each covariance matrix shrinks to a small multiple of the identity. <br>
#
# So, to obtain soft assignments of data points and to avoid the issues above, the EM algorithm is used.<br>
#
#
# #Gaussian Mixture Model<br>
#
# EM algorithm works on gaussian mixture modelling. <br>
# When data points follow a complex distribution, it can often be represented using more than one Gaussian component. <br>
# A Gaussian mixture model is a linear superposition of Gaussian densities, <br>
# $$
# p(x) = \sum_{k=1}^{K} \pi_k \mathcal{N}(x|\mu_k,\Sigma_k)
# $$<br>
# where $\pi_k = p(z_k=1)$; in this marginal distribution z is a K-dimensional **binary random variable** with a one-hot (1-of-K) encoding, $z_k \in \{0,1\}$. <br>
#
# $\pi_k$ can be seen as weights to the specific distribution and it must satisfy $\sum_{k=1}^{K} \pi_k =1$<br>
#
# **Concept and Proof:** <bR>
# If the dataset follows more than one Gaussian distribution, then the joint distribution (which captures the relationship between the two random variables) is $p(x,z) = p(z)\, p(x|z)$.<br>
#
# as z follows 1 of K representation so, $$p(z=1) = \pi_k $$ or $$ p(z) = \prod_{k=1}^{K} \pi_k^{z_k} $$<br>
# e.g. z will be like for k=3 {0,0,1}<br>
# (exempli gratia = for example)<br>
#
# For a particular class, let's assume that it follows a Gaussian distribution, so<br>
# $$ p(x|z_k=1) = \mathcal{N} (x|\mu_k,\Sigma_k)$$
# in general it can be written as,
# $$ p(x|z) = \prod_{k=1}^{K} \mathcal{N} (x|\mu_k,\Sigma_k)^{z_k}$$
# the product over k is just a compact way of selecting the single component for which $z_k=1$ <br>
#
# Now let's compute the marginal distribution of x:<br>
# $$p(x) = \sum_z p(z) p(x|z) \\
# = \sum_z \pi_k^{z_k} \prod_{k=1}^{K} \mathcal{N} (x|\mu_k,\Sigma_k)^{z_k} \\
# = \sum_{j=1}^{K} \prod_{k=1}^{K} (\pi_k \mathcal{N} (x|\mu_k,\Sigma_k))^{z_{kj}} \\
# $$
# $$
# p(x)= \sum_{j=1}^{K}\pi_j \mathcal{N} (x|\mu_j,\Sigma_j)
# $$
# as, $z_{kj}$ can be represented by 1 of K representation <br>
#
# For a new data point (prediction), Bayes' rule gives the posterior over components:<br>
# $$ \gamma(z_k) = p(z_k =1|x) $$<br>
# $$ = \frac{p(z_k=1)\, p(x|z_k=1)}{\sum_{j=1}^{K}p(z_j=1)\,p(x|z_j=1)}$$<br>
# $$ = \frac{\pi_k \mathcal{N} (x|\mu_k,\Sigma_k)} {\sum_{j=1}^{K}\pi_j\mathcal{N} (x|\mu_j,\Sigma_j)}$$<br>
#
# $\gamma(z_k)$ is called the responsibility; it expresses how much responsibility component k takes for the data point, i.e. the posterior probability that the point was generated by that component.
# The derivation so far is for a single data point x.<br>
# <br>
#
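# A small sketch of this responsibility computation for a single point (the mixture parameters below are arbitrary illustration values, not fitted ones):
#
# ```
# import numpy as np
# from scipy.stats import multivariate_normal
#
# pi = np.array([0.5, 0.3, 0.2])                                   # mixing coefficients
# mus = [np.zeros(2), np.array([3.0, 3.0]), np.array([0.0, 5.0])]
# covs = [np.eye(2), 2 * np.eye(2), np.eye(2)]
#
# x = np.array([2.5, 2.0])
# weighted = np.array([pi[k] * multivariate_normal.pdf(x, mus[k], covs[k]) for k in range(3)])
# gamma = weighted / weighted.sum()                                # responsibilities, sum to 1
# ```
#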
# **For the whole dataset:**<br>
# Now let's take the more general case, in which the observations $\{x_1,x_2,x_3,...,x_N\}$ are i.i.d. The dataset is an $N \times D$ matrix $X$ whose $n^{th}$ row is $x_n^T$; likewise the latent variables form an $N \times K$ matrix $Z$ with rows $z_n^{T}$.<br>
# The log-likelihood is then given by<br>
# $$\ln \ p(X|\pi,\mu,\Sigma) = \sum_{n=1}^{N}\ln\bigg \{\sum_{k=1}^{K} \pi_k \mathcal{N}(x_n|\mu_k,\Sigma_k)\bigg \}$$
#
# **Critical problems**:<br>
# For simplicity, take $\Sigma_k = \sigma^2_k I$.
# 1. If one of the components has its mean $\mu_k$ sitting exactly on a data point, the likelihood has a singularity as $\sigma_k \rightarrow 0$, since that component's contribution grows without bound:
# $$\mathcal{N}(x_n|x_n,\sigma_j^2 I) = \frac{1}{(2\pi)^{\frac{1}{2}}} \frac{1}{\sigma_j}$$
#
# 2. For a K-component mixture there are K! equivalent ways of assigning the parameters to the components (identifiability / label switching), so the maximum is not unique.
# 3. The summation sits inside the $\ln$, so the logarithm does not act directly on the Gaussians. If we differentiate the log-likelihood we therefore do not get a closed-form solution.
#
# One technique for dealing with these problems is the EM algorithm.
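#
# A tiny numeric illustration of the first problem (arbitrary numbers, 1-D for simplicity): centring one component exactly on a data point and shrinking its standard deviation makes that component's density at the point, and hence its likelihood contribution, blow up.
#
# ```
# from scipy.stats import norm
#
# x_n = 1.7                                    # a single data point
# for sigma in [1.0, 0.1, 0.01, 0.001]:
#     print(sigma, norm.pdf(x_n, loc=x_n, scale=sigma))   # grows without bound as sigma -> 0
# ```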
#
# To find the unknowns, differentiate the log-likelihood with respect to $\mu_k$ and set the derivative to zero:<br>
#
# $$ 0 = \sum_{n=1}^{N} \frac{\pi_k \mathcal{N}(x_n|\mu_k,\Sigma_k)}{\sum_{j=1}^{K} \pi_j \mathcal{N}(x_n|\mu_j,\Sigma_j) }\,\Sigma_k^{-1}(x_n - \mu_k)$$
#
# $$ 0 = \sum_{n=1}^{N} \gamma(z_{nk})\, \Sigma_k^{-1}(x_n - \mu_k)$$
#
# By algebraic manipulation we get $\mu_k$:<br>
# $$\mu_k = \frac{1}{N_k} \sum_{n=1}^{N}\gamma(z_{nk})\, x_n \,.............A$$
# where<br>
# $$N_k = \sum_{n=1}^{N} \gamma(z_{nk})$$
# is the effective number of data points assigned to cluster k.<br>
#
# Differentiating w.r.t. $\Sigma_k$ and maximizing the likelihood in the same way, we obtain<br>
# $$\Sigma_k = \frac{1}{N_k} \sum_{n=1}^{N} \gamma(z_{nk}) (x_n - \mu_k) (x_n - \mu_k)^T..................B$$
# The key difference from the single-Gaussian case is that each data point is weighted by the corresponding posterior probability.
#
#
# Finally, to maximize with respect to the mixing coefficients subject to the constraint $\sum_{k=1}^{K}\pi_k = 1$, we use a Lagrange multiplier and maximize
# $$\ln p (X|\pi,\mu,\Sigma) + \lambda \bigg( \sum_{k=1}^{K}\pi_k -1 \bigg)$$
# and by algebraic manipulation we get<br>
# $$\pi_k = \frac{N_k}{N}....................C$$
#
# Solutions A, B and C all involve the responsibilities, which in turn depend on all the parameters, so there is no closed-form solution.<br>
#
# Instead, the problem is solved with an iterative algorithm:<br>
# 1. Initialize the means, covariances and mixing coefficients.
# 2. E step: use the current means, covariances and mixing coefficients to evaluate the responsibilities (posterior probabilities).
# 3. M step: re-estimate the means, covariances and mixing coefficients using A, B and C;
# first evaluate the means, then the covariances, then the mixing coefficients.
# 4. Evaluate the log-likelihood and check for convergence. (A compact sketch of this loop is given just below.)
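#
# A compact sketch of that loop, written directly from equations A, B and C (the toy data and the number of iterations are arbitrary; a full class-based implementation is given in the code cells below):
#
# ```
# import numpy as np
# from scipy.stats import multivariate_normal
#
# X = np.random.randn(200, 2)                       # toy data
# N, D, K = X.shape[0], X.shape[1], 3
# pi = np.ones(K) / K                               # step 1: initialize
# mu = X[np.random.choice(N, K, replace=False)]
# Sigma = np.array([np.eye(D) for _ in range(K)])
#
# for _ in range(20):
#     # E step: responsibilities gamma(z_nk)
#     dens = np.array([pi[k] * multivariate_normal.pdf(X, mu[k], Sigma[k]) for k in range(K)]).T  # (N, K)
#     gamma = dens / dens.sum(axis=1, keepdims=True)
#     # M step: equations A, B, C
#     Nk = gamma.sum(axis=0)                                    # effective counts
#     mu = (gamma.T @ X) / Nk[:, None]                          # A
#     for k in range(K):
#         d = X - mu[k]
#         Sigma[k] = (gamma[:, k, None] * d).T @ d / Nk[k]      # B
#     pi = Nk / N                                               # C
#     log_likelihood = np.sum(np.log(dens.sum(axis=1)))         # step 4: monitor convergence
# ```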
#
# # Note:
# # Expectation:
# **Learning: how expectation differs from the mean.** <br>
# The expectation, or expected value, is a weighted average. <br>
# Let's take the example of a die. If we roll a die, the outcome is $X \in \{1,2,3,4,5,6\}$,
# so
# $$E[X] = 1* p_x(1) + 2* p_x(2) +3* p_x(3) +4* p_x(4) +5* p_x(5) +6* p_x(6) $$
# For a fair die each $p_x(i) = 1/6$, giving $E[X] = 3.5$.
# The expected value is useful when the outcome is uncertain or probabilistic.<br>
#
# Let's take an example. <br>
# A salary contains two parts: <br>
# 1. a fixed part - deterministic <br>
# 2. a bonus part - probabilistic <br>
#
# The bonus depends on the person's performance. For simplicity, assume you can get three possible bonuses $\in \{1000,500,100\}$. Since not every day is the same, your performance at work also varies with circumstances. <br>
#
#
# In a given month,
# you stay motivated and work hard with probability = 0.6 <br>
# you force yourself and work hard = 0.3 <br>
# you stay demotivated = 0.1 <br>
#
# Now assume that these probabilities are directly associated with your bonus,
# so the probability that you will get <br>
# 1000 is 0.6 <br>
# 500 is 0.3 <br>
# 100 is 0.1 <br>
# The amount you can expect in your account each month is then the probability-weighted average, <br>
#
# $$E[X] = 0.6*1000+0.3*500+0.1*100 = 760 $$<br>
# so this is the bonus you should ideally expect. <br>
# Mathematically, $$E[X] = \sum_{i} x_i p_i $$ \
# As the probabilities change, the expectation changes too, but if all the influencing factors are accounted for, it approaches the long-run mean.
# The associated probabilities can vary significantly from experiment to experiment and case to case. \
# In the bonus example, \
# motivation varies from month to month and the associated probabilities vary with it, but if you collect data over, say, 10 years, the expectation gives a good sense of the average bonus.
#
#
#
# # Variance - deviation or dispersion
# Variance is defined to measure the deviation from the mean. \
#
# Ideally it could be defined as $E[|X - E(X)|]$, but since the absolute value is not differentiable everywhere, for mathematical convenience it is defined as $E[(X - E(X))^2]$.
#
# $$Var(X) = E[(X - E(X))^2] =\sum_i p_i (X_i - E(X))^2
# $$
#
# Let's understand this with the bonus example. The expected bonus per month is $E[X]$; the variance measures the deviation of the random variable (the bonus) from that expected value, i.e. the spread of your bonus around the mean bonus.
#
# Variance therefore gives a sense of spread around the mean, and since each bonus amount has a probability associated with it, the squared deviations are weighted by those same probabilities.
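#
# A quick numeric check of the bonus example above:
#
# ```
# import numpy as np
#
# bonus = np.array([1000, 500, 100])
# p = np.array([0.6, 0.3, 0.1])
#
# E_X = np.sum(p * bonus)                      # 760.0, the expected bonus
# Var_X = np.sum(p * (bonus - E_X) ** 2)       # spread of the bonus around that expectation
# print(E_X, Var_X, np.sqrt(Var_X))
# ```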
# + id="FOqTmpZsowsA" colab_type="code" colab={}
#kmeans
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt  # used by the plotting helpers below
class Kmeans():
'''
K-means algorithm from scratch
Input: data,no_of_cluster,max_iteration
'''
def __init__(self,data,no_of_cluster,max_iteration):
self.data = data
self.data_rows = data.shape[0]
self.data_cols = data.shape[1]
self.no_of_cluster = no_of_cluster
self.max_iteration = max_iteration
self.centroids = np.zeros((self.no_of_cluster,self.data_cols))
def initialize_centroid_random(self):
'''
initialized centroid with random number
centroid dimension = each row specifies one cluster
'''
#colXno_of_cluster data
centroids = np.random.rand(self.no_of_cluster,self.data_cols)
return centroids
def initialize_centroid_random_sampaling(self):
'''
initialized centroid with random number
centroid dimension = each row specifies one cluster
'''
#colXno_of_cluster data
data = pd.DataFrame(self.data)
for i in range(self.no_of_cluster):
sampling = data.sample(n=int(self.data_rows *0.01))
self.centroids[i,:] = np.mean(sampling,axis=0)
def initialize_centroid(self):
'''
initialized centroid taking half of the dataset
centroid dimension = each row specifies one cluster
'''
#colXno_of_cluster data
k = int(self.data_rows/self.no_of_cluster) #appriximate
j=0
for i in range(self.no_of_cluster):
# print(self.data[j:k,:])
self.centroids[i,:] = np.mean(self.data[j:k,:],axis=0)
j=j+k
k=k+k
def initialize_centroid_rand_uniform(self):
'''
Best out of all
centroids by taking random uniform of (min,max) of all dimention
'''
#colXno_of_cluster data
max_dim_array = np.amax(self.data,axis=0)
min_dim_array = np.amin(self.data,axis=0)
for i in range(self.data_cols):
#updating centroids colmwise
self.centroids[:,i] = np.random.uniform(max_dim_array[i],min_dim_array[i],self.no_of_cluster)
def initialize_centroid_uniform(self):
'''
centroids by taking linear uniform of (min,max) of all dimention
'''
#colXno_of_cluster data
max_dim_array = np.amax(self.data,axis=0)
min_dim_array = np.amin(self.data,axis=0)
for i in range(self.data_cols):
#updating centroids colmwise
self.centroids[:,i] = np.linspace(max_dim_array[i],min_dim_array[i],self.no_of_cluster)
def least_distance_from_point(self,dist_from_points):
'''
It will measure distance from given point
'''
distance = np.zeros((self.data_rows,len(dist_from_points)))
sum_distace = np.zeros((self.data_rows,1))
for i in range(len(dist_from_points)):
data_matrix = np.ones((self.data_rows,self.data_cols)) * dist_from_points[i,:] #matix single data point by repetation
distance_from_centroid = self.data - data_matrix #subtraction of all data point from given data point
distance[:,i] = np.linalg.norm(distance_from_centroid,axis=1) #calcualting distacne
distance = np.sum(distance,axis=1) #sum if more than 1 points
max_index = np.where(distance == np.max(distance,axis=0)) #index of maximum distance
next_centroid = self.data[max_index,:]
return next_centroid
def initialize_centroid_max(self):
'''
centroids by taking taking arandom point then assigning max and then
finding point at max distance and then point at max distance to both
'''
#colXno_of_cluster data
print('Initialization of cluster centroids')
data = pd.DataFrame(self.data)
self.centroids[0,:] = data.sample(n=1)
for i in range(1,self.no_of_cluster):
self.plotting(self.data)
self.centroids[i,:] = self.least_distance_from_point(self.centroids[:i+1,:])
def calculate_dist(self,data):
'''
data = nXm
centroid =mX no_of_cluster
'''
'''
#centroid into matrix form
to find out norm have to subtraction and for that have to generate matrix of each centroid
by repeting the each centroid times no_of_rows
'''
distance = np.zeros((self.data_rows,self.no_of_cluster))
for i in range(self.no_of_cluster):
centroid_matrix = np.ones((self.data_rows,self.data_cols)) * self.centroids[i,:]
distance_from_centroid = data - centroid_matrix
distance[:,i] = np.square(np.linalg.norm(distance_from_centroid,axis=1)) #as cost fucntion is variance
return distance
def find_nearest_centroid(self,distance):
'''
Returns the indices of the minimum values along an axis.
return labels
'''
return np.argmin(distance,axis=1)
def update_centroid(self,data,labels):
'''
it will update the centroid location taking mean of newly assigned points
'''
new_centroids = np.zeros((self.no_of_cluster,self.data_cols))
#making new dataset
new_data = np.zeros((self.data_rows,self.data_cols+1))
new_data[:,:-1] = data
new_data[:,-1] = labels
for i in range(self.no_of_cluster):
filtered_data = new_data[new_data[:,-1] == i]
new_centroids[i,:] = np.mean(filtered_data[:,:-1],axis=0)
return new_centroids
def plotting(self,data):
#plotting
plt.scatter(data[:, 0], data[:, 1], s = 40, color = 'g')
for i in range(self.no_of_cluster):
plt.scatter(self.centroids[i,0],self.centroids[i,1])
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
def fit(self,data):
'''
main method which wiil call above mehods successively
'''
self.initialize_centroid_max()
self.plotting(data)
print('Assigning cluster by moving centroids')
for i in range(self.max_iteration):
distance = self.calculate_dist(data)
# print(distance)
labels = self.find_nearest_centroid(distance)
new_centroids = self.update_centroid(data,labels)
if(np.linalg.norm(self.centroids-new_centroids) < 0.001):#break if diffn in centroid is less than error
break
else:
self.centroids = new_centroids
# print(centroids)
print(i)
self.plotting(data)
return labels,new_centroids
def predict(self,test_set):
'''
It will predict for the test case
'''
distance = self.calculate_dist(test_set)
labels = self.find_nearest_centroid(distance)
        return labels
# + id="3PMOL1XTQWeW" colab_type="code" outputId="cc82e09a-31f0-40d8-aa0f-d9c278fa20d5" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#EM
import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal
from sklearn.preprocessing import MultiLabelBinarizer
import copy
class EM():
'''
Expectation-Maximization algorithm
The goal of this algorithm is to maximize the likelihod function with respect to
the parameter comprimising the means and covariance of the components and the mixing coefficient
which are unkown and has to be estimated from the data
'''
def __init__(self,data,no_of_cluster,max_iteration,labels=None):
self.data = data
self.data_rows = data.shape[0]
self.data_cols = data.shape[1]
self.no_of_cluster = no_of_cluster
self.max_iteration = max_iteration
self.mu = [] #means
self.covariance = [] #variance
self.labels = labels
self.w = np.ones(self.no_of_cluster) #weights assigned to the cluster
print('Initilization Done')
self.responcibility = np.zeros((self.data_rows,self.no_of_cluster)) #making responcibility with zeros
#intializing responcibility
        if(labels is not None and labels.size != 0): #if responsibilities are seeded from k-means labels
unique_labels = np.unique(self.labels)
for i in range(self.no_of_cluster):
self.responcibility[:,i] = self.labels #chagnig colm of zero with labels
for j in range(self.data_rows):
if(self.responcibility[j,i] == unique_labels[i]):
self.responcibility[j,i] = 1
else:
self.responcibility[j,i] = 0
#weight intilization
print('weight assignment')
for i in range(self.no_of_cluster):
self.w[i] = np.sum(self.responcibility[:,i]) / self.data_rows #weights for specific cluster = number of points assigned to cluster / total points
for i in range(self.no_of_cluster):
self.mu.append(np.random.rand(self.data_cols))
rand_matrix = np.random.rand(self.data_cols,self.data_cols)
spd_mat = np.matmul(rand_matrix,rand_matrix.transpose())
self.covariance.append(spd_mat) #AT*A SPD matrix
def E_step(self):
'''
This step includes calculation of Responcibility using current parameters
# '''
# print('E-Step')
# print('self.mu: ', self.mu)
# print('self.covariance: ', self.covariance)
# print('self.w: ', self.w)
numerator = copy.copy(self.responcibility)
# print('numerator shape',numerator.shape)
        for i in range(self.no_of_cluster): #number of clusters
            for j in range(self.data_rows): #number of rows
                numerator[j,i] = self.w[i] * multivariate_normal.pdf(self.data[j],self.mu[i],self.covariance[i]) #weighted gaussian: pi_k * N(x_n|mu_k,Sigma_k)
        denominator = np.sum(numerator,axis=1) #row wise summation of weighted gaussian pdf
        # print('denominator: ', denominator.shape)
        for j in range(self.data_rows): #normalise each row to get the responsibilities
            self.responcibility[j,:] = numerator[j,:]/denominator[j]
#returning denominator as it will be useful to calculate the log-likelihood
return denominator
def M_step(self):
'''
This step maximize the parameter of specific distribution
'''
# print('M-Step')
# print('self.mu: ', self.mu)
# print('self.covariance: ', self.covariance)
# print('self.w: ', self.w)
N_k = [] #number of data points assigned to tghe specific cluster
for i in range(self.no_of_cluster):
N_k.append(np.sum(self.responcibility[:,i]))
for i in range(self.no_of_cluster):
self.w[i] = N_k[i] / self.data_rows # data_rows = N
self.mu[i] = (np.sum(self.data * self.responcibility[:,i].reshape(-1,1), axis=0)) / N_k[i]
for i in range(self.no_of_cluster):
temp_mat = np.zeros((self.data_cols,self.data_cols))
for j in range(self.data_rows):
data_mean_diff = self.data[j,:] - self.mu[i]
temp_mat = temp_mat + np.outer(data_mean_diff,data_mean_diff) * self.responcibility[j,i]
# print('temp_mat: ', temp_mat)
self.covariance[i] = temp_mat / N_k[i]
def logliklihood(self,denominator):
'''
This method will calculate the likelihood
'''
likelihood = np.sum(np.log(denominator))#natural log
print('likelihood: ', likelihood)
return likelihood
def plotting(self):
#plotting
plt.scatter(self.data[:, 0], self.data[:, 1], s = 40, color = 'g')
for i in range(self.no_of_cluster):
plt.scatter(self.mu[i][0],self.mu[i][1])
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
def fit(self):
'''
This methods calls E_step and M_step Iteratively
'''
likelihood = []
        for i in range(self.max_iteration):
            summ_respo = self.E_step() #summation of weighted gaussian pdf
            self.M_step()
            likelihood.append(self.logliklihood(summ_respo))
            if(i > 0 and abs(likelihood[i-1] - likelihood[i]) < 0.001): #stop once the log-likelihood change is below the tolerance
                break
# print(self.mu)
self.plotting()
print('self.mu: ', self.mu)
print('self.covariance: ', self.covariance)
print('self.w: ', self.w)
if __name__ == "__main__":
# Creating Test DataSets using sklearn.datasets.make_blobs
    from sklearn.datasets import make_blobs  #sklearn.datasets.samples_generator was removed in newer scikit-learn releases
from matplotlib import pyplot as plt
from matplotlib import style
#defining number of cluster
no_of_cluster = 3
#clustering points
style.use("fivethirtyeight")
X, y = make_blobs(n_samples = 500, centers = no_of_cluster,cluster_std =3, n_features = 4)
#calling KMeans
kmean_obj = Kmeans(X,no_of_cluster,5)
labels, centroids = kmean_obj.fit(X)
print('centroids: ', centroids)
#calling KMeans
em_obj = EM(X,no_of_cluster,15,labels=labels)
em_obj.fit()
# + id="Hu3feZufQWwR" colab_type="code" outputId="fc993059-ff76-419a-9d70-0eaf64599fe0" colab={"base_uri": "https://localhost:8080/", "height": 583}
#Trial
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from sklearn.datasets import make_blobs  #sklearn.datasets.samples_generator was removed in newer scikit-learn releases
from matplotlib import pyplot as plt
from matplotlib import style
import scipy as sp
x = np.linspace(0, 5, 10, endpoint=False)
y = multivariate_normal.pdf(x, mean=2.5, cov=0.5);
plt.plot(x, y)
plt.show()
#plot
x, y = np.mgrid[-1:1:.01, -1:1:.01]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
plt.contourf(x, y, rv.pdf(pos))
plt.show()
X = []
for i in range(5):
X.append(np.random.rand(2))
print(X)
mean = np.array([1,1.5])
covariance = np.array([[5, 2],
                       [2, 4]])  #covariance must be symmetric positive definite
data_cluster = np.zeros(5)
for i in range(5):
data_cluster[i] = sp.stats.multivariate_normal.pdf(X[i],mean,covariance)
print(data_cluster)
# + id="lWSHR2LgzAQf" colab_type="code" colab={}
# + colab_type="code" id="Vzf4dDSHnFQQ"
def our_hash_vectors(vecs, rng, n_buckets, n_hashes, mask=None, verbose=False):
"""
Args:
vecs: tensor of at least 2 dimension,
rng: random number generator
n_buckets: number of buckets in each hash table
n_hashes: the number of hash tables
mask: None indicating no mask or a 1D boolean array of length vecs.shape[0], containing the location of padding value
verbose: controls prints for debug
Returns:
A vector of size n_hashes * vecs.shape[0] containing the buckets associated with each input vector per hash table.
"""
# check for even, integer bucket sizes
assert isinstance(n_buckets, int) and n_buckets % 2 == 0
rng = fastmath.stop_gradient(tie_in(vecs, rng))
rot_size = n_buckets
### Start Code Here
### Step 1 ###
rotations_shape = [vecs.shape[-1],n_hashes, rot_size//2]
random_rotations = fastmath.random.normal(rng, rotations_shape).astype(np.float32)
if verbose:
print("random.rotations.shape", random_rotations.shape)
### Step 2 ###
if fastmath.backend_name() == "jax":
rotated_vecs = np.einsum("tf,fhb->htb", vecs, random_rotations)
print("using jax")
else:
# Step 2a
random_rotations = np.reshape(random_rotations,[-1, n_hashes * (rot_size // 2)] )
if verbose:
print("random_rotations reshaped", random_rotations.shape)
# Step 2b
rotated_vecs = np.dot(vecs, random_rotations)
if verbose:
print("rotated_vecs1", rotated_vecs.shape)
# Step 2c
rotated_vecs = np.reshape( rotated_vecs, [-1, n_hashes, rot_size//2])
if verbose:
print("rotated_vecs2", rotated_vecs.shape)
# Step 2d
rotated_vecs = np.transpose(rotated_vecs, (1,0,2))
if verbose:
print("rotated_vecs3", rotated_vecs.shape)
### Step 3 ###
rotated_vecs = np.concatenate([rotated_vecs, -rotated_vecs], axis=-1)
if verbose:
print("rotated_vecs.shape", rotated_vecs.shape)
### Step 4 ###
buckets = np.argmax(rotated_vecs, axis=-1).astype(np.int32)
if verbose:
print("buckets.shape", buckets.shape)
if verbose:
print("buckets", buckets)
if mask is not None:
n_buckets += 1 # Create an extra bucket for padding tokens only
buckets = np.where(mask[None, :], buckets, n_buckets - 1)
# buckets is now (n_hashes, seqlen). Next we add offsets so that
# bucket numbers from different hashing rounds don't overlap.
offsets = tie_in(buckets, np.arange(n_hashes, dtype=np.int32))
offsets = np.reshape(offsets * n_buckets, (-1, 1))
### Step 5 ###
buckets = np.reshape(buckets+offsets, (-1,))
if verbose:
print("buckets with offsets", buckets.shape, "\n", buckets)
### End Code Here
return buckets
# %% colab={"base_uri": "https://localhost:8080/", "height": 403} colab_type="code" id="cPGNaVpAi8wM" outputId="a5a6a956-30b7-4de7-a5c2-65011a9d3816"
# example code. Note for reference, the sizes in this example match the values in the diagram above.
ohv_q = np.ones((8, 5)) # (seq_len=8, n_q=5)
ohv_n_buckets = 4 # even number
ohv_n_hashes = 3
with fastmath.use_backend("tf"):
ohv_rng = fastmath.random.get_prng(1)
ohv = our_hash_vectors(
ohv_q, ohv_rng, ohv_n_buckets, ohv_n_hashes, mask=None, verbose=True
)
print("ohv shape", ohv.shape, "\nohv", ohv) # (ohv_n_hashes * ohv_n_buckets)
# note the random number generators do not produce the same results with different backends
with fastmath.use_backend("jax"):
ohv_rng = fastmath.random.get_prng(1)
ohv = our_hash_vectors(ohv_q, ohv_rng, ohv_n_buckets, ohv_n_hashes, mask=None)
print("ohv shape", ohv.shape, "\nohv", ohv) # (ohv_n_hashes * ohv_n_buckets)
# %% [markdown] colab_type="text" id="XAQqr1_XkCf6"
# <details>
# <summary>
# <font size="3"><b> Expected Output </b></font>
# </summary>
#
# **Expected Values**
# ```
# random.rotations.shape (5, 3, 2)
# random_rotations reshaped (5, 6)
# rotated_vecs1 (8, 6)
# rotated_vecs2 (8, 3, 2)
# rotated_vecs3 (3, 8, 2)
# rotated_vecs.shape (3, 8, 4)
# buckets.shape (3, 8)
# buckets ndarray<tf.Tensor(
# [[3 3 3 3 3 3 3 3]
# [3 3 3 3 3 3 3 3]
# [3 3 3 3 3 3 3 3]], shape=(3, 8), dtype=int32)>
# buckets with offsets (24,)
# ndarray<tf.Tensor([ 3 3 3 3 3 3 3 3 7 7 7 7 7 7 7 7 11 11 11 11 11 11 11 11], shape=(24,), dtype=int32)>
# ohv shape (24,)
# ohv ndarray<tf.Tensor([ 3 3 3 3 3 3 3 3 7 7 7 7 7 7 7 7 11 11 11 11 11 11 11 11], shape=(24,), dtype=int32)>
# using jax
# ohv shape (24,)
# ohv [ 3 3 3 3 3 3 3 3 5 5 5 5 5 5 5 5 11 11 11 11 11 11 11 11]```
# %% [markdown]
# <details>
# <summary>
# <font size="3" ><b>Completed code for reference </b></font>
# </summary>
#
# ```
# # since this notebook is ungraded the completed code is provided here for reference
#
# def our_hash_vectors(vecs, rng, n_buckets, n_hashes, mask=None, verbose=False):
# """
# Args:
# vecs: tensor of at least 2 dimension,
# rng: random number generator
# n_buckets: number of buckets in each hash table
# n_hashes: the number of hash tables
# mask: None indicating no mask or a 1D boolean array of length vecs.shape[0], containing the location of padding value
# verbose: controls prints for debug
# Returns:
# A vector of size n_hashes * vecs.shape[0] containing the buckets associated with each input vector per hash table.
#
# """
#
# # check for even, integer bucket sizes
# assert isinstance(n_buckets, int) and n_buckets % 2 == 0
#
# rng = fastmath.stop_gradient(tie_in(vecs, rng))
# rot_size = n_buckets
# ### Start Code Here
#
# ### Step 1 ###
# rotations_shape = (vecs.shape[-1], n_hashes, rot_size // 2)
# random_rotations = fastmath.random.normal(rng, rotations_shape).astype(
# np.float32)
# if verbose: print("random.rotations.shape", random_rotations.shape)
#
# ### Step 2 ###
# if fastmath.backend_name() == 'jax':
# rotated_vecs = np.einsum('tf,fhb->htb', vecs, random_rotations)
# if verbose: print("using jax")
# else:
# #Step 2a
# random_rotations = np.reshape(random_rotations,
# [-1, n_hashes * (rot_size // 2)])
# if verbose: print("random_rotations reshaped", random_rotations.shape)
# #Step 2b
# rotated_vecs = np.dot(vecs, random_rotations)
# if verbose: print("rotated_vecs1", rotated_vecs.shape)
# #Step 2c
# rotated_vecs = np.reshape(rotated_vecs, [-1, n_hashes, rot_size//2])
# if verbose: print("rotated_vecs2", rotated_vecs.shape)
# #Step 2d
# rotated_vecs = np.transpose(rotated_vecs, (1, 0, 2))
# if verbose: print("rotated_vecs3", rotated_vecs.shape)
#
# ### Step 3 ###
# rotated_vecs = np.concatenate([rotated_vecs, -rotated_vecs], axis=-1)
# if verbose: print("rotated_vecs.shape", rotated_vecs.shape)
# ### Step 4 ###
# buckets = np.argmax(rotated_vecs, axis=-1).astype(np.int32)
# if verbose: print("buckets.shape", buckets.shape)
# if verbose: print("buckets", buckets)
#
# if mask is not None:
# n_buckets += 1 # Create an extra bucket for padding tokens only
# buckets = np.where(mask[None, :], buckets, n_buckets - 1)
#
# # buckets is now (n_hashes, seqlen). Next we add offsets so that
# # bucket numbers from different hashing rounds don't overlap.
# offsets = tie_in(buckets, np.arange(n_hashes, dtype=np.int32))
# offsets = np.reshape(offsets * n_buckets, (-1, 1))
# ### Step 5 ###
# buckets = np.reshape(buckets + offsets, (-1,))
# if verbose: print("buckets with offsets", buckets.shape, "\n", buckets)
# return buckets```
# %% [markdown]
# <a name="3.3"></a>
# ## Part 3.3 Sorting Buckets
# %% [markdown]
# Great! Now that we have a hash function, we can work on sorting our buckets and performing our matrix operations.
# We'll walk through this algorithm in small steps:
# * sort_buckets - we'll perform the sort
# * softmax
# * dotandv - do the matrix math to form the dot product and output
# These routines will demonstrate a simplified version of the algorithm. We won't address masking and variable bucket sizes but will consider how they would be handled.
#
# **sort_buckets**
#
# At this point, we have called the hash function and were returned the associated buckets. For example, if we started with
# `q[n_seq,n_q]`, with `n_hash = 2; n_buckets = 4; n_seq = 8`
# we might be returned:
# `bucket = [0,1,2,3,0,1,2,3, 4,5,6,7,4,5,6,7] `
# Note that it is n_hash\*n_seq long and that the bucket values for each hash round have been offset by n_buckets so the numbers from different rounds do not overlap. Going forward, we are going to sort this array of buckets to group together members of the same (hash, bucket) pair.
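#
# A tiny sketch of what that offsetting does (the bucket values below simply mirror the example and are not produced by the real hash):
#
# ```
# import numpy as np
#
# n_hashes, n_buckets = 2, 4
# per_hash = np.array([[0, 1, 2, 3, 0, 1, 2, 3],     # buckets from hash round 0
#                      [0, 1, 2, 3, 0, 1, 2, 3]])    # buckets from hash round 1
# offsets = np.arange(n_hashes).reshape(-1, 1) * n_buckets
# print((per_hash + offsets).reshape(-1))
# # -> [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7], matching the flattened `bucket` above
# ```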
#
# **Instructions**
# **Step 1** Our goal is to sort $q$ rather than the bucket list, so we will need to track the association of the buckets to their elements in $q$.
# * using np.arange, create `ticker`, just a sequence of numbers (0..n_hashed * seqlen) associating members of q with their bucket.
#
# **Step 2** This step is provided to you, as it is a bit difficult to describe. We want to disambiguate elements that map to the same bucket: when a sorting routine encounters multiple entries with the same key, it can legitimately put any of them first, which would make testing ambiguous. To prevent that, we multiply all the buckets by `seqlen` and then add `ticker % seqlen`, giving every entry a unique key.
#
# **Step 3** Here we are! Ready to sort. This is the exciting part.
# * Utilize [fastmath.sort_key_val](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.sort_key_val.html#jax.lax.sort_key_val) and sort `buckets_and_t` and `ticker`.
#
# **Step 4** We need to be able to undo the sort at the end to get things back into their correct locations
# * sort `sticker` and `ticker` to form the reverse map
#
# **Step 5** create our sorted q and sorted v
# * use [np.take](https://numpy.org/doc/stable/reference/generated/numpy.take.html) and `st` to grab correct values in `q` for the sorted values, `sq`. Use axis=0.
#
# Use the example code below the routine to check and help debug your results.
# %%
def sort_buckets(buckets, q, v, n_buckets, n_hashes, seqlen, verbose=True):
"""
Args:
buckets: tensor of at least 2 dimension,
n_buckets: number of buckets in each hash table
n_hashes: the number of hash tables
"""
if verbose:
print("---sort_buckets--")
## Step 1
ticker = None
if verbose:
print("ticker", ticker.shape, ticker)
## Step 2
buckets_and_t = seqlen * buckets + (ticker % seqlen) # provided
if verbose:
print("buckets_and_t", buckets_and_t.shape, buckets_and_t)
# Hash-based sort ("s" at the start of variable names means "sorted")
# Step 3
sbuckets_and_t, sticker = None
if verbose:
print("sbuckets_and_t", sbuckets_and_t.shape, sbuckets_and_t)
if verbose:
print("sticker", sticker.shape, sticker)
# Step 4
_, undo_sort = None
if verbose:
print("undo_sort", undo_sort.shape, undo_sort)
# Step 5
st = sticker % seqlen # provided
sq = None
sv = None
return sq, sv, sticker, undo_sort
# %%
t_n_hashes = 2
t_n_buckets = 4
t_n_seq = t_seqlen = 8
t_n_q = 3
n_v = 5
t_q = (np.array([(j % t_n_buckets) for j in range(t_n_seq)]) * np.ones((t_n_q, 1))).T
t_v = np.ones((t_n_seq, n_v))
t_buckets = np.array(
[
(j % t_n_buckets) + t_n_buckets * i
for i in range(t_n_hashes)
for j in range(t_n_seq)
]
)
print("q\n", t_q)
print("t_buckets: ", t_buckets)
t_sq, t_sv, t_sticker, t_undo_sort = sort_buckets(
t_buckets, t_q, t_v, t_n_buckets, t_n_hashes, t_seqlen, verbose=True
)
print("sq.shape", t_sq.shape, "sv.shape", t_sv.shape)
print("sq\n", t_sq)
# %% [markdown]
# <details>
# <summary>
# <font size="3"><b> Expected Output </b></font>
# </summary>
#
# **Expected Values**
# ```
# q
# [[0. 0. 0.]
# [1. 1. 1.]
# [2. 2. 2.]
# [3. 3. 3.]
# [0. 0. 0.]
# [1. 1. 1.]
# [2. 2. 2.]
# [3. 3. 3.]]
# t_buckets: [0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7]
# ---sort_buckets--
# ticker (16,) [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
# buckets_and_t (16,) [ 0 9 18 27 4 13 22 31 32 41 50 59 36 45 54 63]
# sbuckets_and_t (16,) [ 0 4 9 13 18 22 27 31 32 36 41 45 50 54 59 63]
# sticker (16,) [ 0 4 1 5 2 6 3 7 8 12 9 13 10 14 11 15]
# undo_sort (16,) [ 0 2 4 6 1 3 5 7 8 10 12 14 9 11 13 15]
# sq.shape (16, 3) sv.shape (16, 5)
# sq
# [[0. 0. 0.]
# [0. 0. 0.]
# [1. 1. 1.]
# [1. 1. 1.]
# [2. 2. 2.]
# [2. 2. 2.]
# [3. 3. 3.]
# [3. 3. 3.]
# [0. 0. 0.]
# [0. 0. 0.]
# [1. 1. 1.]
# [1. 1. 1.]
# [2. 2. 2.]
# [2. 2. 2.]
# [3. 3. 3.]
# [3. 3. 3.]]
#
# ```
# %% [markdown]
# <details>
# <summary>
# <font size="3" ><b>Completed code for reference </b></font>
# </summary>
#
# ```
# # since this notebook is ungraded the completed code is provided here for reference
# def sort_buckets(buckets, q, v, n_buckets, n_hashes, seqlen, verbose=True):
# """
# Args:
# buckets: tensor of at least 2 dimension,
# n_buckets: number of buckets in each hash table
# n_hashes: the number of hash tables
# """
# if verbose: print("---sort_buckets--")
# ## Step 1
# ticker = np.arange(n_hashes * seqlen)
# if verbose: print("ticker",ticker.shape, ticker)
# ## Step 2
# buckets_and_t = seqlen * buckets + (ticker % seqlen)
# if verbose: print("buckets_and_t",buckets_and_t.shape, buckets_and_t)
#
# # Hash-based sort ("s" at the start of variable names means "sorted")
# #Step 3
# sbuckets_and_t, sticker = fastmath.sort_key_val(
# buckets_and_t, ticker, dimension=-1)
# if verbose: print("sbuckets_and_t",sbuckets_and_t.shape, sbuckets_and_t)
# if verbose: print("sticker",sticker.shape, sticker)
# #Step 4
# _, undo_sort = fastmath.sort_key_val(sticker, ticker, dimension=-1)
# if verbose: print("undo_sort",undo_sort.shape, undo_sort)
#
# #Step 4
# st = (sticker % seqlen)
# sq = np.take(q, st, axis=0)
# sv = np.take(v, st, axis=0)
# return sq, sv, sticker, undo_sort
# ```
# %% [markdown]
# <a name="3.4"></a>
# ## Part 3.4 Chunked dot product attention
# %% [markdown]
# Now let's create the dot product attention. We have sorted $Q$ so that elements that the hash has determined are likely to be similar are adjacent to each other. We now want to perform the dot-product within those limited regions - in 'chunks'.
#
# <img src = "C4W4_LN2_image12.PNG" height="400" width="750">
# <center><b>Figure 11: Performing dot product in 'chunks' </b></center>
#
#
# The example we have been working on is shown above, with sequences of 8, 2 hashes, 4 buckets and, conveniently, the content of Q was such that when sorted, there were 2 entries in each bucket. If we reshape Q into a (8,2,n_q), we can use numpy matmul to perform the operation. Numpy [matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html) will treat the inputs as a stack of matrices residing in the last two indexes. This will allow us to matrix multiply Q with itself in *chunks* and later can also be used to perform the matrix multiply with v.
#
# We will perform a softmax on the output of the dot product of Q and Q, but in this case, there is a bit more to the story. Recall the output of the hash had multiple hash tables. We will perform softmax on those separately and then must combine them. This is where the form of softmax we defined at the top of the notebook comes into play. The routines below will utilize the logsumexp values that the `our_softmax` routine calculates.
#
# There is a good deal of [reshaping](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html) to get things into the right formats. The code has many print statements that match the expected values below. You can use those to check your work as you go along. If you don't do a lot of 3-dimensional matrix multiplications in your daily life, it might be worthwhile to open a spare cell and practice a few simple examples to get the hang of it! Here is one to start with:
#
# %%
a = np.arange(16 * 3).reshape((16, 3))
chunksize = 2
ar = np.reshape(
a, (-1, chunksize, a.shape[-1])
) # the -1 usage is very handy, see numpy reshape
print(ar.shape)
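# a small batched-matmul check (added for illustration, not part of the original exercise):
# np.matmul treats the inputs as a stack of matrices in the last two axes,
# so this computes one (chunksize x chunksize) product per chunk
art = np.swapaxes(ar, -1, -2)        # (8, 3, 2)
print(np.matmul(ar, art).shape)      # (8, 2, 2)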
# %% [markdown]
#
# **Instructions**
# **Step 1** Reshaping Q
# * np.reshape `sq` (sorted q) to be 3 dimensions. The middle dimension is the size of the 'chunk' specified by `kv_chunk_len`
# * np.swapaxes to perform a 'transpose' on the reshaped `sq`, *but only on the last two dimension*
# * np.matmul the two values.
#
# **Step 2**
# * use our_softmax to perform the softmax on the dot product. Don't forget `passthrough`
#
# **Step 3**
# * np.reshape `sv`. Like `sq`, the middle dimension is the size of the 'chunk' specified by `kv_chunk_len`
# * np.matmul dotlike and the reshaped `sv`
# * np.reshape so to a two dimensional array with the last dimension stays the same (`so.shape[-1]`)
# * `logits` also needs reshaping, we'll do that.
#
# **Step 4** Now we can undo the sort.
# * use [np.take](https://numpy.org/doc/stable/reference/generated/numpy.take.html) and `undo_sort` and axis = 0 to unsort so
# * do the same with `slogits`.
#
# **Step 5** This step combines the results of multiple hashes. Recall, the softmax was only over the values in one hash, this extends it to all the hashes. Read through it, the code is provided. Note this is taking place *after* the matrix multiply with v while the softmax output is used before the multiply. How does this achieve the correct result?
# %%
def dotandv(
sq, sv, undo_sort, kv_chunk_len, n_hashes, seqlen, passthrough, verbose=False
):
# Step 1
rsq = None
rsqt = None
if verbose:
print("rsq.shape,rsqt.shape: ", rsq.shape, rsqt.shape)
dotlike = None
if verbose:
print("dotlike\n", dotlike)
# Step 2
dotlike, slogits = None
if verbose:
print("dotlike post softmax\n", dotlike)
# Step 3
vr = None
if verbose:
print("dotlike.shape, vr.shape:", dotlike.shape, vr.shape)
so = None
if verbose:
print("so.shape:", so.shape)
so = None
slogits = np.reshape(slogits, (-1,)) # provided
if verbose:
print("so.shape,slogits.shape", so.shape, slogits.shape)
# Step 4
o = None
logits = None
if verbose:
print("o.shape,o", o.shape, o)
if verbose:
print("logits.shape, logits", logits.shape, logits)
# Step 5 (Provided)
if n_hashes > 1:
o = np.reshape(o, (n_hashes, seqlen, o.shape[-1]))
logits = np.reshape(logits, (n_hashes, seqlen, 1))
probs = np.exp(logits - fastmath.logsumexp(logits, axis=0, keepdims=True))
o = np.sum(o * probs, axis=0)
return o
# %%
t_kv_chunk_len = 2
out = dotandv(
t_sq,
t_sv,
t_undo_sort,
t_kv_chunk_len,
t_n_hashes,
t_seqlen,
passthrough=True,
verbose=True,
)
print("out\n", out)
print("\n-----With softmax enabled----\n")
out = dotandv(
t_sq,
t_sv,
t_undo_sort,
t_kv_chunk_len,
t_n_hashes,
t_seqlen,
passthrough=False,
verbose=True,
)
print("out\n", out)
# %% [markdown]
# <details>
# <summary>
# <font size="3"><b> Expected Output </b></font>
# </summary>
#
# **Expected Values**
# ```
# rsq.shape,rsqt.shape: (8, 2, 3) (8, 3, 2)
# dotlike
# [[[ 0. 0.]
# [ 0. 0.]]
#
# [[ 3. 3.]
# [ 3. 3.]]
#
# [[12. 12.]
# [12. 12.]]
#
# [[27. 27.]
# [27. 27.]]
#
# [[ 0. 0.]
# [ 0. 0.]]
#
# [[ 3. 3.]
# [ 3. 3.]]
#
# [[12. 12.]
# [12. 12.]]
#
# [[27. 27.]
# [27. 27.]]]
# dotlike post softmax
# [[[ 0. 0.]
# [ 0. 0.]]
#
# [[ 3. 3.]
# [ 3. 3.]]
#
# [[12. 12.]
# [12. 12.]]
#
# [[27. 27.]
# [27. 27.]]
#
# [[ 0. 0.]
# [ 0. 0.]]
#
# [[ 3. 3.]
# [ 3. 3.]]
#
# [[12. 12.]
# [12. 12.]]
#
# [[27. 27.]
# [27. 27.]]]
# dotlike.shape, vr.shape: (8, 2, 2) (8, 2, 5)
# so.shape: (8, 2, 5)
# so.shape,slogits.shape (16, 5) (16,)
# o.shape,o (16, 5) [[ 0. 0. 0. 0. 0.]
# [ 6. 6. 6. 6. 6.]
# [24. 24. 24. 24. 24.]
# [54. 54. 54. 54. 54.]
# [ 0. 0. 0. 0. 0.]
# [ 6. 6. 6. 6. 6.]
# [24. 24. 24. 24. 24.]
# [54. 54. 54. 54. 54.]
# [ 0. 0. 0. 0. 0.]
# [ 6. 6. 6. 6. 6.]
# [24. 24. 24. 24. 24.]
# [54. 54. 54. 54. 54.]
# [ 0. 0. 0. 0. 0.]
# [ 6. 6. 6. 6. 6.]
# [24. 24. 24. 24. 24.]
# [54. 54. 54. 54. 54.]]
# logits.shape, logits (16,) [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# out
# [[ 0. 0. 0. 0. 0.]
# [ 6. 6. 6. 6. 6.]
# [24. 24. 24. 24. 24.]
# [54. 54. 54. 54. 54.]
# [ 0. 0. 0. 0. 0.]
# [ 6. 6. 6. 6. 6.]
# [24. 24. 24. 24. 24.]
# [54. 54. 54. 54. 54.]]
#
# -----With softmax enabled----
#
# rsq.shape,rsqt.shape: (8, 2, 3) (8, 3, 2)
# dotlike
# [[[ 0. 0.]
# [ 0. 0.]]
#
# [[ 3. 3.]
# [ 3. 3.]]
#
# [[12. 12.]
# [12. 12.]]
#
# [[27. 27.]
# [27. 27.]]
#
# [[ 0. 0.]
# [ 0. 0.]]
#
# [[ 3. 3.]
# [ 3. 3.]]
#
# [[12. 12.]
# [12. 12.]]
#
# [[27. 27.]
# [27. 27.]]]
# dotlike post softmax
# [[[0.5 0.5 ]
# [0.5 0.5 ]]
#
# [[0.5 0.5 ]
# [0.5 0.5 ]]
#
# [[0.49999976 0.49999976]
# [0.49999976 0.49999976]]
#
# [[0.49999976 0.49999976]
# [0.49999976 0.49999976]]
#
# [[0.5 0.5 ]
# [0.5 0.5 ]]
#
# [[0.5 0.5 ]
# [0.5 0.5 ]]
#
# [[0.49999976 0.49999976]
# [0.49999976 0.49999976]]
#
# [[0.49999976 0.49999976]
# [0.49999976 0.49999976]]]
# dotlike.shape, vr.shape: (8, 2, 2) (8, 2, 5)
# so.shape: (8, 2, 5)
# so.shape,slogits.shape (16, 5) (16,)
# o.shape,o (16, 5) [[1. 1. 1. 1. 1. ]
# [1. 1. 1. 1. 1. ]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [1. 1. 1. 1. 1. ]
# [1. 1. 1. 1. 1. ]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [1. 1. 1. 1. 1. ]
# [1. 1. 1. 1. 1. ]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [1. 1. 1. 1. 1. ]
# [1. 1. 1. 1. 1. ]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]
# [0.9999995 0.9999995 0.9999995 0.9999995 0.9999995]]
# logits.shape, logits (16,) [ 0.6931472 3.6931472 12.693148 27.693148 0.6931472 3.6931472
# 12.693148 27.693148 0.6931472 3.6931472 12.693148 27.693148
# 0.6931472 3.6931472 12.693148 27.693148 ]
# out
# [[1. 1. 1. 1. 1. ]
# [1. 1. 1. 1. 1. ]
# [0.99999905 0.99999905 0.99999905 0.99999905 0.99999905]
# [0.99999905 0.99999905 0.99999905 0.99999905 0.99999905]
# [1. 1. 1. 1. 1. ]
# [1. 1. 1. 1. 1. ]
# [0.99999905 0.99999905 0.99999905 0.99999905 0.99999905]
# [0.99999905 0.99999905 0.99999905 0.99999905 0.99999905]]
# ```
# %% [markdown]
# <details>
# <summary>
# <font size="3" ><b>Completed code for reference </b></font>
# </summary>
#
# ```
# # since this notebook is ungraded the completed code is provided here for reference
# def dotandv(sq, sv, undo_sort, kv_chunk_len, n_hashes, seqlen, passthrough, verbose=False ):
# # Step 1
# rsq = np.reshape(sq,(-1, kv_chunk_len, sq.shape[-1]))
# rsqt = np.swapaxes(rsq, -1, -2)
# if verbose: print("rsq.shape,rsqt.shape: ", rsq.shape,rsqt.shape)
# dotlike = np.matmul(rsq, rsqt)
# if verbose: print("dotlike\n", dotlike)
#
# #Step 2
# dotlike, slogits = our_softmax(dotlike, passthrough)
# if verbose: print("dotlike post softmax\n", dotlike)
#
# #Step 3
# vr = np.reshape(sv, (-1, kv_chunk_len, sv.shape[-1]))
# if verbose: print("dotlike.shape, vr.shape:", dotlike.shape, vr.shape)
# so = np.matmul(dotlike, vr)
# if verbose: print("so.shape:", so.shape)
# so = np.reshape(so, (-1, so.shape[-1]))
# slogits = np.reshape(slogits, (-1,)) # provided
# if verbose: print("so.shape,slogits.shape", so.shape, slogits.shape)
#
# #Step 4
# o = np.take(so, undo_sort, axis=0)
# logits = np.take(slogits, undo_sort, axis=0)
# if verbose: print("o.shape,o", o.shape, o)
# if verbose: print("logits.shape, logits", logits.shape, logits)
#
# #Step 5 (Provided)
# if n_hashes > 1:
# o = np.reshape(o, (n_hashes, seqlen, o.shape[-1]))
# logits = np.reshape(logits, (n_hashes, seqlen, 1))
# probs = np.exp(logits - fastmath.logsumexp(logits, axis=0, keepdims=True))
# o = np.sum(o * probs, axis=0)
#
# return(o)
# ```
# %% [markdown]
# Great! You have now written example code for most of the operations that are unique to the LSH version of self-attention. At this point you may be wondering what happens when the entries are not evenly distributed over the buckets the way they are in our example. It is possible, for example, for all of the `seqlen` entries to land in one bucket. Further, since the buckets are not aligned, our 'chunks' may be misaligned with the start of a bucket. The implementation addresses this by attending to adjacent chunks, as was described in the lecture:
#
# <img src = "C4W4_LN2_image13.PNG" height="400" width="750">
# <center><b>Figure 12: Misaligned Access, looking before and after </b></center>
#
# Hopefully, having implemented parts of this, you will appreciate this diagram more fully.
#
#
# %% [markdown]
# <a name="3.5"></a>
# ## Part 3.5 OurLSHSelfAttention
#
# You can examine the full implementation below. Areas we did not 'attend to' in our routines above include variable bucket sizes and masking. We will instantiate a layer of the full implementation below; we tried to use the same variable names above to make the full version easier to decipher. Note that some of the functionality we implemented in our routines is split between `attend` and `forward_unbatched`. We've inserted our version of the hash below, but use the original version of `attend`.
# %%
# original version from trax 1.3.4
def attend(
q,
k=None,
v=None,
q_chunk_len=None,
kv_chunk_len=None,
n_chunks_before=0,
n_chunks_after=0,
mask_fn=None,
q_info=None,
kv_info=None,
dropout=0.0,
rng=None,
):
"""Dot-product attention, with optional chunking and/or masking.
Args:
q: Query vectors, shape [q_len, d_qk]
k: Key vectors, shape [kv_len, d_qk]; or None
v: Value vectors, shape [kv_len, d_v]
q_chunk_len: Set to non-zero to enable chunking for query vectors
kv_chunk_len: Set to non-zero to enable chunking for key/value vectors
n_chunks_before: Number of adjacent previous chunks to attend to
n_chunks_after: Number of adjacent subsequent chunks to attend to
mask_fn: TODO(kitaev) doc
q_info: Query-associated metadata for masking
kv_info: Key-associated metadata for masking
dropout: Dropout rate
rng: RNG for dropout
Returns:
A tuple (output, dots_logsumexp). The output has shape [q_len, d_v], and
dots_logsumexp has shape [q_len]. The logsumexp of the attention
probabilities is useful for combining multiple rounds of attention (as in
LSH attention).
"""
assert v is not None
share_qk = k is None
if q_info is None:
q_info = np.arange(q.shape[-2], dtype=np.int32)
if kv_info is None and not share_qk:
kv_info = np.arange(v.shape[-2], dtype=np.int32)
# Split q/k/v into chunks along the time axis, if desired.
if q_chunk_len is not None:
q = np.reshape(q, (-1, q_chunk_len, q.shape[-1]))
q_info = np.reshape(q_info, (-1, q_chunk_len))
if share_qk:
assert kv_chunk_len is None or kv_chunk_len == q_chunk_len
k = q
kv_chunk_len = q_chunk_len
if kv_info is None:
kv_info = q_info
elif kv_chunk_len is not None:
# kv_info is not None, but reshape as required.
kv_info = np.reshape(kv_info, (-1, kv_chunk_len))
elif kv_chunk_len is not None:
k = np.reshape(k, (-1, kv_chunk_len, k.shape[-1]))
kv_info = np.reshape(kv_info, (-1, kv_chunk_len))
if kv_chunk_len is not None:
v = np.reshape(v, (-1, kv_chunk_len, v.shape[-1]))
if share_qk:
k = length_normalized(k)
k = k / np.sqrt(k.shape[-1])
# Optionally include adjacent chunks.
if q_chunk_len is not None or kv_chunk_len is not None:
assert q_chunk_len is not None and kv_chunk_len is not None
else:
assert n_chunks_before == 0 and n_chunks_after == 0
k = look_adjacent(k, n_chunks_before, n_chunks_after)
v = look_adjacent(v, n_chunks_before, n_chunks_after)
kv_info = look_adjacent(kv_info, n_chunks_before, n_chunks_after)
# Dot-product attention.
dots = np.matmul(q, np.swapaxes(k, -1, -2))
# Masking
if mask_fn is not None:
dots = mask_fn(dots, q_info[..., :, None], kv_info[..., None, :])
# Softmax.
dots_logsumexp = fastmath.logsumexp(dots, axis=-1, keepdims=True)
dots = np.exp(dots - dots_logsumexp)
if dropout > 0.0:
assert rng is not None
# Dropout is broadcast across the bin dimension
dropout_shape = (dots.shape[-2], dots.shape[-1])
#
keep_prob = tie_in(dots, 1.0 - dropout)
keep = fastmath.random.bernoulli(rng, keep_prob, dropout_shape)
multiplier = keep.astype(dots.dtype) / tie_in(keep, keep_prob)
dots = dots * multiplier
# The softmax normalizer (dots_logsumexp) is used by multi-round LSH attn.
out = np.matmul(dots, v)
out = np.reshape(out, (-1, out.shape[-1]))
dots_logsumexp = np.reshape(dots_logsumexp, (-1,))
return out, dots_logsumexp
# %% colab={} colab_type="code" id="ihFoYYBGKFVu"
class OurLSHSelfAttention(tl.LSHSelfAttention):
"""Our simplified LSH self-attention """
def forward_unbatched(self, x, mask=None, *, weights, state, rng, update_state):
attend_rng, output_rng = fastmath.random.split(rng)
w_q, w_v, w_o = weights
q = np.matmul(x, w_q)
v = np.matmul(x, w_v)
if update_state:
_, old_hash_rng = state
hash_rng, hash_subrng = fastmath.random.split(old_hash_rng)
# buckets = self.hash_vectors(q, hash_subrng, mask) # original
## use our version of hash
buckets = our_hash_vectors(
q, hash_subrng, self.n_buckets, self.n_hashes, mask=mask
)
s_buckets = buckets
if self._max_length_for_buckets:
length = self.n_hashes * self._max_length_for_buckets
if buckets.shape[0] < length:
s_buckets = np.concatenate(
[buckets, np.zeros(length - buckets.shape[0], dtype=np.int32)],
axis=0,
)
state = (s_buckets, hash_rng)
else:
buckets, _ = state
if self._max_length_for_buckets:
buckets = buckets[: self.n_hashes * x.shape[0]]
seqlen = x.shape[0]
assert int(buckets.shape[0]) == self.n_hashes * seqlen
ticker = tie_in(x, np.arange(self.n_hashes * seqlen, dtype=np.int32))
buckets_and_t = seqlen * buckets + (ticker % seqlen)
buckets_and_t = fastmath.stop_gradient(buckets_and_t)
# Hash-based sort ("s" at the start of variable names means "sorted")
sbuckets_and_t, sticker = fastmath.sort_key_val(
buckets_and_t, ticker, dimension=-1
)
_, undo_sort = fastmath.sort_key_val(sticker, ticker, dimension=-1)
sbuckets_and_t = fastmath.stop_gradient(sbuckets_and_t)
sticker = fastmath.stop_gradient(sticker)
undo_sort = fastmath.stop_gradient(undo_sort)
st = sticker % seqlen
sq = np.take(q, st, axis=0)
sv = np.take(v, st, axis=0)
mask_fn = functools.partial(
mask_self_attention,
causal=self.causal,
exclude_self=True,
masked=self.masked,
)
q_info = st
assert (mask is not None) == self.masked
kv_info = None
if self.masked:
# mask is a boolean array (True means "is valid token")
smask = np.take(mask, st, axis=0)
ones_like_mask = tie_in(x, np.ones_like(smask, dtype=np.int32))
kv_info = q_info * np.where(smask, ones_like_mask, -ones_like_mask)
## use original version of attend (could use ours but lacks masks and masking)
so, slogits = attend(
sq,
k=None,
v=sv,
q_chunk_len=self.chunk_len,
n_chunks_before=self.n_chunks_before,
n_chunks_after=self.n_chunks_after,
mask_fn=mask_fn,
q_info=q_info,
kv_info=kv_info,
dropout=self.attention_dropout,
rng=attend_rng,
)
# np.take(so, undo_sort, axis=0); np.take(slogits, undo_sort, axis=0) would
# also work, but these helpers include performance optimizations for TPU.
o = permute_via_gather(so, undo_sort, sticker, axis=0)
logits = permute_via_sort(slogits, sticker, buckets_and_t, axis=-1)
if self.n_hashes > 1:
o = np.reshape(o, (self.n_hashes, seqlen, o.shape[-1]))
logits = np.reshape(logits, (self.n_hashes, seqlen, 1))
probs = np.exp(logits - fastmath.logsumexp(logits, axis=0, keepdims=True))
o = np.sum(o * probs, axis=0)
assert o.shape == (seqlen, w_v.shape[-1])
out = np.matmul(o, w_o)
out = apply_broadcasted_dropout(out, self.output_dropout, output_rng)
return out, state
# %% colab={} colab_type="code" id="QG3yCwWV3zJd"
# Here we're going to try out our LSHSelfAttention
n_heads = 3
causal = False
masked = False
mask = None
chunk_len = 8
n_chunks_before = 0
n_chunks_after = 0
attention_dropout = 0.0
n_hashes = 5
n_buckets = 4
seq_len = 8
emb_len = 5
al = OurLSHSelfAttention(
n_heads=n_heads,
d_qk=3,
d_v=4,
causal=causal,
chunk_len=8,
n_chunks_before=n_chunks_before,
n_chunks_after=n_chunks_after,
n_hashes=n_hashes,
n_buckets=n_buckets,
use_reference_code=True,
attention_dropout=attention_dropout,
mode="train",
)
x = jax.random.uniform(jax.random.PRNGKey(0), (1, seq_len, emb_len), dtype=np.float32)
al_osa = fastmath.random.get_prng(1)
_, _ = al.init(tl.shapes.signature(x), rng=al_osa)
# %% colab={} colab_type="code" id="TzHug40iMe3S"
al(x)
# %% [markdown] colab_type="text" id="c5IBhMGjmg0z"
# <details>
# <summary>
# <font size="3"><b> Expected Output </b></font>
# </summary>
#
# **Expected Values**
# ```
# using jax
# using jax
# using jax
# DeviceArray([[[ 6.6842824e-01, -1.1364323e-01, -5.4430610e-01,
# 2.1126242e-01, -1.0988623e-02],
# [ 7.0949769e-01, -1.5455185e-01, -5.9923315e-01,
# 2.2719440e-01, 1.3833776e-02],
# [ 7.1442688e-01, -1.2046628e-01, -5.3956544e-01,
# 1.7320301e-01, -1.6552269e-02],
# [ 6.7178929e-01, -7.6611102e-02, -5.9399861e-01,
# 2.1236290e-01, 7.9482794e-04],
# [ 7.1518433e-01, -1.1359170e-01, -5.7821894e-01,
# 2.1304411e-01, 3.0598268e-02],
# [ 6.8235350e-01, -9.3979925e-02, -5.5341840e-01,
# 2.1608177e-01, -6.6673756e-04],
# [ 6.1286640e-01, -8.1027031e-02, -4.8148823e-01,
# 1.9373313e-01, 3.1555295e-02],
# [ 7.2203505e-01, -1.0199660e-01, -5.5215168e-01,
# 1.7872262e-01, -2.2289157e-02]]], dtype=float32)```
# %% [markdown]
# **Congratulations!** You have created a custom layer and become familiar with LSHSelfAttention.
# %%
# =====================================================================
# Next source file: /notebooks/2_Clasificacion_de_texto_transformadores_multiclase.ipynb
# Repo: https://github.com/cogitovsmachina/riiaa_2020_TNLP (MIT license, Jupyter Notebook)
# =====================================================================
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + papermill={"duration": 8.331994, "end_time": "2020-11-16T21:14:35.867395", "exception": false, "start_time": "2020-11-16T21:14:27.535401", "status": "completed"}
# importing the necessary frameworks
import numpy as np  # needed below for array handling; it was missing from the original imports
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from keras.callbacks import ReduceLROnPlateau
import cv2
import os
# + papermill={"duration": 0.11319, "end_time": "2020-11-16T21:14:36.073938", "exception": false, "start_time": "2020-11-16T21:14:35.960748", "status": "completed"}
# helper to read, grayscale and resize all images found under each label directory
labels = ['PNEUMONIA', 'NORMAL']
img_size = 150
def get_training_data(data_dir):
data = []
for label in labels:
path = os.path.join(data_dir, label)
class_num = labels.index(label)
for img in os.listdir(path):
try:
img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size
data.append([resized_arr, class_num])
except Exception as e:
print(e)
return np.array(data)
# + papermill={"duration": 79.119588, "end_time": "2020-11-16T21:15:55.281829", "exception": false, "start_time": "2020-11-16T21:14:36.162241", "status": "completed"}
# loading the train, test and validation splits from disk
train = get_training_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/train')
test = get_training_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/test')
val = get_training_data('../input/chest-xray-pneumonia/chest_xray/chest_xray/val')
# + papermill={"duration": 0.267914, "end_time": "2020-11-16T21:15:55.607158", "exception": false, "start_time": "2020-11-16T21:15:55.339244", "status": "completed"}
# value count of normal and pneumonia instances
l = []
for i in train:
if(i[1] == 0):
l.append("Pneumonia")
else:
l.append("Normal")
sns.set_style('darkgrid')
sns.countplot(l)
# + papermill={"duration": 0.758831, "end_time": "2020-11-16T21:15:56.429599", "exception": false, "start_time": "2020-11-16T21:15:55.670768", "status": "completed"}
# plotting or showing x ray images of normal and pneumonia lungs
plt.figure(figsize = (5,5))
plt.imshow(train[0][0], cmap='gray')
plt.title(labels[train[0][1]])
plt.figure(figsize = (5,5))
plt.imshow(train[-1][0], cmap='gray')
plt.title(labels[train[-1][1]])
# + papermill={"duration": 0.086713, "end_time": "2020-11-16T21:15:56.578332", "exception": false, "start_time": "2020-11-16T21:15:56.491619", "status": "completed"}
x_train = []
y_train = []
x_val = []
y_val = []
x_test = []
y_test = []
for feature, label in train:
x_train.append(feature)
y_train.append(label)
for feature, label in test:
x_test.append(feature)
y_test.append(label)
for feature, label in val:
x_val.append(feature)
y_val.append(label)
# + papermill={"duration": 0.590437, "end_time": "2020-11-16T21:15:57.232717", "exception": false, "start_time": "2020-11-16T21:15:56.642280", "status": "completed"}
# Normalize the data
x_train = np.array(x_train) / 255
x_val = np.array(x_val) / 255
x_test = np.array(x_test) / 255
# + papermill={"duration": 0.127266, "end_time": "2020-11-16T21:15:57.470537", "exception": false, "start_time": "2020-11-16T21:15:57.343271", "status": "completed"}
# reshape the data for the CNN (add the channel dimension)
x_train = x_train.reshape(-1, img_size, img_size, 1)
y_train = np.array(y_train)
x_val = x_val.reshape(-1, img_size, img_size, 1)
y_val = np.array(y_val)
x_test = x_test.reshape(-1, img_size, img_size, 1)
y_test = np.array(y_test)
# + papermill={"duration": 0.532693, "end_time": "2020-11-16T21:15:58.113865", "exception": false, "start_time": "2020-11-16T21:15:57.581172", "status": "completed"}
# With data augmentation to prevent overfitting and handling the imbalance in dataset
# setting the flip and zooming the image
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range = 30, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.2, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip = True, # randomly flip images
vertical_flip=False) # randomly flip images
datagen.fit(x_train)
# + papermill={"duration": 2.935454, "end_time": "2020-11-16T21:16:01.126316", "exception": false, "start_time": "2020-11-16T21:15:58.190862", "status": "completed"}
# Sequential model: strided/padded convolutions with ReLU activations, batch normalization and dropout
model = Sequential()
model.add(Conv2D(32 , (3,3) , strides = 1 , padding = 'valid' , activation = 'relu' , input_shape = (150,150,1)))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'valid'))
model.add(Conv2D(64 , (3,3) , strides = 1 , padding = 'valid' , activation = 'relu'))
model.add(Dropout(0.1))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'valid'))
model.add(Conv2D(64 , (3,3) , strides = 1 , padding = 'valid' , activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'valid'))
model.add(Conv2D(128 , (3,3) , strides = 1 , padding = 'valid' , activation = 'relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))
model.add(Conv2D(256 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))
model.add(Flatten())
model.add(Dense(units = 128 , activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(units = 1 , activation = 'sigmoid'))
model.compile(optimizer = "rmsprop" , loss = 'binary_crossentropy' , metrics = ['accuracy'])
model.summary()
# + papermill={"duration": 0.075157, "end_time": "2020-11-16T21:16:01.283958", "exception": false, "start_time": "2020-11-16T21:16:01.208801", "status": "completed"}
# reduce the learning rate when validation accuracy stops improving
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience = 2, verbose=1,factor=0.3, min_lr=0.000001)
# + papermill={"duration": 150.660422, "end_time": "2020-11-16T21:18:32.009443", "exception": false, "start_time": "2020-11-16T21:16:01.349021", "status": "completed"}
history = model.fit(datagen.flow(x_train,y_train, batch_size = 32) ,epochs = 12 , validation_data = datagen.flow(x_val, y_val) ,callbacks = [learning_rate_reduction])
# + papermill={"duration": 1.71235, "end_time": "2020-11-16T21:18:34.534855", "exception": false, "start_time": "2020-11-16T21:18:32.822505", "status": "completed"}
# evaluating the model on the test set
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Loss of the model is - ", test_loss)
print("Accuracy of the model is - ", test_acc*100, "%")
# + papermill={"duration": 1.49726, "end_time": "2020-11-16T21:18:36.827090", "exception": false, "start_time": "2020-11-16T21:18:35.329830", "status": "completed"}
# plotting training and validation accuracy and loss over the epochs
epochs = [i for i in range(12)]
fig , ax = plt.subplots(1,2)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig.set_size_inches(20,10)
ax[0].plot(epochs , train_acc , 'go-' , label = 'Training Accuracy')
ax[0].plot(epochs , val_acc , 'ro-' , label = 'Validation Accuracy')
ax[0].set_title('Training & Validation Accuracy')
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[1].plot(epochs , train_loss , 'g-o' , label = 'Training Loss')
ax[1].plot(epochs , val_loss , 'r-o' , label = 'Validation Loss')
ax[1].set_title('Training & Validation Loss')
ax[1].legend()
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Training & Validation Loss")
plt.show()
# + papermill={"duration": 1.136349, "end_time": "2020-11-16T21:18:38.742482", "exception": false, "start_time": "2020-11-16T21:18:37.606133", "status": "completed"}
# first ten predictions
predictions = model.predict_classes(x_test)
predictions = predictions.reshape(1,-1)[0]
predictions[:10]
# + papermill={"duration": 0.82318, "end_time": "2020-11-16T21:18:40.336748", "exception": false, "start_time": "2020-11-16T21:18:39.513568", "status": "completed"}
# classification report with per-class precision, recall and F1
print(classification_report(y_test, predictions, target_names = ['Pneumonia (Class 0)','Normal (Class 1)']))
# + papermill={"duration": 0.903504, "end_time": "2020-11-16T21:18:42.010367", "exception": false, "start_time": "2020-11-16T21:18:41.106863", "status": "completed"}
# confusion matrix
cm = confusion_matrix(y_test,predictions)
cm
_grades_at_GCSE',
'CH_Rates_of_Children_Looked_After',
'CH_%_of_pupils_whose_first_language_is_not_English',
'CH_%_children_living_in_out-of-work_households',
'HE_Male_life_expectancy',
'HE_Female_life_expectancy',
'HE_Life_satisfaction_score',
'HE_Worthwhileness_score',
'HE_Happiness_score', 'HE_Anxiety_score',
'HE_Mortality_rate_from_causes_considered_preventable'
]
for col in cols_to_convert_int:
london[col] = london[col].astype("float")
# -
#Checking column types
london.info()
#Descriptive Statistics
round(london.describe().T,2)
# ## Exploratory Data Analysis
# +
# Bar Plot For Population
london_sort_population = london.sort_values("DE_Population_Estimate")
plt.figure(figsize=(15,10),dpi=200),
plt.style.use('default')
ax = sns.barplot(x="Borough", y="DE_Population_Estimate", data=london_sort_population)
plt.title("Population", weight="bold", c="red", fontsize=15)
plt.xlabel("",weight="bold",c="k")
plt.xticks(rotation=90, weight="bold")
plt.ylabel("Population",weight= "bold",c="k")
plt.yticks(weight="bold")
#for p in ax.patches:
# ax.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.4, p.get_height()),
# ha='center', va='bottom',
# color= 'black', size=8)
x_coordinates = [0, 32]
y_median = [(london["DE_Population_Estimate"].mean(),2), (london["DE_Population_Estimate"].mean(),2)]
plt.plot(x_coordinates, y_median,linestyle = '--', c="gray")
plt.text(.5,280000,'Average Population = 275831',fontsize = 13,color = 'black')
plt.show()
# +
# Bar Plot For House Price
london_sort_housePrice = london.sort_values("HO_Median_House_Price")
plt.figure(figsize=(15,10),dpi=200),
plt.style.use('default')
ax = sns.barplot(x="Borough", y="HO_Median_House_Price", data=london_sort_housePrice)
plt.title("Median House Price", weight="bold", c="red", fontsize=15)
plt.xlabel("",weight="bold",c="k")
plt.xticks(rotation=90, weight="bold")
plt.ylabel("Median House Price",weight= "bold",c="k")
plt.yticks(weight="bold")
#for p in ax.patches:
# ax.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.4, p.get_height()),
# ha='center', va='bottom',
# color= 'black', size=8)
x_coordinates = [0, 32]
y_median = [(london["HO_Median_House_Price"].median(),2), (london["HO_Median_House_Price"].median(),2)]
plt.plot(x_coordinates, y_median,linestyle = '--', c="gray")
plt.text(.5,420000,'Median House Price = 408625',fontsize = 13,color = 'black')
plt.show()
# -
# ## Resample all values between -1 and 1
#set index Borough name
london = london.set_index('Borough')
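
# Every indicator below follows the same rescaling pattern: `MinMaxScaler(feature_range=(-1,1))`
# maps the raw column into [-1, 1], and columns where a lower raw value is preferable are
# multiplied by -1 so that +1 always means "more attractive". A minimal sketch of a helper this
# pattern could be factored into (the `rescale` function is hypothetical and is not used by the
# analysis below):

# +
def rescale(series, invert=False):
    # scale a pandas Series into [-1, 1]; flip the sign for "lower is better" indicators
    scaled = MinMaxScaler(feature_range=(-1, 1)).fit_transform(series.values.reshape(-1, 1))
    return -scaled if invert else scaled
# -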
# ### Criteria-1: Demography
london
# +
#Rescaling Demography Criteria
london["DE.Ind_Pop"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,1].values.reshape(-1,1))*-1)
london["DE.Ind_InlAre"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,2].values.reshape(-1,1)))
london["DE.Ind_AvgAge"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,3].values.reshape(-1,1))*-1)
london["DE.Ind_Age_0_15"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,4].values.reshape(-1,1)))
london["DE.Ind_Mig"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,5].values.reshape(-1,1))*-1)
#Filtering Demography columns
col_list_DE = london.columns[london.columns.str.contains('DE_')]
col_list_DE_Ind = london.columns[london.columns.str.contains('DE.Ind')]
#Creating Demography Data Frame
london_DE = london[col_list_DE]
london_DE_Ind = london[col_list_DE_Ind]
round(london_DE_Ind,2)
# +
# Heatmap for Demography Criteria
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_DE_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5],["Population","Area","Avg.Age","Age(0-15)","Migrant"],weight="bold",rotation=0,size=9)
plt.title("Demography Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# london_DE_Ind["DE_Total"] = london_DE_Ind.sum(axis=1)
round(london_DE_Ind,2)
# +
#AHP for Demography
london_DE_Ind_Weight = [4.6,26.4,9.8,15.5,43.6]
london_DE_Ind["DE_Total"] = ((london_DE_Ind["DE.Ind_Pop"]*london_DE_Ind_Weight[0])+
(london_DE_Ind["DE.Ind_InlAre"]*london_DE_Ind_Weight[1])+
(london_DE_Ind["DE.Ind_AvgAge"]*london_DE_Ind_Weight[2])+
(london_DE_Ind["DE.Ind_Age_0_15"]*london_DE_Ind_Weight[3])+
(london_DE_Ind["DE.Ind_Mig"]*london_DE_Ind_Weight[4]))
london_DE_Ind["DE_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_DE_Ind.iloc[:,5].values.reshape(-1,1)))
round(london_DE_Ind,2)
# +
# Heatmap for Demography Criteria_Weighted
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_DE_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5,5.5],["Population","Area","Avg.Age","Age(0-15)","Migrant","Total"],weight="bold",rotation=0,size=9)
plt.title("Demography Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-2: Diversity
#
# +
#Rescaling Diversity Criteria
london["DI.Ind_BornAbroad"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,6].values.reshape(-1,1))*-1)
london["DI.Ind_LarMigPop"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,7].values.reshape(-1,1))*-1)
london["DI.Ind_MaiLanNotEng"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,8].values.reshape(-1,1))*-1)
london["DI.Ind_NewMigr"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,9].values.reshape(-1,1))*-1)
#Filtering Diversity columns
col_list_DI = london.columns[london.columns.str.contains('DI_')]
col_list_DI_Ind = london.columns[london.columns.str.contains('DI.Ind')]
#Creating Diversity Data Frame
london_DI = london[col_list_DI]
london_DI_Ind = london[col_list_DI_Ind]
round(london_DI_Ind,2)
# +
# Heatmap for Diversity Criteria
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_DI_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5],["Born Abroad","Largest Migrant","Main Lang Not Eng","New Migrant"],weight="bold",rotation=0,size=9)
plt.title("Diversity Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# +
#AHP for Diversity
london_DI_Ind_Weight = [4.9,25.9,13.6,55.6]
london_DI_Ind["DI_Total"] = ((london_DI_Ind.iloc[:,0]*london_DE_Ind_Weight[0])+
(london_DI_Ind.iloc[:,1]*london_DE_Ind_Weight[1])+
(london_DI_Ind.iloc[:,2]*london_DE_Ind_Weight[2])+
(london_DI_Ind.iloc[:,3]*london_DE_Ind_Weight[3]))
london_DI_Ind["DI_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_DI_Ind.iloc[:,4].values.reshape(-1,1)))
round(london_DI_Ind.sort_values("DI_Total",ascending=False).head(10),2)
# +
# Heatmap for Diversity Criteria
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_DI_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5],["Born Abroad","Largest Migrant","Main Lang Not Eng","New Migrant","Total"],weight="bold",rotation=0,size=9)
plt.title("Diversity Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-3: Labour Market
# +
#Rescaling Labour Market Criteria
london["LM.Ind_EmpRat"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,10].values.reshape(-1,1)))
london["LM.Ind_HouMedInc"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,11].values.reshape(-1,1)))
london["LM.Ind_AdlVol"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,12].values.reshape(-1,1)))
london["LM.Ind_NumJob"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,13].values.reshape(-1,1)))
london["LM.Ind_NumBus"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,14].values.reshape(-1,1)))
#Filtering Labour Market columns
col_list_LM = london.columns[london.columns.str.contains('LM_')]
col_list_LM_Ind = london.columns[london.columns.str.contains('LM.Ind')]
#Creating Labour Market Data Frame
london_LM = london[col_list_LM]
london_LM_Ind = london[col_list_LM_Ind]
round(london_LM_Ind,2)
# +
# Heatmap for Labour Market
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_LM_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5,5.5],["Empl. Rate","Med House Income","Adul Volu","NumberOfJobs","NumberOfBusiness","Total"],weight="bold",rotation=0,size=9)
plt.title("Labour Market Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# +
#AHP for Labour Market
london_LM_Ind_Weight = [18.9,27.3,3.8,36,13.9]
london_LM_Ind["LM_Total"] = ((london_LM_Ind.iloc[:,0]*london_LM_Ind_Weight[0])+
(london_LM_Ind.iloc[:,1]*london_LM_Ind_Weight[1])+
(london_LM_Ind.iloc[:,2]*london_LM_Ind_Weight[2])+
(london_LM_Ind.iloc[:,3]*london_LM_Ind_Weight[3])+
(london_LM_Ind.iloc[:,3]*london_LM_Ind_Weight[3]))
london_LM_Ind["LM_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_LM_Ind.iloc[:,4].values.reshape(-1,1)))
round(london_LM_Ind.sort_values("LM_Total",ascending=False).head(10),2)
# -
# ### Criteria-4: Safety
# +
#Resampling Safety Criteria
london["SA.Ind_CriRat"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,15].values.reshape(-1,1))*-1)
london["SA.Ind_Fires"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,16].values.reshape(-1,1))*-1)
london["SA.Ind_AmbInc"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,17].values.reshape(-1,1))*-1)
#Filtering Safety columns
col_list_SA = london.columns[london.columns.str.contains('SA_')]
col_list_SA_Ind = london.columns[london.columns.str.contains('SA.Ind')]
#Creating Safety Data Frame
london_SA = london[col_list_SA]
london_SA_Ind = london[col_list_SA_Ind]
#AHP for Safety
london_SA_Ind_Weight = [73.1,8.1,18.8]
london_SA_Ind["SA_Total"] = ((london_SA_Ind.iloc[:,0]*london_SA_Ind_Weight[0])+
(london_SA_Ind.iloc[:,1]*london_SA_Ind_Weight[1])+
(london_SA_Ind.iloc[:,2]*london_SA_Ind_Weight[2]))
london_SA_Ind["SA_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_SA_Ind.iloc[:,3].values.reshape(-1,1)))
print(round(london_SA_Ind,2))
# Heatmap for Safety
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_SA_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5],["Crime Rate","Fires","Ambulance Incidents","Total"],weight="bold",rotation=0,size=9)
plt.title("Safety Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-5: Housing
# +
#Resampling Housing Criteria
london["HA.Ind_HouPri"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,18].values.reshape(-1,1))*-1)
london["HA.Ind_CouTax"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,19].values.reshape(-1,1))*-1)
london["HA.Ind_NewHom"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,20].values.reshape(-1,1)))
#Filtering Housing columns
col_list_HA = london.columns[london.columns.str.contains('HA_')]
col_list_HA_Ind = london.columns[london.columns.str.contains('HA.Ind')]
#Creating Housing Data Frame
london_HA = london[col_list_HA]
london_HA_Ind = london[col_list_HA_Ind]
#AHP for Housing
london_HA_Ind_Weight = [74,9.4,16.7]
london_HA_Ind["HA_Total"] = ((london_HA_Ind.iloc[:,0]*london_SA_Ind_Weight[0])+
(london_HA_Ind.iloc[:,1]*london_SA_Ind_Weight[1])+
(london_HA_Ind.iloc[:,2]*london_SA_Ind_Weight[2]))
london_HA_Ind["HA_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_HA_Ind.iloc[:,3].values.reshape(-1,1)))
print(round(london_HA_Ind,2))
# Heatmap for Housing
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_HA_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5],["House Price","Council Tax","New Homes","Total"],weight="bold",rotation=0,size=9)
plt.title("Housing Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-6: Environment
# +
#Rescaling Environment Criteria
london["EN.Ind_AreGre"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,21].values.reshape(-1,1)))
london["EN.Ind_CarEmi"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,22].values.reshape(-1,1))*-1)
london["EN.Ind_WasRec"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,23].values.reshape(-1,1)))
#Filtering Environment columns
col_list_EN = london.columns[london.columns.str.contains('EN_')]
col_list_EN_Ind = london.columns[london.columns.str.contains('EN.Ind')]
#Creating Environment Data Frame
london_EN = london[col_list_EN]
london_EN_Ind = london[col_list_EN_Ind]
#AHP for Environment
london_EN_Ind_Weight = [63.7,25.8,10.5]
london_EN_Ind["EN_Total"] = ((london_EN_Ind.iloc[:,0]*london_EN_Ind_Weight[0])+
(london_EN_Ind.iloc[:,1]*london_EN_Ind_Weight[1])+
(london_EN_Ind.iloc[:,2]*london_EN_Ind_Weight[2]))
london_EN_Ind["EN_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_EN_Ind.iloc[:,3].values.reshape(-1,1)))
print(round(london_EN_Ind,2))
# Heatmap for Environment
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_EN_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5],["Greenspace","Carbon Emissions","Waste Recycling","Total"],weight="bold",rotation=0,size=9)
plt.title("Environment Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-7: Transport
# +
#Rescaling Transport Criteria
london["TR.Ind_NumCar"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,24].values.reshape(-1,1))*-1)
london["TR.Ind_AduCycle"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,25].values.reshape(-1,1)))
london["TR.Ind_TraAcc"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,26].values.reshape(-1,1)))
#Filtering Transport columns
col_list_TR = london.columns[london.columns.str.contains('TR_')]
col_list_TR_Ind = london.columns[london.columns.str.contains('TR.Ind')]
#Creating Transport Data Frame
london_TR = london[col_list_TR]
london_TR_Ind = london[col_list_TR_Ind]
#AHP for Transport
london_TR_Ind_Weight = [12.7,18.6,68.7]
london_TR_Ind["TR_Total"] = ((london_TR_Ind.iloc[:,0]*london_TR_Ind_Weight[0])+
(london_TR_Ind.iloc[:,1]*london_TR_Ind_Weight[1])+
(london_TR_Ind.iloc[:,2]*london_TR_Ind_Weight[2]))
london_TR_Ind["TR_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_TR_Ind.iloc[:,3].values.reshape(-1,1)))
print(round(london_TR_Ind,2))
# Heatmap for Transport
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_TR_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5],["Number of Cars","Adults Who Cycle","Public Transport Acc.","Total"],weight="bold",rotation=0,size=9)
plt.title("Transport Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-8: Children
# +
#Resampling Children Criteria
london["CH.Ind_AchGCSE"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,27].values.reshape(-1,1)))
london["CH.Ind_ChiLooAft"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,28].values.reshape(-1,1))*-1)
london["CH.Ind_PupFLNotEng"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,29].values.reshape(-1,1))*-1)
london["CH.Ind_LivOOHou"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,30].values.reshape(-1,1))*-1)
#Filtering Children columns
col_list_CH= london.columns[london.columns.str.contains('CH_')]
col_list_CH_Ind = london.columns[london.columns.str.contains('CH.Ind')]
#Creating Children Data Frame
london_CH = london[col_list_CH]
london_CH_Ind = london[col_list_CH_Ind]
#AHP for Children
london_CH_Ind_Weight = [48.3,24.7,9.4,17.6]
london_CH_Ind["CH_Total"] = ((london_CH_Ind.iloc[:,0]*london_CH_Ind_Weight[0])+
(london_CH_Ind.iloc[:,1]*london_CH_Ind_Weight[1])+
(london_CH_Ind.iloc[:,2]*london_CH_Ind_Weight[2])+
(london_CH_Ind.iloc[:,3]*london_CH_Ind_Weight[3]))
london_CH_Ind["CH_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_CH_Ind.iloc[:,4].values.reshape(-1,1)))
print(round(london_CH_Ind,2))
# Heatmap for Children
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_CH_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5],["Achievement at GCSE","Children Looked After","Pupil First Lan. Not Eng","Out-of-work Living","Total"],weight="bold",rotation=0,size=9)
plt.title("Children Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
# ### Criteria-9: Health
# +
#Rescaling Health Criteria
london["HE.Ind_MaleLE"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,31].values.reshape(-1,1)))
london["HE.Ind_FemaleLE"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,32].values.reshape(-1,1)))
london["HE.Ind_LifSat"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,33].values.reshape(-1,1)))
london["HE.Ind_WorWhi"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,34].values.reshape(-1,1)))
london["HE.Ind_Happ"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,35].values.reshape(-1,1)))
london["HE.Ind_Anx"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,36].values.reshape(-1,1))*-1)
london["HE.Ind_MorRat"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london.iloc[:,37].values.reshape(-1,1))*-1)
#Filtering Health columns
col_list_HE = london.columns[london.columns.str.contains('HE_')]
col_list_HE_Ind = london.columns[london.columns.str.contains('HE.Ind')]
#Creating Health Data Frame
london_HE = london[col_list_HE]
london_HE_Ind = london[col_list_HE_Ind]
#AHP for Health
london_HE_Ind_Weight = [26.2,26.2,9.2,3.3,16.5,6,12.7]
london_HE_Ind["HE_Total"] = ((london_HE_Ind.iloc[:,0]*london_HE_Ind_Weight[0])+
                             (london_HE_Ind.iloc[:,1]*london_HE_Ind_Weight[1])+
                             (london_HE_Ind.iloc[:,2]*london_HE_Ind_Weight[2])+
                             (london_HE_Ind.iloc[:,3]*london_HE_Ind_Weight[3])+
                             (london_HE_Ind.iloc[:,4]*london_HE_Ind_Weight[4])+
                             (london_HE_Ind.iloc[:,5]*london_HE_Ind_Weight[5])+
                             (london_HE_Ind.iloc[:,6]*london_HE_Ind_Weight[6]))
london_HE_Ind["HE_Total"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_HE_Ind.iloc[:,7].values.reshape(-1,1)))
print(round(london_HE_Ind,2))
# Heatmap for Health
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_HE_Ind,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5,5.5,6.5],["Male Life Exp.","Female Life Exp.","Life Satisfation","Worthwhileness","Happiness","Anxiety","Mortality Rate","Total"],weight="bold",rotation=0,size=9)
plt.title("Health Indices Values",weight="bold",c="red", fontsize=15)
plt.show()
# -
london_criteria_result = (london_DE_Ind["DE_Total"]*2.9)+(london_DI_Ind["DI_Total"]*4.4)+(london_LM_Ind["LM_Total"]*2.5)+(london_SA_Ind["SA_Total"]*23.9)+(london_HA_Ind["HA_Total"]*7.2)+(london_EN_Ind["EN_Total"]*11.2)+(london_TR_Ind["TR_Total"]*5.9)+(london_CH_Ind["CH_Total"]*22)+(london_HE_Ind["HE_Total"]*20)
london_criteria_result = pd.DataFrame(london_criteria_result)
london_criteria_result
# +
#Concat All Indices Total columns
london_criteria_total = [(london_DE_Ind["DE_Total"]),
london_DI_Ind["DI_Total"], london_LM_Ind["LM_Total"],london_SA_Ind["SA_Total"],
london_HA_Ind["HA_Total"],london_EN_Ind["EN_Total"],london_TR_Ind["TR_Total"],
london_CH_Ind["CH_Total"],london_HE_Ind["HE_Total"]]
london_criteria = pd.concat(london_criteria_total,axis=1)
london_criteria["Result"] = (london_DE_Ind["DE_Total"]*2.9)+(london_DI_Ind["DI_Total"]*4.4)+(london_LM_Ind["LM_Total"]*2.5)+(london_SA_Ind["SA_Total"]*23.9)+(london_HA_Ind["HA_Total"]*7.2)+(london_EN_Ind["EN_Total"]*11.2)+(london_TR_Ind["TR_Total"]*5.9)+(london_CH_Ind["CH_Total"]*22)+(london_HE_Ind["HE_Total"]*20)
london_criteria["Result"] = (MinMaxScaler(feature_range = (-1,1)).fit_transform(london_criteria.iloc[:,9].values.reshape(-1,1)))
london_criteria
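
# The criterion-level multipliers used above (2.9, 4.4, 2.5, 23.9, 7.2, 11.2, 5.9, 22, 20) act as
# percentage weights over the nine criteria. A quick sanity check that they sum to 100
# (`criteria_weights` is introduced here only for this check):

# +
criteria_weights = [2.9, 4.4, 2.5, 23.9, 7.2, 11.2, 5.9, 22, 20]
print(round(sum(criteria_weights), 1))  # 100.0
# -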
# +
# Heatmap for Results
plt.figure(figsize=(16,16),dpi=200),
sns.set(font_scale=1)
sns.heatmap(london_criteria,annot=True,fmt=".2", linewidths=1, cmap="Oranges",cbar_kws={'label': 'Range','orientation':'vertical'})
plt.style.use('default')
plt.ylabel("")
plt.xlabel("")
plt.yticks(weight="bold",rotation=0, size=9)
plt.xticks([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5],["Demography","Diversity","Labour Market","Safety","Housing","Environment","Transport","Children","Health","Total"],weight="bold",rotation=0,size=9)
plt.title("Optimal Boroughs Based On Criteria",weight="bold",c="red", fontsize=15)
plt.show()
# -
#Top 20 Borough Table
round(london_criteria.sort_values("Result",ascending=False).head(20),5)
london_criteria.to_csv("london-borough-result.csv")
| 33,210 |
/4.RNN/12_SMS_Spam_RNN_100단어.ipynb
|
a50fb63595afeb9141179eaf373c2477fa733357
|
[] |
no_license
|
leele91/Deep-Learning
|
https://github.com/leele91/Deep-Learning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 49,240 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 1 - Remember*
import numpy as np
import tensorflow as tf
np.random.seed(101)
tf.set_random_seed(101)
rand_a = np.random.uniform(0,100,(5,5))
rand_a
rand_b = np.random.uniform(0,100,(5,1))
rand_b
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
add_op = a + b
mul_op = a * b
# # Create graphs to use these operations
with tf.Session() as sess:
# we use feed dictionaries to supply values for the placeholders
add_result = sess.run(add_op,feed_dict={a:rand_a, b:rand_b})
print("Addition result \n")
print(add_result)
mul_result = sess.run(mul_op,feed_dict={a:rand_a, b:rand_b})
print('\n Multiplication Result \n')
print(mul_result)
# # Neural Network
n_features = 10
n_dense_neurons = 3
# common to use None in the shape because we don't know the size of the batch
x = tf.placeholder(tf.float32,(None,n_features))
# +
#weights
W = tf.Variable(tf.random_normal([n_features,n_dense_neurons]))
# bias term
b = tf.Variable(tf.ones([n_dense_neurons]))
# -
xW = tf.matmul(x,W)
z = tf.add(xW, b)
# +
# our activation function will be sigmoid in this example
a = tf.sigmoid(z)
# -
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
layer_out = sess.run(a, feed_dict={x:np.random.random([1,n_features])})
print(layer_out)
# # Simple Regression Example
x_data = np.linspace(0,10,10) + np.random.uniform(-1.5,1.5,10)
x_data
y_label = np.linspace(0,10,10) + np.random.uniform(-1.5,1.5,10)
y_label
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(x_data,y_label,'*')
# y = mx + b
np.random.rand(2)
# two random numbers
m = tf.Variable(0.44)
b = tf.Variable(0.87)
# # Creating our cost function
# +
# This measures how far the predictions are from the true values so the optimizer can tune m and b
error = 0
for x,y in zip(x_data, y_label):
#y_hat is my predicted value
y_hat = m*x + b
#mean squared error function
error += (y - y_hat)**2
# -
# # Optimization
# +
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(error)
# -
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
#how many steps are we going to run
training_steps = 100
for i in range(training_steps):
sess.run(train)
final_slope, final_intercep = sess.run([m,b])
# +
x_test = np.linspace(-1,11,10)
# y = mx + b
y_prediction_plot = final_slope * x_test + final_intercep
#plotting predicted values
plt.plot(x_test,y_prediction_plot,'r')
#our current data from the dataset
plt.plot(x_data, y_label, '*')
# -
lse, area_l)
self.build_kd_tree(X_r, node, True, (d+1)%len(x_m), False, area_r)
def radius_neighbors(self, x, r):
# Here we store the neighbors found
self.neighbors = []
self.r_neighbors(x, self.kd_tree, 0, r)
neighbors = self.neighbors
# Make sure to clear the attribute afterwards.
self.neighbors = None
return neighbors
def r_neighbors(self, x, node, d, r):
# Check whether the point lies outside the hypercube defined by the current node.
if not all(node.area[2*i] <= x[i] <= node.area[2*i+1] for i in range(len(x))):
# For each dimension, check whether the point lies within the corresponding
# sides of the hypercube
p = []
for i in range(len(x)):
# If it does not, store the coordinate of the hypercube side that the point
# falls outside of.
if node.area[2*i] > x[i]: p.append(node.area[2*i])
elif x[i] > node.area[2*i+1]: p.append(node.area[2*i+1])
else: p.append(x[i])
# Compute the distance between the point and the closest point on the hypercube
# boundary. If it is greater than the radius, this branch does not need to be
# explored any further.
dist = self.d(np.array(p), x)
if dist > r: return
# Compute the distance between the point and the current node, and check whether
# it is smaller than the radius
dist = self.d(x, node.x)
if dist < r: self.neighbors.append(node.x)
# Visit first the subtree whose condition the point satisfies, hoping that the
# second child can then be discarded cheaply.
# If neither condition holds, traverse the left child first (if not null) and then the right one.
if x[d] <= node.area[2*d+1] and node.childs[0] != None:
self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)
if node.childs[1] != None:
self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
elif x[d] >= node.area[2*d] and node.childs[1] != None:
self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
if node.childs[0] != None:
self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)
elif node.childs[0] != None:
self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)
if node.childs[1] != None:
self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
elif node.childs[1] != None:
self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
# -
# ### **KDE**
class KDE:
def __init__(self, X):
self.X = X
# We use the KNN structure to retrieve the neighbors
self.knn = KNN(X)
self.knn.build_kd_tree()
def gauss(self, x, h):
return e**(-np.dot(x,x)/(2*(h**2))) / sqrt(2*pi*h**2)
def p(self, x, dist, h=1):
neighbors = self.knn.radius_neighbors(x, dist)
N = len(neighbors)
if N==0: return 0
return sum(self.gauss((x-n)/h, h) for n in neighbors)/(N*h)
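
# For reference, the estimator implemented in `KDE.p` is a kernel density estimate restricted to
# the neighbors returned by the radius query: p(x) ≈ (1/(N·h)) · Σ_n K((x − x_n)/h),
# with a Gaussian kernel K, bandwidth h, and N the number of neighbors found within `dist`.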
# ## **Reading the Data**
# +
normal1 = stats.norm(0, 0.2)
normal2 = stats.norm(0.5, 0.1)
normal3 = stats.norm(-0.2, 0.2)
data = []
for _ in range(100):
data.append(np.array([normal1.rvs(1)[0], normal2.rvs(1)[0]]))
data.append(np.array([normal2.rvs(1)[0], normal1.rvs(1)[0]]))
data.append(np.array([normal3.rvs(1)[0], normal3.rvs(1)[0]]))
plt.plot([d[0] for d in data], [d[1] for d in data], 'o')
plt.show()
# -
# ## **Results**
# +
kde = KDE(data)
def p(x): return kde.p(x, 0.5, 2)
x = y = np.linspace(-0.7, 0.8, 100)
X, Y = np.meshgrid(x,y)
Z = [[0 for _ in range(len(X[0]))] for _ in range(len(X))]
for i in range(len(X)):
for j in range(len(X[0])):
Z[i][j] = p(np.array([X[i][j], Y[i][j]]))
plt.contour(X, Y, Z, 20)
plt.show()
# -
# + colab={"base_uri": "https://localhost:8080/", "height": 75} id="NUUSVNHxhfoR" outputId="03309bf0-ad9b-4964-dc08-62cdf8094169"
from google.colab import files
uploaded = files.upload()
filename = list(uploaded.keys())[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="hkPvQZdBinze" outputId="b0ea31bf-a7ff-4088-abc9-2c811b189c37"
df = pd.read_csv(filename, encoding='latin1')
df.head()
# + [markdown] id="-vySAa6RjHPe"
# ### Data preprocessing
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="dU7ThhxgjE1W" outputId="880099b2-5ad9-44b1-9723-f52fae35bdc0"
del df['Unnamed: 2']
del df['Unnamed: 3']
del df['Unnamed: 4']
df['v1'] = df['v1'].replace(['ham','spam'],[0,1]) # label encoding; sklearn's LabelEncoder would do the same
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="ECdtMpct2pw-" outputId="6d98bdf3-df26-4ee6-b04c-29f26113a710"
df.info()
# + colab={"base_uri": "https://localhost:8080/"} id="tatoFDlFjYqt" outputId="c1ed914d-682b-4057-b0a1-9ae6ce4d116d"
# Check for null values
df.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="PX145le8jqW1" outputId="d126dda3-0e89-432e-90c7-d902195199d2"
# Check for duplicate messages
df['v2'].nunique()
# + id="9WOU7jca2-gM"
# Remove duplicate messages
df = df.drop_duplicates('v2', keep='first')
# + colab={"base_uri": "https://localhost:8080/"} id="mMYdf3W22-UG" outputId="6c7fa99b-8359-4630-da90-155b5c3867b2"
df['v1'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="cY6XfxoU2-RK" outputId="b1c36d1d-1cb2-4b64-f67c-405a0e553b80"
X_data = df['v2'].values
y_data = df['v1'].values
print('Number of SMS messages:', len(X_data))
print('Number of labels:', len(y_data))
# + colab={"base_uri": "https://localhost:8080/"} id="uKgx1o3Ek3-m" outputId="85d0a8be-4e26-44bb-e3d1-882c71314881"
# Build the vocabulary and check its size
t = Tokenizer()
t.fit_on_texts(X_data)
sequences = t.texts_to_sequences(X_data) # convert each word to its integer index
vocab_size = len(t.word_index) + 1
print('Vocabulary size : %d' % vocab_size)
# + [markdown] id="PNRBdJFkDuE6"
# ### Pad every sequence to a fixed length of 100
# + id="clnF0u3YFwH1"
X_data = sequences
# + colab={"base_uri": "https://localhost:8080/"} id="agZArLmEqdY0" outputId="2d655a10-f708-42cb-ec75-397fb15a8812"
data = pad_sequences(X_data, maxlen=100)
data.shape
# + id="VMO65Vokqhwc"
# Create train & test splits
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
data, y_data, stratify=y_data, test_size=0.2, random_state=seed
)
# + colab={"base_uri": "https://localhost:8080/"} id="mBVfEb8xqxyv" outputId="c51df1fb-f49d-4d6d-ce7b-b1c8bb0a7421"
X_train.shape, X_test.shape
# + [markdown] id="3hfMxlg2rEdT"
# ### Model definition / compilation / training
# + id="Iakst91MrDr9"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Dense, SimpleRNN
# + colab={"base_uri": "https://localhost:8080/"} id="cgXvjEt2rPBz" outputId="70a67184-2b0f-4282-99c5-571a288ccb5b"
model = Sequential()
model.add(Embedding(vocab_size, 32)) # embedding vector dimension of 32
model.add(SimpleRNN(32)) # hidden size of the SimpleRNN cell is 32
model.add(Dense(1, activation='sigmoid'))
model.summary()
# + id="BX57z3gIre49"
model.compile(loss='binary_crossentropy',
optimizer='rmsprop', metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="m2UzOQvzrsnM" outputId="d1275a06-e064-4899-a8fd-a76e57ac4d74"
history = model.fit(X_train, y_train, epochs=10,
batch_size=60, validation_split=0.2)
# + id="LEV4xI2V1zPT" colab={"base_uri": "https://localhost:8080/"} outputId="0a8a1df0-7a8a-4f23-93b1-6d9fae55de4e"
acc = model.evaluate(X_test, y_test, verbose=2)[1]
print(f'Accuracy: {acc:.4f}')
# + colab={"base_uri": "https://localhost:8080/", "height": 292} id="vFyVePzpCM6X" outputId="a04d5815-93d4-48ad-c7b5-9f8d374809b5"
epochs = range(1, len(history.history['accuracy']) + 1)
plt.plot(epochs, history.history['loss'])
plt.plot(epochs, history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.show()
# + id="kZLLp5P3CUbK"
| 12,068 |
/notebooks/tdh512194/genres-and-influencers.ipynb
|
a5a87c3ecb5640fdae2de11663ff498d2fb9c4eb
|
[] |
no_license
|
Sayem-Mohammad-Imtiaz/kaggle-notebooks
|
https://github.com/Sayem-Mohammad-Imtiaz/kaggle-notebooks
| 5 | 6 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 38,666 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="e5bed034-2821-4c6d-a243-acf82c64aba3" _uuid="9b2ac72cee8c888b582efdd18d3c392a873a59c2"
import pandas as pd
import numpy as np
# + _cell_guid="a2f0be6d-ff06-40ee-8786-559f5b4cc2b2" _uuid="110450eb0b23303c19fbc0222c6eedd8cf5f6431" _kg_hide-input=true _kg_hide-output=false
data_movies = pd.read_csv('../input/tmdb_5000_movies.csv')
data_credits = pd.read_csv('../input/tmdb_5000_credits.csv')
pd.set_option('display.max_columns',None)
# + _cell_guid="baee51f4-2269-4fde-a280-d0f588395435" _uuid="0da254f7ad56dd69a698212b4700e2a85081bb32"
data_movies.head(5)
# + _cell_guid="0136b887-d8a0-4b67-821c-37106a10bdd0" _uuid="5395b381f5d050878778a026e36e5e8df6ac9357"
from pandas.io.json import json_normalize
import json
# + _cell_guid="a62f9611-7f3b-47f0-b0da-7d1ecbbd401c" _uuid="f7dc0bf7347136381889d333f3ea5607d2a35bb7"
def json_decode(data,key):
result = []
data = json.loads(data) # parse the JSON string
for item in data: #convert to list from json
result.append(item[key])
return result
# + _cell_guid="deb0c3fc-e781-4de0-8510-1ea6b1aa6911" _uuid="faa7bd94add907ab68cd7f9eaba906fa0342deba"
data_movies.describe(include='all')
# + _cell_guid="6eacb618-8a1c-4cb3-8975-21e54cd62f7b" _uuid="c6bbc6baff1ea19cb80f5e88ddccf15f5617a894"
data_credits.describe(include='all')
# + [markdown] _cell_guid="45d177cf-33ac-4547-90fa-0f9c9a207b8c" _uuid="d1a6bdad11bfc40dc4a9e31291e49071b784c30f"
# ## Clean missing values ##
# + _cell_guid="80528e4a-319f-4735-8a9c-373dfda1b77b" _uuid="a8a164038de0918dff9385af010fe4132824b7e6"
def nan_clean(data,replace=False,alter=''):
nan_count = len(data) - data.count()
if np.logical_and(replace,nan_count > 0):
data.fillna(alter,inplace=True)
print('Replaced NaN with {}'.format(alter))
print('Number of cleaned NANs:{}'.format(nan_count))
else:
print('Number of NANs:{}'.format(nan_count))
return
# + _cell_guid="a9d43114-d63e-4de8-bcf1-4adc82af5c30" _uuid="33732d96854d43cfb10df6f5d19fd67f25689cb3"
nan_clean(data_movies.homepage,replace=True)
# + _cell_guid="6b51f7fd-3ff3-40ec-b048-0192b97df11f" _uuid="9207322004213bd6bbf61f7f082ad9bb0732ce8d"
nan_clean(data_movies.release_date,replace=True)
# + _cell_guid="0f32ba5d-2253-447d-af9a-d196a5b2a0b0" _uuid="ebb772f61cfb417040eb18474ffdb5ca3e854c02"
nan_clean(data_movies.overview,replace=True)
# + _cell_guid="5755cb02-1b71-4c18-894b-acf5f132ba99" _uuid="cf6b21101d1656406cf5eb59359785c275be4a72"
nan_clean(data_movies.runtime,replace=True,alter=0)
# + _cell_guid="0074b946-9089-4ad3-b56d-0b7fa7e018cb" _uuid="dcc77541095a7c2473a996f2891da1630c631390"
nan_clean(data_movies.tagline,replace=True)
# + [markdown] _cell_guid="d63d59ad-f66c-4156-b250-506f66c7b4f0" _uuid="b776d5c2a5cb4c4c0540076331f7d450702b09b8"
# ## <span style="color:red"> **Questions** </span> ##
# + [markdown] _cell_guid="415c5b36-8659-4118-8aab-954927ad9c3c" _uuid="d6ba93fb74fe6616f6feedd4630b46b1e33967b7"
# ### 1.What areas have the most influence on revenue? ###
# + [markdown] _cell_guid="08df34e6-0610-4582-b5ae-c85a848769d0" _uuid="da357f4df76d66586b5fb151a5df7bb5bcc7fa3e"
# # + Feature engineering: convert the nested jsons in **production_countries** column to list of countries
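# For illustration, a raw `production_countries` entry is a JSON string holding a list of objects,
# roughly of the form used in the sketch below (an illustrative value, not read from the dataset);
# `json_decode` parses the string and keeps only the requested key:

# +
sample = '[{"iso_3166_1": "US", "name": "United States of America"}, {"iso_3166_1": "GB", "name": "United Kingdom"}]'
json_decode(sample, key='name')  # ['United States of America', 'United Kingdom']
# -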
# + _cell_guid="39ba3163-4bee-49b1-aacd-cb2cef7fb9a8" _uuid="733a75f09ff86161c9776c8aec7d6b5996bc1f24"
data_movies.production_countries = data_movies.production_countries.apply(json_decode,key='name')
# + _cell_guid="9c97b16c-15f8-4008-b03f-b895545d6241" _uuid="0363e1c8925afa918d9b9389e5f9ad7f4e816497"
data_movies.production_countries.head()
# + [markdown] _cell_guid="fd9a6a1e-38fe-4a60-8ff9-c05de56abdba" _uuid="4a36a42f16906b3e24a14a9a17e997dcee7f8f38"
# # + query the top 100 movies with highest **revenue** and their respective **production_countries**
# + _cell_guid="1393da95-8fe5-4879-92c6-cdc33eeb81ee" _uuid="b9dbcd09e1c0aee1fca7e37cc2342a7dd5b7eea8"
movie_top = data_movies.nlargest(100,'revenue')[['title','revenue','production_countries']]
# + _cell_guid="4dbce021-5cef-4a2b-8578-608123f29621" _uuid="a45ddab7a08c28e0aff3311cae3da4726455e8eb"
movie_top
# + [markdown] _cell_guid="f590495c-e511-4a97-ab86-cc53bf599231" _uuid="bf57b50e5861b87da19a248270d8928bf55d3536"
# count the occurrence of each country in the sorted list
# + _cell_guid="a37c7a7d-3daf-45cd-97bb-d1579ced739a" _uuid="ab0bf303504b7530171087b9f580e577b5bb159e"
from collections import defaultdict
import pprint
country_top = defaultdict(int)
for data in movie_top.production_countries:
for item in data:
country_top[item] += 1
pprint.pprint(country_top)
# + [markdown] _cell_guid="32868f0e-760c-408c-b5d8-04434e24c0d0" _uuid="c471d4bd01723bd84f063e2661759bfc4ef7d55a"
# it can be seen that the *USA* plays the dominant role, taking part in all of the top 100 movies on the list, followed by the *UK* with 19 movies and *New Zealand* with 6 movies
# + _cell_guid="3d1a960c-b78d-4de4-996d-05dcc9166ae1" _uuid="cff49476e87a95df491168f1fb8d9b978e33b28c"
import matplotlib.pyplot as plt
import seaborn as sns
# + _cell_guid="b733874d-664a-435e-9893-4877a78cccca" _uuid="f3b5f18f14b8050208e878e356175d5ed1b70cde"
df = pd.Series(dict(country_top),name='count')
df.index.name='country'
df.plot(kind='barh',grid=True,title='Occurences in the top-100 list')
# + [markdown] _cell_guid="7a66bb01-4298-4278-a33e-01ffa3618a1a" _uuid="bcc23948957b30c58cbe1cf3fbf169c8138f4a19"
# to assess the quantitative value, I now calculate the average revenue produced by each country from the top 100 list
# + _cell_guid="3b6b4c38-a6bc-47c0-a550-9c9db10b0646" _uuid="bc8cd6a28d260def7632f1e981482e60dfb509b2"
country_avg_rvn = defaultdict(int)
for index, row in movie_top.iterrows():
for item in row['production_countries']:
country_avg_rvn[item] += row['revenue']
for key in country_avg_rvn:
country_avg_rvn[key] = country_avg_rvn[key]/country_top[key]
pprint.pprint(country_avg_rvn)
# + _cell_guid="27d25e28-6c03-492d-aece-01b273d2a9bc" _uuid="4679cd005a3db1ffbbefc3f9bbd8625ce1332c39"
df = pd.Series(dict(country_avg_rvn),name='avg_rvn')
df.index.name='country'
df.plot(kind='barh',grid=True,title="Avg revenue per movie in the top-100 for each country")
# + [markdown] _cell_guid="775e1bdd-ffac-48fa-bef1-9e1d6dd8b42f" _uuid="4829cb0fb300d096e040db79d6299dd848b32fcc"
# Although it takes part in producing all of the top 100 highest-revenue movies, the *USA* does not have the highest average revenue per movie on the list. *Japan* contributes to only 1 movie from the top 100 list, but that movie has a very high revenue, putting *Japan* at the top of the chart.
# + [markdown] _cell_guid="0e63745a-eada-4d01-bff1-5ad8ca13bd0e" _uuid="7924e44ce05129f2ae7cb197d683fcacf1ee77f0"
# At this point, we still need further exploration to test the relevance of the *USA*. We now see to what extent the *USA* contributes to the whole list by counting its occurrence across all movies:
# + _cell_guid="c74cec77-efba-4d47-ba1c-ea31af0bf292" _uuid="9f0c04122d966bc73f3571bf93010e63e876903f"
count = 0
for item in data_movies.production_countries:
if 'United States of America' in item:
count +=1
print("The USA produces {0:.0f}% of the movies".format((count/len(data_movies.production_countries))*100))
# + [markdown] _cell_guid="43869009-1743-4452-86bc-2e91231c5e0f" _uuid="3f0e77ffc3e20791edf9af308183c74b8622fb0c"
# Hence, it is hard to say that the *USA* has the most influence on revenue, because the *USA* accounts for most of the movies in the dataset.
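# As a quick check of that claim (a minimal sketch reusing the `movie_top` and `data_movies`
# frames defined above), we can compare the USA's share of the top-100 list with its share of the
# full list:

# +
usa_top = sum('United States of America' in c for c in movie_top.production_countries)
usa_all = sum('United States of America' in c for c in data_movies.production_countries)
print("USA share of the top-100 list: {:.0f}%".format(100 * usa_top / len(movie_top)))
print("USA share of the full list:    {:.0f}%".format(100 * usa_all / len(data_movies)))
# -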
# + [markdown] _cell_guid="5f9c34ab-40ec-44d5-9c84-fce38bb0d415" _uuid="d0506f72320983f0cbbdde018c54dadd020cd2da"
# The cases of the *UK*, *New Zealand* and *Japan* are worth further investigation, as they show significant values on both charts of the top 100 movies.
# + [markdown] _cell_guid="ac8b6a6d-28bd-45e6-8dc4-edd5f88fd27f" _uuid="96023276caa8e8b0efa64524656fc40c77ff16d9"
# We now calculate the average revenue per movie for each country across the original list. To be relevant, we only consider countries that produce more than 10 movies, to avoid the same situation as Japan in the Top-100 list.
# + _cell_guid="763b844f-78bf-45f6-b83d-0d01204d417d" _uuid="fcaf54383e8b67bfb5b369751052a5d9ab5dcacf"
country_t = defaultdict(int)
for data in data_movies.production_countries:
for item in data:
country_t[item] += 1
country_top = dict()
for key in country_t:
if country_t[key] > 10:
country_top[key] = country_t[key]
print('List of production countries that produce more than 10 movies:')
pprint.pprint(country_top)
# + _cell_guid="ebb9610b-ff33-4f2f-86c7-1e36f7bb9760" _uuid="1551241d0d78ced55b4463f8a5f33ceb30bdfa5a"
country_avg_rvn = defaultdict(int)
for index, row in data_movies.iterrows():
for item in row['production_countries']:
if item in list(country_top.keys()):
country_avg_rvn[item] += row['revenue']
for key in country_avg_rvn:
country_avg_rvn[key] = country_avg_rvn[key]/country_top[key]
pprint.pprint(dict(country_avg_rvn))
# + _cell_guid="17e9223f-291d-4e5b-a1f0-f22be5783117" _uuid="a9cdbc73cc70bd9ec18cc83f1cc09ece4a870eb4"
df = pd.Series(dict(country_avg_rvn),name='avg_rvn')
df.index.name='country'
df.plot(kind='bar',grid=True,title="Avg revenue per movie in original list for each country",figsize=(10,5))
# + [markdown] _cell_guid="d4ba3b0b-3ab9-4292-9a94-2810999de34f" _uuid="9c6d40101dcd9030e5568b9556d3235f9ee20c75"
# *New Zealand* has its average revenue per movie significantly higher than the rest of the list. It is possible to say that *New Zealand* has the highest influence on the revenue.
# + [markdown] _cell_guid="07c65a82-58f2-4f38-b314-ba761a059ebb" _uuid="8c87f3a1606f18282a2cd9caf6b9e8d4baef6334"
# ### 2.How is a movie’s revenue and average score affected by its genre? ###
# + _cell_guid="da0a9f72-67bd-47f1-bcb2-92118a6a5f3c" _uuid="15fd6f940ae96158046d002ced8ffc5b51e58659"
data_movies.genres = data_movies.genres.apply(json_decode,key='name')
# + _cell_guid="322997c7-5370-4a0f-abe8-b4459c471c3d" _uuid="d0f1a57cedba49425f0d2d78cdcce6f9546504bc"
data_movies.genres
# + [markdown] _cell_guid="8fa0b3e6-225c-475c-87ef-7324dc824ccd" _uuid="508f2a7029b40e76d61b341d1aa46565f4f41cac"
# ### Genres and Revenue ###
# + [markdown] _cell_guid="0d074bad-8cfd-48b9-980f-0894aa325e11" _uuid="0691e1ba6e64dc4787997aab0d5785029b636251"
# prepare for ***multivariate linear regression***
# + _cell_guid="68d3d8ec-13d0-4ba2-be15-fdec984b04a4" _uuid="3bb102aa40306bcaf2a26e1cafe873beac8f29c4"
genres = set()
for item in data_movies.genres:
for genre in item:
genres.add(genre)
genres = list(genres)
genres.append('revenue')
print(genres)
# + _cell_guid="77050db3-19ad-49c7-a258-3d56913045d8" _uuid="e935d3aeb645a1ff5d13caa81e862fd7a722b95d"
df = pd.DataFrame(columns=genres)
# + _cell_guid="89725d20-721b-49b8-8efb-3af4a4709ed4" _uuid="d609550dbb26c08c1441fecb952bcfb2c456af99"
for index, row in data_movies.iterrows():
for item in row['genres']:
df.loc[index,item] = 1
df.loc[index,'revenue'] = row['revenue']
# + _cell_guid="0df0d781-d958-4f09-9d66-4ce515556ada" _uuid="6133a88e12f14fe43a91ecbdd02f598ea2bed7e9"
df.fillna(0,inplace=True)
df.head()
# + [markdown] _cell_guid="5e68669e-0d2f-4c45-97ce-99c55a343769" _uuid="9effe91c74e50d9baf258e45094d1f4bbe7fe2aa"
# Standardize the *revenue* for calculation
# + _cell_guid="9d548f92-1e6d-41e5-9d5b-12c5af8c4461" _uuid="5edcc1bfc584f3944c49f325e0a44d421173100e"
df.revenue = (df.revenue - df.revenue.mean())/df.revenue.std()
# + [markdown] _cell_guid="7ba26437-fad7-4cbc-9094-cc5bdcda483d" _uuid="437804c3a1163d670a44a7307b6688bf92b5d887"
# Preparing the matrices for the model:
# <div style="text-align:center"> **Y = w0 + w1X1 + w2X2 + ...** </div>
# + _cell_guid="007cf0ac-6243-4f80-bd9a-aa1fa834f116" _uuid="4e1c1442cc7e305c0f47d5f96403b743f6a81607"
Y = np.array(df.revenue)
Y
# + _cell_guid="6b051d31-523f-4c2f-a294-35742ae33153" _uuid="5a902df7d1d7e0988a448d171447c616826841d8"
x1 = np.ones(len(df)).reshape(len(df),1)
x2 = df.iloc[:,:-1].as_matrix()
X = np.concatenate((x1,x2),axis=1)
X
# + [markdown] _cell_guid="3653608b-7906-4b05-9067-7537528f089a" _uuid="ecc87aeeae961fdd64dde4c62438c767e2ee4173"
# The resulting coefficient vector of the formula:
# <div style="text-align:center"> **W = inv(X'X)X'Y** </div>
# <div> contains the weights that determine the impact of each genre, since the revenue is standardized</div>
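# For reference, this is the ordinary least-squares solution: minimizing ||Y - XW||^2 leads to the
# normal equations X'XW = X'Y, hence W = inv(X'X)X'Y. The cell below uses `np.linalg.pinv` (the
# pseudo-inverse) rather than a plain inverse, which is a safer choice here because the one-hot
# genre matrix can be rank-deficient.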
# + _cell_guid="58d8fbf9-0255-473f-b9eb-f36547f402ff" _uuid="d5e451d57c91cd9f9a17b340999c0b97c413d502"
W = np.linalg.pinv(np.transpose(X).dot(X)).dot(np.transpose(X)).dot(Y)
pprint.pprint(list(W))
# + [markdown] _cell_guid="14cc8a94-70f6-4a03-8983-6e20af976baa" _uuid="3261e316d4077a7c99c15d22d363fa06ebf00863"
# Labeling the resulting weights gives:
# <div>Note that W0 is the constant term and is therefore omitted</div>
# + _cell_guid="6dcf8222-fac9-4af5-80e3-bd03f75ea72f" _uuid="a3d03950ab081b7eb74a98264b72682b6d37cb13"
genres_d = genres[:-1]
weights = dict(zip(genres_d, W[1:]))
weights
# + _cell_guid="ef695a67-ea5d-4622-9fe4-18100150369e" _uuid="307fcafc3dadbb683fd76d70355eb6ec07728d4c"
df = pd.Series(weights,name='genre_rvn_weight')
df.index.name='genre'
df.plot(kind='bar',grid=True,title="Weights of genres on the revenue",figsize=(15,5))
# + [markdown] _cell_guid="a1aaf436-149f-469e-b061-534f06e2759a" _uuid="c76391b2658d9e1498ba88a49f8b385bcd280263"
# The plot clearly shows the impact of genres on the revenue. <span style='color:red'>*Adventure*</span>, <span style='color:red'>*Animation*</span> and <span style='color:red'>*Fantasy*</span> have outstanding impacts that lead to high revenue.
# + [markdown] _cell_guid="35c18f19-ebee-44bb-90a6-f2a211722223" _uuid="0f438b1f05053aee1adaa952e84789bec223051a"
# On the other hand, *Comedy*, <span style='color:red'>*Documentary*</span>, *Foreign*, *Horror*, *Western* and *TV Movie* have a negative impact on the revenue.
# + [markdown] _cell_guid="ebd6bc26-54a8-467f-99ad-07de2ccd5601" _uuid="df09f75f5c2dc0bcce653856b48cca9b9d4cabcb"
# ### Genres and Average Score ###
# + [markdown] _cell_guid="906f11d5-c0cf-4afd-a2ac-c7f95e3a5505" _uuid="75ff32f68541a2643118fb69438f7f99ffe23d34"
# Applying ***multivariate linear regression*** as above
# + _cell_guid="56adc847-737e-4c86-899d-45776ec3c520" _uuid="c410c778fefe4cf18d6131579c14e7a0af5d993f"
del genres[-1]
genres.append('avg_vote')
# + _cell_guid="52de1e18-b462-43fb-9a76-3866c2f57d7b" _uuid="de103afb104248b9a0f0659d3fba3bd451477a57"
genres
# + _cell_guid="7e2396fd-b05a-46e2-bd11-1e31b824039c" _uuid="a72b354fc823964a1115a229a6181578e76d4835"
df = pd.DataFrame(columns=genres)
# + _cell_guid="0d0d544f-fc1e-4f81-b144-d6c6a6dffa4c" _uuid="3ac97c3363324ebec6b32842607bb22ae6bfc636"
for index, row in data_movies.iterrows():
for item in row['genres']:
df.loc[index,item] = 1
df.loc[index,'avg_vote'] = row['vote_average']
# + _cell_guid="d8f6535b-ec40-40a2-a20b-be54701504cb" _uuid="dcfd5bcef701363ccd882f226b85783f645e9e12"
df.fillna(0,inplace=True)
df.head()
# + [markdown] _cell_guid="ee796819-9247-4186-848c-9d87abbd064a" _uuid="1124af9914b2bfec878ba3ee6fb6b3b8d398f173"
# Standardize the *avg_vote* for calculation
# + _cell_guid="5760be44-210a-4300-ab2d-1a27d518d588" _uuid="c24c3461ea64a71a0c74ecf38582d02877664580"
df.avg_vote = (df.avg_vote - df.avg_vote.mean())/df.avg_vote.std()
# + _cell_guid="aa5ffe66-3f4f-4d4e-bafa-8d71d1300db4" _uuid="e73762727223101ac3871a5549fcd01fc3ebf3f8"
Y = np.array(df.avg_vote)
Y
# + _cell_guid="f898a2ab-5ef4-4203-a2d3-b024bdc1b5b1" _uuid="3160b63475d958baf209c51da73c42f26ad952f6"
W = np.linalg.pinv(np.transpose(X).dot(X)).dot(np.transpose(X)).dot(Y)
pprint.pprint(list(W))
# + _cell_guid="72299dc5-ba84-4cc1-a4e9-a1f34669d8bb" _uuid="4ad709ff5f1968c3b7f8efe3e8a45294d4322989"
genres_d = genres[:-1]
weights = dict(zip(genres_d, W[1:]))
weights
# + _cell_guid="6b64bd97-58bb-44f6-a178-2d9b761accc8" _uuid="b776224f07d6a1f340a0d442364c0a4f18f16877"
df = pd.Series(weights,name='genre_avgvote_weight')
df.index.name='genre'
df.plot(kind='bar',grid=True,title="Weights of genres on the avg_vote",figsize=(15,5))
# + [markdown] _cell_guid="c416d9d9-1516-4d5f-bd52-de51e72d0061" _uuid="e528f9cf560c24c12fa889f010af815e434b888d"
# *TV Movie* has a significant negative impact on the avg_vote. Most genres have a positive impact; the highest include *Animation*, *Documentary*, *Drama* and *War*.
# + [markdown] _cell_guid="1c6b5f71-f69d-44dc-80ea-aadda4d4d0e9" _uuid="366778cc8b866084d94422296c73f2fa404d2cef"
# ### 3.What influence does release date have on revenue? ###
# + _cell_guid="d6886726-c162-472f-9abc-34905b84f27b" _uuid="642922d872f8b45e90e694f6899f00e284a6e390"
import datetime
# + _cell_guid="4757547a-3e36-4d1d-9929-69c413aaf446" _uuid="46854af75ba51b1b413f2c6f0583756d37821da1"
string_date = data_movies.release_date[0]
datetime.datetime.strptime(string_date,"%Y-%m-%d").isoweekday()
# + [markdown] _cell_guid="e76a4bad-1a2c-47d1-aeee-0964eb653766" _uuid="69f9f803803206c84c00092f6d1d049de4a793d6"
# First guess: assessing the release dates as weekdays and their influence on the revenue
# + [markdown] _cell_guid="3e5923bf-2e40-4f10-9dcf-73137f47d086" _uuid="11523417176e25bfda118fb3ec229a8aba4c2dc2"
# Convert the dates to weekdays:
# + _cell_guid="e8f3a75a-b64d-490c-a9de-34a89c50bf8f" _uuid="787a0e5dba0ff8d0664deae2b3f6c9ae4fb5825d"
weekdays = {
1 : 'Mon',
2 : 'Tue',
3 : 'Wed',
4 : 'Thu',
5 : 'Fri',
6 : 'Sat',
7 : 'Sun'
}
def to_weekday(string_date):
if string_date != '':
weekday = datetime.datetime.strptime(string_date,"%Y-%m-%d").isoweekday()
return weekdays[weekday]
else:
return np.nan
# + _cell_guid="32c21568-08c0-45fe-967b-95c1b2a03499" _uuid="0ac4bc63eb4aad0ec65df3b3c8d27fd759a889f0"
data_movies['release_weekday'] = data_movies.release_date.apply(to_weekday)
# + _cell_guid="be37dfba-922c-4d73-9c86-19cedd7a2f1c" _uuid="8e9b806c004619791defdfd76ac81fd90f8db3f2"
data_movies.release_weekday.value_counts(dropna=False)
# + [markdown] _cell_guid="ca3da957-9f8c-43a9-b1ce-eb1258dfc9ef" _uuid="f26007b5a8558f630ac7d77973590fd4776ea1cd"
# create new df containing weekdays and revenue:
# + _cell_guid="7f143956-feaf-4d25-ace2-0b8fec944b5d" _uuid="e242f830a79a0b56c874393f60caa9e08f7d9af0"
df = data_movies[pd.notnull(data_movies['release_weekday'])] #omit the null weekdays
df = df.loc[:,['revenue','release_weekday']]
df = df[df['revenue']!=0] #omit the zero revenues
# + _cell_guid="3b4edd59-03ce-4448-abdd-b0081f17299f" _uuid="923d36cdaf20ef2417ca90689c4e640860779a23"
df
# + [markdown] _cell_guid="126e94e5-3107-4b03-a326-613b4b8bd9b4" _uuid="3654c26c4f79ff0d2817fae778c118b63e1f3830"
# It is plausible to perform ANOVA on the weekday-categorized values to see whether the weekday has any influence on the revenue
# + [markdown] _cell_guid="45b4381a-21a7-4891-a58a-4e4673981775" _uuid="4f076e1fc27a48ef8862ba93723547fd74014eef"
# <div> **Null Hypothesis**: Weekdays have the same influence on revenue</div>
# <div> **Alternative Hypothesis**: There are differences in influence depending on the weekday </div>
# + [markdown] _cell_guid="afc9fbcf-23e5-421d-8096-f623519fe144" _uuid="e44b91fd6bf25d2a23fd0abb9831f02f50eaf02b"
# Categorize revenue by weekday and randomly pick 100 entries from each category:
# + _cell_guid="d78ed465-1b9b-4e43-8776-fa852581ef4b" _uuid="3497a63d05d4d67bb6f552f7ecab40b3088d7506"
#reset index to join to dataframe
mon = df[df['release_weekday']=='Mon'].sample(100).reset_index()
tue = df[df['release_weekday']=='Tue'].sample(100).reset_index()
wed = df[df['release_weekday']=='Wed'].sample(100).reset_index()
thu = df[df['release_weekday']=='Thu'].sample(100).reset_index()
fri = df[df['release_weekday']=='Fri'].sample(100).reset_index()
sat = df[df['release_weekday']=='Sat'].sample(100).reset_index()
sun = df[df['release_weekday']=='Sun'].sample(100).reset_index()
# + _cell_guid="ab31930e-2109-4192-b7ef-2f486ca62cea" _uuid="6cadc54b8af0590add508b9803e503008eff1f9d"
df = pd.DataFrame({
'Mon':mon['revenue'],
'Tue':tue['revenue'],
'Wed':wed['revenue'],
'Thu':thu['revenue'],
'Fri':fri['revenue'],
'Sat':sat['revenue'],
'Sun':sun['revenue']
})
# + _cell_guid="61621f6f-8e31-4737-9fbd-45823aa96fce" _uuid="00b67893e77a65cd61f9f4bc66712f73e5078c9d"
df
# + _cell_guid="5cd1c026-2e24-44fd-8494-26270c3818b3" _uuid="475d4b840ac08aec3bce9e9edfb6bf613aaff9d5"
import scipy.stats as stats
# + _cell_guid="e853eba7-4c49-4654-bd9c-3d29e2b8e04f" _uuid="e15d27ff88ad163aeb4cd5ac95dd798a5462e2ba"
F,p = stats.f_oneway(
df['Mon'],
df['Tue'],
df['Wed'],
df['Thu'],
df['Fri'],
df['Sat'],
df['Sun']
)
# + _cell_guid="19c4a810-491f-49ae-88e7-ca2e4de5fd55" _uuid="46addac9422ffd5f9247859e927d6fda6d61a4e2"
F,p
# + [markdown] _cell_guid="7f00c8a8-eb09-4dff-86e7-be183c7db500" _uuid="598ece25215b35640af7df17f4c0576b23a2996d"
# With a p-value of about 0.4 > 0.05, we cannot reject the null hypothesis that weekdays have the same effect on revenue
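# + [markdown]
# A possible extra check (my addition, not part of the original analysis): revenues are heavily
# right-skewed, so the non-parametric Kruskal-Wallis test may be a safer companion to one-way ANOVA.
# A minimal sketch on the same weekday samples:
# +
H, p_kw = stats.kruskal(df['Mon'], df['Tue'], df['Wed'], df['Thu'],
                        df['Fri'], df['Sat'], df['Sun'])
H, p_kw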
# + [markdown] _cell_guid="2d7cc71a-d4e7-4c15-91e5-27566e29f81b" _uuid="e2be33f4d2973ce76d58a5be8bbd5cba8a3ada1c"
# This time we perform ANOVA again with a different categorization: weekends ['Fri', 'Sat', 'Sun'] versus the rest ['Mon', 'Tue', 'Wed', 'Thu']
# + [markdown] _cell_guid="f750733d-7231-4e2d-b701-afe842dab719" _uuid="be74f8af7ef989c45504df01adbe1051e35ed860"
# <div> **Null Hypothesis**: Weekdays have the same influence on revenue</div>
# <div> **Alternative Hypothesis**: There is a difference in influence between weekends and other days </div>
# + _cell_guid="72149c15-093b-4cbc-b286-4a0315bbfeed" _uuid="d5c6edf19782735dd81ed66dc7f880bc105bf635"
weekend = pd.concat([fri, sat, sun]).sample(30).reset_index()
other = pd.concat([mon, tue, wed, thu]).sample(30).reset_index()
# + _cell_guid="39529173-e94c-4135-b6da-7a4971cbccea" _uuid="7a4bb6b8593a1a043ec32c666b16e2e40ac0a572"
weekend
# + _cell_guid="63433292-5f21-4bee-b93b-6527b082b490" _uuid="15649ef5b817afb7ce7f9251084133e634a3f280"
other
# + _cell_guid="b66a24b4-94c6-4c99-9583-876247ef361e" _uuid="449ac49bfd92d6d7f127500b93e77a617bd81bca"
df = pd.DataFrame({
'Weekend':weekend['revenue'],
'Other':other['revenue']
})
# + _cell_guid="7fccad45-24f4-45ff-b07c-de49c7efeb06" _uuid="d0a8cedd8d225a7305856abec72d3435603b0557"
df
# + _cell_guid="9023012f-1a42-420a-93b4-2fd3d1d99d82" _uuid="d274582e3e78752026c61ef0235b0cab5f3b85cc"
F,p = stats.f_oneway(
df['Other'],
df['Weekend'],
)
# + _cell_guid="f7360f8e-7282-412c-9340-c594baf6fb94" _uuid="877728bbb59c96f4ad5f76f62305cf5cf6e43524"
F, p
# + [markdown] _cell_guid="7608e752-d8bd-4d94-8328-f8a375df9aa1" _uuid="656b74d1b02df5f5a4272bd49d7529e79137f001"
# We still cannot conclude that weekends have an impact on the revenue, due to the high p-value
# + [markdown] _cell_guid="f21f3627-93d1-48b2-8648-0b7e89d6af6b" _uuid="98a606d532eba61ff4d451f7cea3526c92af7058"
# How about the month? Does the release month affect the revenue?
# <div> **Null Hypothesis**: Months have the same influence on revenue</div>
# <div> **Alternative Hypothesis**: There are differences in influence between months </div>
# + _cell_guid="468afa08-4160-4b09-b1f9-3fe4d2ea6b6f" _uuid="4945de4975ced41aa4c76dd7d2fab2445c5cbf33"
df = data_movies[pd.notnull(data_movies['release_date'])] #omit the null weekdays
df = df.loc[:,['revenue','release_date']]
df = df[df['revenue']!=0] #omit the zero revenues
df['release_date'] = pd.to_datetime(df.release_date)
def to_month(date):
return date.month
df['release_date'] = df.release_date.apply(to_month)
jan = df[df['release_date'] ==1].sample(30).reset_index()
feb = df[df['release_date'] ==2].sample(30).reset_index()
mar = df[df['release_date'] ==3].sample(30).reset_index()
apr = df[df['release_date'] ==4].sample(30).reset_index()
may = df[df['release_date'] ==5].sample(30).reset_index()
jun = df[df['release_date'] ==6].sample(30).reset_index()
jul = df[df['release_date'] ==7].sample(30).reset_index()
aug = df[df['release_date'] ==8].sample(30).reset_index()
sep = df[df['release_date'] ==9].sample(30).reset_index()
oct_ = df[df['release_date'] ==10].sample(30).reset_index()
nov = df[df['release_date'] ==11].sample(30).reset_index()
dec = df[df['release_date']==12].sample(30).reset_index()
df = pd.DataFrame({
'Jan':jan['revenue'],
'Feb':feb['revenue'],
'Mar':mar['revenue'],
'Apr':apr['revenue'],
'May':may['revenue'],
'Jun':jun['revenue'],
'Jul':jul['revenue'],
'Aug':aug['revenue'],
'Sep':sep['revenue'],
'Oct':oct_['revenue'],
'Nov':nov['revenue'],
'Dec':dec['revenue']
})
F,p = stats.f_oneway(
df['Jan'],
df['Feb'],
df['Mar'],
df['Apr'],
df['May'],
df['Jun'],
df['Jul'],
df['Aug'],
df['Sep'],
df['Oct'],
df['Nov'],
df['Dec']
)
F,p
# + [markdown] _cell_guid="56dc6ee5-4c33-4f38-ba76-a8321d1b140e" _uuid="29be62698024c5d831ad70dfeac2afebc9102060"
# With such a small p-value, we can reject the null hypothesis, suggesting that there are differences in revenue among the release months.
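# + [markdown]
# To see *which* months differ, a post-hoc pairwise comparison could follow the significant ANOVA
# result. A minimal sketch (my addition; it assumes the extra dependency statsmodels is available)
# on the month samples reshaped to long format:
# +
from statsmodels.stats.multicomp import pairwise_tukeyhsd
long_df = df.melt(var_name='month', value_name='revenue')
print(pairwise_tukeyhsd(long_df['revenue'], long_df['month']).summary())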
# + [markdown] _cell_guid="9a1aa246-79b6-45b6-b4f2-2acd8802e31f" _uuid="8918ed75a14abbd83de432c9aa94a790382531c7"
# For a simple approach, I calculate the average revenue for each month
# + _cell_guid="25bdf9a5-03c4-490f-ad1f-427326f3915d" _uuid="1071d1ed7e5910d2455862a198de7f929d3c2fec"
df = data_movies[pd.notnull(data_movies['release_date'])] #omit the null weekdays
df = df.loc[:,['revenue','release_date']]
df = df[df['revenue']!=0] #omit the zero revenues
df['release_date'] = pd.to_datetime(df.release_date)
def to_month(date):
return date.month
df['release_date'] = df.release_date.apply(to_month)
jan = df[df['release_date'] ==1].revenue.mean()
feb = df[df['release_date'] ==2].revenue.mean()
mar = df[df['release_date'] ==3].revenue.mean()
apr = df[df['release_date'] ==4].revenue.mean()
may = df[df['release_date'] ==5].revenue.mean()
jun = df[df['release_date'] ==6].revenue.mean()
jul = df[df['release_date'] ==7].revenue.mean()
aug = df[df['release_date'] ==8].revenue.mean()
sep = df[df['release_date'] ==9].revenue.mean()
oct_ = df[df['release_date'] ==10].revenue.mean()
nov = df[df['release_date'] ==11].revenue.mean()
dec = df[df['release_date']==12].revenue.mean()
# + _cell_guid="66df5310-d1eb-434b-8d5b-4161ed364f12" _uuid="6aa5a3724199c8bab474165e50b4b2abb593c497"
month_avg_rvn = {
1:jan,
2:feb,
3:mar,
4:apr,
5:may,
6:jun,
7:jul,
8:aug,
9:sep,
10:oct_,
11:nov,
12:dec
}
df = pd.Series(dict(month_avg_rvn),name='avg_rvn')
df.index.name='month'
df.plot(kind='bar',grid=True,title="Avg revenue per movie in original list for each month",figsize=(10,5))
# + [markdown] _cell_guid="ae4989df-f3df-42ec-88d3-1fe9a352cd0e" _uuid="ea5a825b4e0b388dd801023ec425412d22142e77"
# As the plot shows, movies released in June and May have the highest average revenue, while in January, September and August the average revenue is low
# + [markdown] _cell_guid="fc75e682-fe6a-4cf6-a27d-7b36adddb56d" _uuid="d86be7667b24b138d30b1adeaa9ad656c5f318e8"
# At this point, I propose another guess: how does the revenue vary over time? **Would movies released later get higher revenue?**
# <div> I now sort the revenue by chronological release date and drop the zero-revenue rows </div>
# + _cell_guid="bdf2cd01-51ac-47e8-af01-60c9f3f257c8" _uuid="9997a01e03c56a3e303175eca4f88d5b7136a873"
df = data_movies.loc[:,['revenue','release_date']]
df['release_date'] = pd.to_datetime(df.release_date)
df = df.sort_values('release_date')
df = df[df['revenue']!=0]
df = df.set_index('release_date')
df
# + _cell_guid="c756d1a1-bad4-42fc-b129-083ef6c65c93" _uuid="bd648ba950ff040d7e70340a4f73a23d3c2e2b93"
df.plot(grid=True,figsize=(20,10,),title='Revenue over the period').set_ylabel('Revenue')
# + [markdown] _cell_guid="15500dbf-0481-4b8d-b850-76f2a12e9372" _uuid="45d75cd231d68b74dd3afd4da4211c4db83d581e"
# Based on the plot, the guess appears right: revenue tends to get higher and higher over time.
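# + [markdown]
# As an extra check (my addition): the raw revenue scatter is noisy, so averaging by year makes the
# trend easier to judge. A minimal sketch reusing the date-indexed df above:
# +
df.resample('A').mean().plot(grid=True, figsize=(15,5), title='Average revenue per year').set_ylabel('Revenue')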
# + _cell_guid="68e6c7d2-3661-4f11-9129-5371c78a0038" _uuid="14452b836bfc052ffc291986ca18152f0fb44e8a"
| 28,463 |
/7 kyu/7kyu_Especially Joyful Numbers.ipynb
|
14b99d8334e1c406d23942280e6c9c2626aa951b
|
[] |
no_license
|
azukiyuna/CodeWars_Python
|
https://github.com/azukiyuna/CodeWars_Python
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,724 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### [7 kyu]
# #### [Especially Joyful Numbers](https://www.codewars.com/kata/especially-joyful-numbers/train/python)
# [Instructions]
#
# Positive integers that are divisible exactly by the sum of their digits are called Harshad numbers.
# The first few Harshad numbers are: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 18, ...
#
# We are interested in Harshad numbers where the product of its digit sum s
# and s with the digits reversed, gives the original number n.
#
# For example consider number 1729:
# its digit sum, s = 1 + 7 + 2 + 9 = 19
# reversing s = 91
# and 19 * 91 = 1729 --> the number that we started with.
#
# Complete the function which tests if a positive integer n is Harshad number,
# and returns True if the product of its digit sum and its digit sum reversed equals n;
# otherwise return False.
# #### [Method 1]
def number_joy(n):
s = 0
    for i in range(len(str(n))):      # digit sum via the decimal string
        s += int(str(n)[i])
    s_r = int(str(s)[::-1])           # the digit sum with its digits reversed
return (n == s * s_r)
# sample test
print(number_joy(1997)) #'Not a Harshad number'
print(number_joy(1998)) #'Harshad but digit sum=27, and 27x72 does not equal 1998'
print(number_joy(1729)) #'Harshad and digit sum=19, and 19x91 = 1729'
print(number_joy(18)) #'Harshad but digit sum=9, and 9x9 does not equal 18'
# #### [Method 2]
def number_joy(n):
num = n
s = 0
    while (num > 0):                  # digit sum via repeated div/mod
        rem = num % 10
        s = s + rem
        num = num // 10
    if s < 10:                        # reverse the (at most two-digit) digit sum without strings
        s_r = s
    else:
        s_r = (s % 10)*10 + (s // 10)
return (n == s * s_r)
# sample test
print(number_joy(1997)) #'Not a Harshad number'
print(number_joy(1998)) #'Harshad but digit sum=27, and 27x72 does not equal 1998'
print(number_joy(1729)) #'Harshad and digit sum=19, and 19x91 = 1729'
print(number_joy(18)) #'Harshad but digit sum=9, and 9x9 does not equal 18'
#Applying a random forest can help prevent overfitting
# -
#Random Forest
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators = 100, criterion = 'entropy', random_state = 0)
forest.fit(x_train, y_train)
cv_forest_pred = forest.predict(x_cv)
from sklearn.metrics import confusion_matrix
cm3 = confusion_matrix(y_cv, cv_forest_pred)
cm3
from sklearn.metrics import accuracy_score
print('Accuracy score:' , accuracy_score(y_cv, cv_forest_pred))
# +
#84.7% accuracy obtained by fitting a random forest with 100 trees
# -
# Performing PCA to reduce dimension
from sklearn.decomposition import PCA
titanic_PCA_components = PCA(n_components = 4)
titanic_PCA_components.fit(train_x)
test = titanic_PCA_components.transform(train_x)
plt.plot(list(titanic_PCA_components.explained_variance_ratio_), '-o')
plt.xlabel('feature')
plt.ylabel('variance explained')
plt.show()
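# A follow-up sketch (my addition; it assumes numpy is imported as np earlier in this notebook):
# the cumulative explained variance makes it easier to decide how many components to keep.
cum_var = np.cumsum(titanic_PCA_components.explained_variance_ratio_)
plt.plot(cum_var, '-o')
plt.axhline(0.95, color='r', linestyle='--')  # e.g. keep enough components for ~95% of the variance
plt.xlabel('number of components')
plt.ylabel('cumulative variance explained')
plt.show()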
| 3,026 |
/Titanic.ipynb
|
de8e7e4f1416d8fa44e5c72e7a42963df65aca8b
|
[] |
no_license
|
brentkuenzi/Kaggle-Titanic
|
https://github.com/brentkuenzi/Kaggle-Titanic
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 51,431 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="vuuMs13D9-ab" outputId="c74e3a10-8638-480d-c5fb-0232c82031b4"
import pandas as pd
file_path = '/content/drive/MyDrive/Forsk coding school code practices/Churn_Modelling.csv'
df = pd.read_csv(file_path) #to read the csv file
df.columns.to_list() #to display column names into list
df.shape #shape of the dataset
df['Geography'].head() #first 5 values in this column
df['Geography'].unique() #unique values in the region/country column
import numpy as np
import matplotlib.pyplot as plt
features = df.iloc[:,3:13].values #without .values it stays a pandas DataFrame; with .values it becomes a numpy array
labels = df.iloc[:,13].values
#df.dtypes
#features[0:10,:]
#OneHotEncoder to convert geography and gender to numeric representation
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
column_transformer = ColumnTransformer([('encoder', OneHotEncoder(), [1,2])], remainder = 'passthrough')
#([(what we are performing, which encoder, columns)], remainder='passthrough' --> means rest columns gets unaffected)
features = np.array(column_transformer.fit_transform(features), dtype=np.float32)
features[0:10,:] #1st and 4th are dummy variables --> 1 to 3 column for Geography and 4 to 5 for Gender
#Handling dummy variables
features = features[:,1:] #for geography - handling dummy variables
features[0]
features = features[:,[0,1,3,4,5,6,7,8,9,10,11]] #for gender column - handling dummy variables
features[0]
features.shape
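#A cleaner alternative sketch (my addition; assumes scikit-learn >= 0.21): OneHotEncoder(drop='first')
#drops one dummy column per categorical feature automatically, avoiding the manual slicing above.
#It is shown for comparison only and does not overwrite the features used below.
column_transformer_alt = ColumnTransformer([('encoder', OneHotEncoder(drop='first'), [1, 2])], remainder='passthrough')
features_alt = np.array(column_transformer_alt.fit_transform(df.iloc[:, 3:13].values), dtype=np.float32)
features_alt.shape  #should equal features.shape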
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.3, random_state=42)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
features_train = sc.fit_transform(features_train)
features_test = sc.transform(features_test)
features_train[0]
features_test[0]
#keras libraries and packages for model training
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
#Adding the first hidden layer and input layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
#Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
#Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
#Now, compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.fit(features_train, labels_train, batch_size = 20, epochs = 20)
#change number of epochs to change the score/accuracy
# + id="A9mTVNCcAo6K"
# The output layer is a single sigmoid unit, so threshold the probability at 0.5
# (np.argmax over a single column would always return 0).
prediction = (classifier.predict(features_test) > 0.5).astype(int).ravel()
#print(prediction)
# + colab={"base_uri": "https://localhost:8080/"} id="JlypojkKAwjo" outputId="6183b661-88a0-4173-c436-68ee78c06e3a"
len(prediction)
# + colab={"base_uri": "https://localhost:8080/"} id="c6Mpa2g0B0IR" outputId="d9bdb9c8-c405-41d6-d1b5-54c0393382b4"
list(zip(labels_test, prediction))
# + colab={"base_uri": "https://localhost:8080/"} id="wv7WgIFsADef" outputId="cace431c-b6c0-4119-b981-2439e86eea6b"
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test, prediction)
score = (cm[0][0] + cm[1][1])/cm.sum()
print(score)
Class/ML0101ENv3/labs/loan_train.csv
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="Z2dLBQNE3vTc"
# ### Load Data From CSV File
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="aeWq8MXN3vTd" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="0ae9a34c-eb8b-4b38-c420-40152f2142b1"
df = pd.read_csv('loan_train.csv')
df.head()
# + id="PR0yGZcw3vTh" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d0bfb614-add3-481d-ee35-122e3ffd7765"
df.shape
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="098mX2n_3vTk"
# ### Convert to date time object
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="UYTmJsZ33vTl" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="dc9b4944-a99c-4f45-e401-8f317608f4f6"
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="4m-UPYWs3vTp"
# # Data visualization and pre-processing
#
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="dxVqWjq83vTq"
# Let’s see how many of each class is in our data set
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="kVQKGTto3vTq" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="5a7d6f1c-fc43-468f-ee46-7577d2ca510f"
df['loan_status'].value_counts()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="rXu_pSAD3vTt"
# 260 people have paid off the loan on time while 86 have gone into collection
#
# + [markdown] id="uLgiF_nT3vTu"
# Let's plot some columns to understand the data better:
# + id="nEEWRF0g3vTy" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="e342130b-872c-42bd-ccd2-06a58b7c8462"
import seaborn as sns
bins = np.linspace(df.Principal.min(), df.Principal.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'Principal', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="XOQ_2ygr3vT1" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="30a174f7-2919-40ae-a6cc-aac458cee446"
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="dN_6o0wm3vT5"
# # Pre-processing: Feature selection/extraction
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="8CznE_WP3vT6"
# ### Let's look at the day of the week people get the loan
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="-KrqEZmZ3vT6" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="0cf4b96f-cfa1-4b3c-9465-cdcb9987ab42"
df['dayofweek'] = df['effective_date'].dt.dayofweek
bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'dayofweek', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="fLJTNWnh3vT-"
# We see that people who get the loan at the end of the week tend not to pay it off, so let's use feature binarization to set a threshold at day 4 (weekend vs. rest of the week)
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="-hG2zPZD3vT_" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="2abed032-b44f-4dfc-a6e6-71eefd825b22"
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
df.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="v8sjJEWM3vUC"
# ## Convert Categorical features to numerical values
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="ompaUV2N3vUD"
# Let's look at gender:
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="zDpwAvic3vUE" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="a2efcc3b-b652-4f2b-da59-1c20c8e40a30"
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="BmdXVMIR3vUI"
# 86% of females pay off their loans while only 73% of males pay off their loans
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="BK3zBGPH3vUJ"
# Let's convert male to 0 and female to 1:
#
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="wsC4HMuk3vUK" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="e54bbcc7-2a30-4a35-aafa-5e4a6d256b23"
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="ivrt6uOZ3vUN"
# ## One Hot Encoding
# #### How about education?
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="QC2Ww3a_3vUO" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="40d6d8db-5d49-4e2a-c3b4-ee090c7cf6f7"
df.groupby(['education'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="0Ou8l4Ei3vUR"
# #### Features before One Hot Encoding
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="MMAsp0mX3vUU" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="2c9b7034-da37-41b3-c227-bd160706ef78"
df[['Principal','terms','age','Gender','education']].head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="u22hsm3U3vUb"
# #### Use the one hot encoding technique to convert categorical variables to binary variables and append them to the feature DataFrame
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="7O-1uHao3vUc" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="52f6b2ad-272f-40db-f272-86c1f2aff832"
Feature = df[['Principal','terms','age','Gender','weekend']]
Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1)
Feature.drop(['Master or Above'], axis = 1,inplace=True)
Feature.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="OYVMRdcs3vU3"
# ### Feature selection
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="It2t9eEO3vU5"
# Let's define the feature set, X:
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="hE3xTe7x3vU6" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="40cd48f1-d551-4682-cf80-2e9b61076821"
X = Feature
X[0:5]
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="vVMKhk9j3vU-"
# What are our labels?
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="42DgjNQW3vU_" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="216d71cb-fc9e-4f3e-cea2-bcca974f2b14"
y = df['loan_status'].apply(lambda loan_status: 0 if loan_status == 'PAIDOFF' else 1)
y[0:5]
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="f63I7dWr3vVB"
# ## Normalize Data
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="wOhh1Upo3vVC"
# Data standardization gives the data zero mean and unit variance (technically the scaler should be fit only on the training data after the train/test split; a sketch of that variant follows the next cell)
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="mUkz9KYs3vVD" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="47604b7f-74c3-4c82-925c-e67e770b8c59"
X= preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
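# + [markdown]
# A minimal sketch of the leakage-free variant mentioned above (my addition; the 80/20 split is an
# arbitrary illustration and the scaled arrays use new names so the course flow below is unchanged):
# +
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(Feature, y, test_size=0.2, random_state=4)
scaler = preprocessing.StandardScaler().fit(X_tr)          # fit on the training portion only
X_tr, X_te = scaler.transform(X_tr), scaler.transform(X_te)
X_tr[0:5]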
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="n-OnABBS3vVH"
# # Classification
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="tXucr08s3vVI"
# Now, it is your turn: use the training set to build an accurate model.
| 12,286 |
/archive_checkpoint/image_ready-Copy3-checkpoint.ipynb
|
1169ab3a18bcab2bfe1be065ad5d18d131c72675
|
[] |
no_license
|
shichaoji/sourceIpythons
|
https://github.com/shichaoji/sourceIpythons
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 5,096 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Step 1: Implement the vector gradient.
#
# Implement the vector gradient as described in the lecture slides and the assignment description in the `color_sobel_edges()` function below. Use Sobel filters to estimate the partial derivatives. Use the `color_dot_product()` function (provided) to compute the necessary dot products to obtain gxx, gyy, and gxy. Return an array containing the gradient magnitudes for each pixel, i.e. a gradient magnitude image. Optionally, return a second array containing the gradient directions for each pixel.
#
# As usual, the input image must be dtype `float` or `uint8`. If it is `uint8` convert it to `float` before processing. Leave the magnitude image output as dtype `float` regardless of the input's dtype.
#
#
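#
# For reference, the quantities the implementation below computes (with $u_x$, $u_y$ denoting the
# per-channel horizontal and vertical Sobel partials) are:
#
# $g_{xx} = u_x \cdot u_x, \quad g_{yy} = u_y \cdot u_y, \quad g_{xy} = u_x \cdot u_y$
#
# $\theta = \tfrac{1}{2}\,\mathrm{arctan2}\left(2 g_{xy},\; g_{xx} - g_{yy}\right)$
#
# $F(\theta) = \sqrt{\tfrac{1}{2}\left[(g_{xx} + g_{yy}) + (g_{xx} - g_{yy})\cos 2\theta + 2 g_{xy}\sin 2\theta\right]}$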
# +
import skimage.util as util
import skimage.filters as filt
import numpy as np
def color_dot_product(A, B):
'''
Element-by-element dot product in a 2D array of vectors.
:return: An array in which index [i,j,:] is the dot product of A[i,j,:] and B[i,j,:].
'''
return np.sum(A.conj()*B, axis=2)
def color_sobel_edges(I):
'''
Finish me!
:param I:
:return:
'''
if I.dtype == np.uint8:
I = util.img_as_float(I)
v = np.zeros_like(I)
h = np.zeros_like(I)
for chnl in range(I.shape[2]):
v[:,:,chnl] = filt.sobel_v(I[:,:,chnl])
h[:,:,chnl] = filt.sobel_h(I[:,:,chnl])
xx = color_dot_product(h, h)
xy = color_dot_product(h, v)
yy = color_dot_product(v, v)
theta = 0.5 * np.arctan2(2 * xy, xx - yy)
mag = np.sqrt(0.5 * (xx + yy + (xx - yy) * np.cos(2 * theta) + 2 * xy * np.sin(2 * theta)))
return (mag, theta)
# -
# # Step 2: Examine behavior of the kurtosis sharpness metric.
#
# Write a function which:
#
# * takes as input an input image, a minimum value of sigma, and a maximum value of sigma.
# * applies different amounts of Gaussian blur to the original image for all integer values of sigma between the provided minimum and maximum values of sigma, inclusive. (reminder: sigma describes the standard deviation of the gaussian filter mask used to blur the image).
# * For each blurred image, compute the gradient magnitude using `color_sobel_edges()`, then compute the kurtosis sharpness measure. This is log(kurtosis+3), where kurtosis is the kurtosis of the gradient magnitude image of the blurred image, as described in the assignment description document. See `scipy.stats.kurtosis()`.
# * Returns a tuple consisting of the range object of sigma values used and the list of computed kurtosis values for each sigma.
#
# Call the function using `mushroom.jpg` as the input image, a minimum sigma of 1, and a maximum sigma of 30. Use a smaller max sigma until you are sure it's working, then increase to 30, as it can take a few minutes to do all the filtering. Use the return values from your function to plot a line graph of gaussian blur sigma vs. blur measure (kurtosis) for the waterfall image. Add appropriate axis labels and a descriptive title. Sample output is provided in the assignment description document.
#
#
# +
# %matplotlib inline
import scipy.stats as stat
import skimage.io as io
import matplotlib
import matplotlib.pyplot as plt
def test_blur_measure(I, min_sigma, max_sigma):
'''
Finish me!
:param I:
:param min_sigma:
:param max_sigma:
:return:
'''
sigma = []
shrp = []
for i in range(min_sigma, max_sigma + 1):
sigma.append(i)
I_blur = filt.gaussian(I, sigma = i, multichannel=True)
(I_mag, I_theta) = color_sobel_edges(I_blur)
        shrp.append(np.log(stat.kurtosis(np.reshape(I_mag, [I_mag.shape[0] * I_mag.shape[1], 1])) + 3).item())  # .item() replaces the deprecated np.asscalar
return (sigma, shrp)
myimage = io.imread('mushroom.jpg')
(sigma, shrp) = test_blur_measure(myimage,1,30)
plt.plot(sigma, shrp)
plt.xlabel('Gaussian blur sigma')
plt.ylabel('Sharpness (kurtosis)')
plt.title('Kurtosis sharpness measure of mushroom.jpg for increasing blur.')
# -
# # Step 3: Create a local blur map
#
# Write a function which:
#
# * takes as input an input image and a square window size (in pixels). e.g. if `window_size = 11`, this means an 11 by 11 window.
# * computes the local sharpness of the input image (i.e. log(kurtosis+3)) for each tiled, non-overlapping square window of the given window size
# * stores each local sharpness in an array where each entry represents one window of the input image (the size of this array can be computed by integer division of the original image dimensions by the window size)
# * returns the array of local sharpnesses.
#
# Then call the function you just wrote with `mushroom.jpg` as the input image and 100 as the window size. Plot the returned array as an image using `plt.imshow()`. Do not rescale this image with `vmin=` or `vmax=`, and use the default colormap (don't change it to `'gray'`). Add a color scale bar to the figure using `plt.colorbar()`. Sample output is provided in the assignment description document.
#
#
# +
def sharpness_map(I, window_size):
'''
Finish me!
:param I:
:param window_size:
:return:
'''
local_shrp = np.zeros((I.shape[0] // int(window_size), I.shape[1] // int(window_size)))
for i in range(local_shrp.shape[0]):
for j in range(local_shrp.shape[1]):
local = I[i * window_size : (i + 1) * window_size, j * window_size : (j + 1) * window_size,:]
(local_mag, local_theta) = color_sobel_edges(local)
            local_shrp[i, j] = np.log(stat.kurtosis(np.reshape(local_mag, [local_mag.shape[0] * local_mag.shape[1], 1])) + 3).item()  # .item() replaces the deprecated np.asscalar
return local_shrp
local_shrp = sharpness_map(myimage, 100)
plt.imshow(local_shrp)
plt.colorbar()
# -
# # Step 4: Try it on another image.
#
# Use the functions you wrote to produce the same plots as in steps 2 and 3 but for the `waterfall.jpg` image instead.
#
# +
# Write your code here.
myimage = io.imread('waterfall.jpg')
(sigma, shrp) = test_blur_measure(myimage,1,30)
myfigure3 = plt.figure()
plt.plot(sigma, shrp)
plt.xlabel('Gaussian blur sigma')
plt.ylabel('Sharpness (kurtosis)')
plt.title('Kurtosis sharpness measure of waterfall.jpg for increasing blur.')
myfigure4 = plt.figure()
local_shrp = sharpness_map(myimage, 100)
plt.imshow(local_shrp)
plt.colorbar()
# -
# # Step 5: Thinking and Qualitative Analysis
#
# ### Answer the following questions, right here in this block.
#
# 1. Do you think that the log(kurtosis+3) measurement of sharpness (hereafter called the "sharpness measure") is a good measure for characterizing global image blur (the general amount that the entire image is blurred)? Explain and justify your answer.
#
# _Your answer:_ I don't think the sharpness measure is a good measure for characterizing global image blur, because in the mushroom image log(kurtosis+3) changes very little even when the blur changes a lot, once the blur is already large.
#
# 2. Is the sharpness measure effective at characterizing variations in local blur? Does it respond to different regions of images appropriately? Explain and justify your answers.
#
# _Your answer:_ It's also not very effective at characterizing variations in local blur. In the waterfall image, the rock and water part is much clearer and sharper than the cloud part. However, the blur map shows that the rock and water part has lower log(kurtosis+3) values, which would indicate more blur.
#
# 3. Think of what the shape of a histogram of gradient magnitudes would look like for a sharp image. Why does this set of gradient magnitudes have high kurtosis? (It might help to look up kurtosis and see what it measures about a histogram!)
#
# _Your answer:_ In the histogram of gradient magnitudes, most pixels have small gradient magnitude values and the counts fall off as the gradient magnitude grows, but a small number of pixels have very large gradient magnitudes. High kurtosis is exactly the result of such infrequent extreme deviations.
#
# 4. Now think what would happen as that same image gets blurrier. Explain how the shape of the histogram would change, and the corresponding effect on the kurtosis.
#
# _Your answer:_ The histogram shifts toward 0 gradient magnitude and fewer pixels have extremely large gradient magnitude values, so the kurtosis of the image decreases.
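# +
# A tiny numerical illustration of the answers above (my addition, using synthetic data rather than
# the assignment images): a heavy-tailed set of "gradient magnitudes" has much higher kurtosis than
# one whose values are spread more evenly, which is what blurring does to the gradient histogram.
rng = np.random.RandomState(0)
sharp_like = np.abs(rng.laplace(0, 1, 100000))    # many small values plus a few very large ones
blurred_like = np.abs(rng.normal(0, 1, 100000))   # extremes pulled in toward the bulk
print('sharp-like kurtosis:  ', stat.kurtosis(sharp_like))
print('blurred-like kurtosis:', stat.kurtosis(blurred_like))
# -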
| 8,727 |
/DGGM/.ipynb_checkpoints/AAE druGAN-checkpoint.ipynb
|
f1cc7817df1c4882ad01d3a5bac047b2132bfbc7
|
[] |
no_license
|
VedangW/DGGM
|
https://github.com/VedangW/DGGM
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 38,288 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# A working implementation of the AAE in Tensorflow as in the old GitHub repository.
import numpy as np
import tensorflow as tf
from tqdm import tqdm
# +
# Load the train and test files
train = 'train_aae.txt'
test = 'test_aae.txt'
# -
# `batch_gen` shuffles the data and yields batches; `buffered_gen` reads the data file into a buffer in chunks and yields batches from each chunk.
def batch_gen(data, batch_n):
"""
Given the data, returns the batches using random shuffling.
Parameters
----------
data: np.array
Consists of all the instances set into a numpy array
batch_n: int
Size of each batch
Returns
-------
data: Generator object
A generator object with all the batches in it.
"""
# Create a list of indices for the data,
# which is of the same size as the data.
# For eg,
# If data is of size 1000,
# inds = [0, 1, ..., 999]
inds = list(range(data.shape[0]))
# Randomly shuffle the indices.
# For eg,
# inds = [650, 720, ..., 2]
np.random.shuffle(inds)
# Generate size of data / batch size
# number of batches.
# For eg,
# If data is of size 1000 and
# batch_n = 50, then i will be
# in range 0, 19
for i in range(int(data.shape[0] / batch_n)):
# Split inds according to the batches
# created by i
ii = inds[i*batch_n:(i+1)*batch_n]
# Return a generator with each index
# in ii matching that tuple from data.
# For eg,
# If data = [[0, 1], [1, 2], ..., [999, 1000]]
# and ii = [50, 55, 1] (and batch_n = 3)
# then return [[50, 51], [55, 56], [1, 2]]
yield data[ii, :]
def buffered_gen(f, batch_n=1024, buffer_size=2000):
"""
Creates the batches by reading a file 'f', where the
data is stored.
The data is stored in a buffer.
Parameters
----------
f: str
String containing address of file
batch_n: int
Size of each batch
buffer_size: int
Size of the buffer. Denotes total number
of batches which can be possibly stored in
the buffer.
Returns
-------
A generator object with all the batches in it.
"""
# Open file f
inp = open(f)
# Create new data list
data = []
# i = index of line, line is the line read
for i, line in enumerate(inp):
# For each line,
        # the line is stripped and split on tabs; the second field (index 1) is a
        # bit string whose characters are each converted to a float.
        # The resulting array is then appended to data.
data.append(np.array(list(map(float, line.strip().split('\t')[1]))))
# If size of buffer is finished, that is the buffer can store
# data only uptill the number of instances = buffer_size * batch_n
# and if the next instance fills up the buffer, then...
if (i+1) % (buffer_size * batch_n) == 0:
# Generate batches for whatever has been stored in data so far
bgen = batch_gen(np.vstack(data), batch_n)
# Yield the batches
for batch in bgen:
yield batch
# Empty data
data = []
else:
# Generate batches while leaving out the last element of data
bgen = batch_gen(np.vstack(data[:-1]), batch_n)
# Yield the batches
for batch in bgen:
yield batch
# To load the data from the test samples.
def load_test():
with open(test) as inp:
data = [np.array(list(map(float, line.strip().split('\t')[1]))) for line in inp]
return np.vstack(data)
# Utility functions for AAE
def he_initializer(size):
return tf.random_normal_initializer(mean=0.0, stddev=np.sqrt(1. / size), seed=None, dtype=tf.float32)
def linear_layer(tensor, input_size, out_size, init_fn=he_initializer,):
W = tf.get_variable('W', shape=[input_size, out_size], initializer=init_fn(input_size))
b = tf.get_variable('b', shape=[out_size], initializer=tf.constant_initializer(0.1))
return tf.add(tf.matmul(tensor, W), b)
def bn_layer(tensor, size, epsilon=0.0001):
batch_mean, batch_var = tf.nn.moments(tensor, [0])
scale = tf.get_variable('scale', shape=[size], initializer=tf.constant_initializer(1.))
beta = tf.get_variable('beta', shape=[size], initializer=tf.constant_initializer(0.))
return tf.nn.batch_normalization(tensor, batch_mean, batch_var, beta, scale, epsilon)
def sample_prior(loc=0., scale=1., size=(64, 10)):
return np.random.normal(loc=loc, scale=scale, size=size)
# Actual implementation of the AAE
# +
class AAE(object):
def __init__(self,
gpu_config=None,
batch_size=1024,
input_space=167,
latent_space=20,
middle_layers=None,
activation_fn=tf.nn.tanh,
learning_rate=0.001,
initializer=he_initializer):
self.batch_size = batch_size
self.input_space = input_space
self.latent_space = latent_space
if middle_layers is None:
self.middle_layers = [256, 256]
else:
self.middle_layers = middle_layers
self.activation_fn = activation_fn
self.learning_rate = learning_rate
self.initializer = initializer
tf.reset_default_graph()
self.input_x = tf.placeholder(tf.float32, [None, input_space])
self.z_tensor = tf.placeholder(tf.float32, [None, latent_space])
# Encoder net: 152->256->256->10
with tf.variable_scope("encoder"):
self.encoder_layers = self.encoder()
self.encoded = self.encoder_layers[-1]
# Decoder net: 10->256->256->152
with tf.variable_scope("decoder"):
self.decoder_layers = self.decoder(self.encoded)
self.decoded = self.decoder_layers[-1]
tf.get_variable_scope().reuse_variables()
self.generator_layers = self.decoder(self.z_tensor)
self.generated = tf.nn.sigmoid(self.generator_layers[-1])
# Discriminator net: 10->64->64->8->1
sizes = [64, 64, 8, 1]
with tf.variable_scope("discriminator"):
self.disc_layers_neg = self.discriminator(self.encoded, sizes)
self.disc_neg = self.disc_layers_neg[-1]
tf.get_variable_scope().reuse_variables()
self.disc_layers_pos = self.discriminator(self.z_tensor, sizes)
self.disc_pos = self.disc_layers_pos[-1]
self.pos_loss = tf.nn.relu(self.disc_pos) - self.disc_pos + tf.log(1.0 + tf.exp(-tf.abs(self.disc_pos)))
self.neg_loss = tf.nn.relu(self.disc_neg) + tf.log(1.0 + tf.exp(-tf.abs(self.disc_neg)))
self.disc_loss = tf.reduce_mean(tf.add(self.pos_loss, self.neg_loss))
tf.summary.scalar("discriminator_loss", self.disc_loss)
self.enc_loss = tf.reduce_mean(tf.nn.relu(self.disc_neg) - self.disc_neg + tf.log(1.0 + tf.exp(-tf.abs(self.disc_neg))))
batch_logloss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.decoded, labels=self.input_x), 1)
self.dec_loss = tf.reduce_mean(batch_logloss)
tf.summary.scalar("encoder_loss", self.enc_loss)
tf.summary.scalar("decoder_loss", self.dec_loss)
disc_ws = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')
enc_ws = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder')
ae_ws = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder') + \
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='decoder')
self.train_discriminator = tf.train.AdamOptimizer(self.learning_rate).minimize(self.disc_loss, var_list=disc_ws)
self.train_encoder = tf.train.AdamOptimizer(self.learning_rate).minimize(self.enc_loss, var_list=enc_ws)
self.train_autoencoder = tf.train.AdamOptimizer(self.learning_rate).minimize(self.dec_loss, var_list=ae_ws)
if gpu_config is None:
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.4
self.sess = tf.Session(config=gpu_config)
self.init_net()
def encoder(self):
sizes = self.middle_layers + [self.latent_space]
with tf.variable_scope("layer-0"):
encoder_layers = [linear_layer(self.input_x, self.input_space, sizes[0])]
for i in range(len(sizes) - 1):
with tf.variable_scope("layer-%i" % (i+1)):
activated = self.activation_fn(encoder_layers[-1])
# normed = bn_layer(activated, sizes[i])
next_layer = linear_layer(activated, sizes[i], sizes[i+1])
encoder_layers.append(next_layer)
return encoder_layers
def decoder(self, tensor):
sizes = self.middle_layers[::-1] + [self.input_space]
with tf.variable_scope("layer-0"):
decoder_layers = [linear_layer(tensor, self.latent_space, sizes[0])]
for i in range(len(sizes) - 1):
with tf.variable_scope("layer-%i" % (i+1)):
activated = self.activation_fn(decoder_layers[-1])
# normed = bn_layer(activated, sizes[i])
next_layer = linear_layer(activated, sizes[i], sizes[i+1])
decoder_layers.append(next_layer)
return decoder_layers
def discriminator(self, tensor, sizes):
with tf.variable_scope("layer-0"):
disc_layers = [linear_layer(tensor, self.latent_space, sizes[0])]
for i in range(len(sizes) - 1):
with tf.variable_scope("layer-%i" % (i+1)):
activated = tf.nn.tanh(disc_layers[-1])
# normed = bn_layer(activated, sizes[i])
next_layer = linear_layer(activated, sizes[i], sizes[i+1])
disc_layers.append(next_layer)
return disc_layers
def init_net(self):
init = tf.global_variables_initializer()
self.sess.run(init)
def train(self, log):
sess = self.sess
saver = tf.train.Saver()
hist = []
test_data = load_test()
merged_summary = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('/tmp/aae/1')
train_writer.add_graph(sess.graph)
for e in tqdm(range(5)):
print (log, "epoch #%d" % (e+1))
log.flush()
train_gen = buffered_gen(train, batch_n=self.batch_size)
for i, batch_x in enumerate(train_gen):
if i%3 == 0:
batch_z = sample_prior(scale=1.0, size=(len(batch_x), self.latent_space))
sess.run(self.train_discriminator, feed_dict={self.input_x: batch_x, self.z_tensor: batch_z})
elif i%3 == 1:
sess.run(self.train_encoder, feed_dict={self.input_x: batch_x})
else:
sess.run(self.train_autoencoder, feed_dict={self.input_x: batch_x})
if i % 5 == 0:
s = sess.run(merged_summary, feed_dict={self.input_x: test_data, self.z_tensor: batch_z})
train_writer.add_summary(s, i)
if i%10000 == 0:
batch_z = sample_prior(scale=1.0, size=(len(test_data), self.latent_space))
losses = sess.run([merged_summary, self.disc_loss, self.enc_loss, self.dec_loss],
feed_dict={self.input_x: test_data, self.z_tensor: batch_z})
                    summary, discriminator_loss, encoder_loss, decoder_loss = losses  # losses has four elements (summary first)
                    print("disc: %f, encoder : %f, decoder : %f" % (discriminator_loss/2., encoder_loss, decoder_loss), file=log)
                    log.flush()
else:
saver.save(sess, './fpt.aae.%de.model.ckpt' % e)
batch_z = sample_prior(scale=1.0, size=(len(test_data), self.latent_space))
losses = sess.run([merged_summary, self.disc_loss, self.enc_loss, self.dec_loss],
feed_dict={self.input_x: test_data, self.z_tensor: batch_z})
            summary, discriminator_loss, encoder_loss, decoder_loss = losses  # unpack all four fetched values
            print("disc: %f, encoder : %f, decoder : %f" % (discriminator_loss/2., encoder_loss, decoder_loss), file=log)
# train_writer.add_summary(discriminator_loss, e)
# train_writer.add_summary(encoder_loss, e)
# train_writer.add_summary(decoder_loss, e)
log.flush()
hist.append(decoder_loss)
return hist
def load(self, model):
saver = tf.train.Saver()
saver.restore(self.sess, model)
# -
aae = AAE(batch_size=1024)
with open('./fpt.aae.log', 'w') as log:
aae_0 = aae.train(log)
# Generation of new samples
test_data = load_test()
test_data.shape
test_gen = buffered_gen(test, batch_n=aae.batch_size)
batch_x = test_gen.__next__()
batch_x[0]
enc = aae.sess.run(aae.encoded, feed_dict={aae.input_x: test_data})
enc.shape
dec = aae.sess.run(aae.decoded, feed_dict={aae.input_x: batch_x})
dec.shape
dec[0]
samp = dec[0]
np.mean(samp)
count = 0
for a in samp:
if a <= -0.25 or (a >= 0.25 and a <= 0.75) or a >= 1.25:
count += 1
count
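# A minimal sketch of sampling genuinely new fingerprints from the prior with the generator head
# (my addition; it reuses the trained `aae` above and an arbitrary batch of 5 latent vectors).
z = sample_prior(scale=1.0, size=(5, aae.latent_space))
generated = aae.sess.run(aae.generated, feed_dict={aae.z_tensor: z})
generated.shape  # (5, 167) probabilities; threshold at 0.5 to obtain binary fingerprints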
total = 0
for a in aae.sess.run(tf.trainable_variables()):
total += np.size(a)
total
for value in aae_0:   # aae_0 holds the decoder-loss history returned by aae.train()
    print (value)
| 14,114 |
/COMP9313_project_1/.ipynb_checkpoints/COMP9313 Project 1-specs-checkpoint.ipynb
|
71d429ac859b4238d31ae12a73198674149fd085
|
[] |
no_license
|
simplemj/comp9313
|
https://github.com/simplemj/comp9313
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 10,334 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/khamoh/NaturalLangProcessing/blob/master/TextProcessing_using_NLTK.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Gu84qG4pa1u8" colab_type="text"
# NLTK - widely used for text processing
#
#
# 1. Tokenisation - converting string to collection of words or sentences
# 2. Morphological analysis - convert a word to its root form
# 3. Part of speech tagging
# 4. Named entity recognition
# 5. Spelling correction
# + id="lW-1VK6PbUEH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="f850f220-1b63-4bcd-c1c8-e827b2a2c853"
import nltk
nltk.download("punkt")
nltk.download("wordnet")
nltk.download("tagsets")
nltk.download("averaged_perceptron_tagger")
# + [markdown] id="IuOLwdpMc_4i" colab_type="text"
# Tokenization
# + id="_mVCy0PrcHP7" colab_type="code" colab={}
data = """ My name is XYZ, I live in Solihull. My home is in near shopping center. sometime I
go out for shopping, you can contact my at [email protected] and ask for Mr.XYZ
"""
# + id="ZznOkJ2icWBW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="c8ba2772-9877-4e8f-bf89-1d8255b23918"
data.split('.')
# + [markdown] id="GH-C6VVKdRGf" colab_type="text"
# Sentence Tokenization
# + id="XOxBjOz_dE9a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="fbd34e90-2ae9-4388-da32-0cf91b77494c"
nltk.sent_tokenize(data)
# + id="oHI5piXrdj-p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="8120a94c-9e4b-4e91-82c6-d7520a603437"
x = nltk.sent_tokenize(data)
print(x[2])
# + [markdown] id="EJAqmOXSdVs9" colab_type="text"
# Word Tokenization
# + id="RK2CQu8ydYMp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 674} outputId="97c8a381-3e37-4113-cb95-175ac45817b2"
nltk.word_tokenize(data)
# + [markdown] id="Mt0ITmE4d-ny" colab_type="text"
# #Morphological analysis
# - Converting a word to a root form
# - children to child
# - wives to wife
# - knives to knife
#
#
# Two methods:
# * Stemming - Faster, less accurate, works on spelling level
# * Lemmatization - slower, more accurate - works on meaning level
#
#
# + [markdown] id="FU6q6N3iea7E" colab_type="text"
# **Stemming **
# + id="OojhqidxeZLK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="105b5443-c36d-464d-8cb9-78edc458bfe3"
from nltk.stem import PorterStemmer
ps = PorterStemmer()
ps.stem("cars")
# + id="FShyZhVaeqTK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="efdbb03f-b3b0-423b-88aa-5e91f2114354"
ps = PorterStemmer()
ps.stem("boxes")
# + id="7_uFzEmRetHt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8a90da06-508a-40f9-9c1c-b0cf62122e63"
ps = PorterStemmer()
ps.stem("knives")
# + id="Ged-IZ-3ezn4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="413cb658-59ba-4be6-d3e2-809f8d0fe397"
ps = PorterStemmer()
ps.stem("children")
# + id="BjP5QcpXfICa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="177661a8-4185-4f61-bec2-046e87c19280"
from nltk.stem import WordNetLemmatizer
wd = WordNetLemmatizer()
wd.lemmatize("cars")
# + id="tTJHLavcfV3n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ebd6a3d6-a8ab-4b21-b32b-28ec6a54a112"
wd = WordNetLemmatizer()
wd.lemmatize("children")
# + id="YFqPj6H5fj8I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2f169643-b816-408c-edb7-58d9fa7b3e4b"
wd = WordNetLemmatizer()
wd.lemmatize("wives")
# + [markdown] id="MHmdqVRpf4SO" colab_type="text"
# **Part of Speech (POS) Tagging**
# + id="kiZfFM70f2zX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 247} outputId="94cd4792-d6ef-4eab-de8c-ea8dd738a4b7"
nltk.pos_tag(nltk.word_tokenize("There was an eagle in the sky and it was looking for food"))
# + id="AEXuxKkIgZ-2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="bf59d90d-b07b-4d71-dccf-2e8b4e9b659c"
nltk.help.upenn_tagset("VBD")
# + id="NJ9sK7fQgqGi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="c0509451-028c-4835-8d03-08cb61beedbe"
nltk.help.upenn_tagset("NN")
# + id="M4GYrrLnet-6" colab_type="code" colab={}
# + [markdown] id="ex-fx95Ogut7" colab_type="text"
# ## **NER - Named Entity Recognition**
# + id="r9VXuoN_g564" colab_type="code" colab={}
import spacy
# + id="9o9lGjAOg88K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="5a0848b8-5a25-46f3-c439-e0be3b09fb28"
nlp = spacy.load("en")
doc = nlp("The big grey dog ate all of the chocolate, but fortunately he wasn't sick!")
doc.text.split()
# + id="JMF4tfoAhz8B" colab_type="code" colab={}
nlp = spacy.load("en_core_web_sm")
# + id="02R_LZqxh-iG" colab_type="code" colab={}
data = nlp("Microdoft developped a solution for corona pandemic and we will all work towards finding it in UK on date 01-01-2021, Bill Gates will help us")
# + id="QMr9d_h3iHbB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="12885409-6d83-456a-8628-d6fa52ac7a4f"
from spacy import displacy
displacy.render(data, style= 'ent', jupyter =True)
# with NER an organisation can read the data and GDPR-sensitive entities can be hidden or masked
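# The detected entities can also be read programmatically (a small sketch using the same `data` doc):
for ent in data.ents:
    print(ent.text, ent.label_)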
# + [markdown] id="Yc_fK1ixi6_y" colab_type="text"
# **Spelling Correction**
# + id="aA5t_Wbdi-g1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c7577c19-2874-43f6-ba1f-804b05b8e724"
#The higher the distance between the words, the lower the similarity
nltk.jaccard_distance(set("orange"), set('orenge'))
# + id="6s7OLis1jY7i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="06df7009-f0b3-4518-df5f-449389f82918"
nltk.jaccard_distance(set("orange"), set('random'))
# + id="C1jWJ27KjdDR" colab_type="code" colab={}
dictionary = ['Mango','Orange','Icecream']
def correct(word):
score =1
ans = ""
for w in dictionary:
dist = nltk.jaccard_distance(set(w), set(word))
    if dist < score:  # '<<' (bit shift) was a bug; we want the smallest distance
      ans = w
      score = dist
return ans
# + id="IFmiLcdZkJs0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="c47849a3-82c1-4d44-aca2-cf200bcaa972"
correct('Mangi')
# + id="IiH_Imr6kfVB" colab_type="code" colab={}
score = dist
ion (EXAMPLE)
# +
from pyspark import SparkContext, SparkConf
from time import time
import pickle
import submission
def createSC():
conf = SparkConf()
conf.setMaster("local[*]")
conf.setAppName("C2LSH")
sc = SparkContext(conf = conf)
return sc
with open("toy/toy_hashed_data", "rb") as file:
data = pickle.load(file)
with open("toy/toy_hashed_query", "rb") as file:
query_hashes = pickle.load(file)
alpha_m = 10
beta_n = 10
sc = createSC()
data_hashes = sc.parallelize([(index, x) for index, x in enumerate(data)])
start_time = time()
res = submission.c2lsh(data_hashes, query_hashes, alpha_m, beta_n).collect()
end_time = time()
sc.stop()
# print('running time:', end_time - start_time)
print('Number of candidate: ', len(res))
print('set of candidate: ', set(res))
# -
# # Project Submission and Feedback
#
# For the project submission, you are required to submit the following files:
#
# 1. Your implementation in the python file `submission.py`.
# 2. The report `report.pdf`.
#
# Detailed instruction about using `give` to submit the project files will be announced later via Piazza.
| 8,063 |
/module/.ipynb_checkpoints/TensorFlow-checkpoint.ipynb
|
5a9e71f0ae6e8db79cafc132e641058b87f91e30
|
[] |
no_license
|
adra2n/notebook
|
https://github.com/adra2n/notebook
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 139,200 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test the StarNet model
#
# This notebook takes you through the steps of how to test a trained model on given test data sets.
# Requirements:
# * python packages: `numpy h5py keras matplotlib seaborn sklearn`
# * data files:
# - test_data.h5
# - mean_and_std.npy
# - starnet_cnn.h5
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import keras
import h5py
import time
from matplotlib import gridspec
datadir = "/media/apogee/starnet/neworig/"
# -
# **Obtain data for normalizing labels and define a function to denormalize labels**
# +
mean_and_std = np.load(datadir + 'mean_and_std.npy')
mean_labels = mean_and_std[0]
std_labels = mean_and_std[1]
num_labels = mean_and_std.shape[1]
def denormalize(lb_norm):
return ((lb_norm*std_labels) + mean_labels)
# -
# **Define functions to obtain test data**
def get_data(filename):
f = h5py.File(datadir + filename, 'r')
spectra_array = f['spectrum']
ap_ids = f['Ap_ID'][:]
labels_array = np.column_stack((f['TEFF'][:],f['LOGG'][:],f['FE_H'][:]))
snr_array = f['combined_snr'][:]
return (ap_ids, snr_array, spectra_array, labels_array)
# **Load test data and model**
test_ap_ids, test_snr, test_spectra, test_labels = get_data('test_data.h5')
print('Test set contains ' + str(len(test_spectra))+' stars')
model = keras.models.load_model(datadir + 'starnet_cnn.h5')
# **Define a function that predicts on a test set by using batches**
#
# This allows the user to use an hdf5 matrix as the input rather than loading the entire dataset into a numpy array
def batch_predictions(model, spectra, batch_size, denormalize):
predictions = np.zeros((len(spectra),3))
num_batches = int(len(spectra)/batch_size)
for i in range(num_batches):
inputs = spectra[i*batch_size:(i+1)*batch_size]
# Mask any nan values
indices_nan = np.where(np.isnan(inputs))
inputs[indices_nan]=0.
predictions[i*batch_size:(i+1)*batch_size] = denormalize(model.predict(inputs))
num_remainder = int(len(spectra)%batch_size)
if num_remainder>0:
inputs = spectra[-num_remainder:]
indices_nan = np.where(np.isnan(inputs))
inputs[indices_nan]=0.
predictions[-num_remainder:] = denormalize(model.predict(inputs))
return predictions
# ** Predict on test set**
time1 = time.time()
test_predictions = batch_predictions(model, test_spectra, 500, denormalize)
print("{0:.2f}".format(time.time()-time1)+' seconds to make '+str(len(test_spectra))+' predictions')
# ** Show residuals on test set label predictions**
# +
# Some plotting variables for aesthetics
# %matplotlib inline
# Label names
label_names = ['$T_{\mathrm{eff}}$', '$\log(g)$', '$[Fe/H]$']
# Pipeline names
x_lab = 'ASPCAP'
y_lab = 'StarNet'
plt.rcParams['axes.facecolor']='white'
sns.set_style("ticks")
plt.rcParams['axes.grid']=True
plt.rcParams['grid.color']='gray'
plt.rcParams['grid.alpha']='0.4'
#plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# -
# +
all_targets = test_labels
all_pred = test_predictions
z = test_snr
z[z>250]=250
resid = all_pred - all_targets
# Overplot high S/N
order = (z).reshape(z.shape[0],).argsort()
all_targets = all_targets[order]
resid = resid[order]
z = z[order,0]
bias = np.median(resid, axis=0)
scatter = np.std(resid, axis=0)
indices_a = np.where(z>=150)
indices_b = np.where(z<100)
resid_a = resid[indices_a,:]
resid_b = resid[indices_b,:]
cmap = sns.cubehelix_palette(8, start=2.8, rot=.1, dark=0, light=.95, as_cmap=True)
lims = [[(3800,5800),(-0.50,4.50),(-2.5,0.6)],[(-1000,1000),(-2.0,2.0),(-1.,1.)]]
ditribution_lims = [(-200,200),(-0.4,0.4),(-0.2,0.2)]
fig = plt.figure(figsize=(38, 30))
gs = gridspec.GridSpec(3, 2, width_ratios=[4., 1])
for i in range(num_labels):
ax0 = plt.subplot(gs[i,0])
points = ax0.scatter(all_targets[:,i], resid[:,i], c=z, s=100, cmap=cmap)
ax0.set_xlabel(x_lab + ' ' + label_names[i], fontsize=70)
ax0.set_ylabel(r'$\Delta$ %s ' % (label_names[i]) +
'\n' + r'(%s - %s)' % (y_lab, x_lab), fontsize=70)
ax0.tick_params(labelsize=50, width=1, length=10)
ax0.set_xlim(lims[0][i])
ax0.set_ylim(lims[1][i])
ax0.plot([lims[0][i][0],lims[0][i][1]], [0,0], 'k--', lw=2)
xmin, xmax = ditribution_lims[i]
y_a = resid_a[0,:,i][(resid_a[0,:,i]>=xmin)&(resid_a[0,:,i]<=xmax)]
y_b = resid_b[0,:,i][(resid_b[0,:,i]>=xmin)&(resid_b[0,:,i]<=xmax)]
ax1 = plt.subplot(gs[i,1])
a = sns.distplot(y_a, vertical=True,hist=False, rug=False, ax=ax1,kde_kws={"color": cmap(200), "lw": 10})
b = sns.distplot(y_b,vertical=True,hist=False, rug=False, ax=ax1,kde_kws={"color": cmap(100), "lw": 10})
a.set_ylim(ditribution_lims[i])
b.set_ylim(ditribution_lims[i])
ax1.tick_params(
axis='x',
which='both',
bottom='off',
top='off',
labelbottom='off',width=1,length=10)
ax1.tick_params(
axis='y',
which='both',
left='off',
right='on',
labelleft='off',
labelright='on',
labelsize=50,width=1,length=10)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=3)
if i==0:
plt.figtext(0.185, (1-((i*0.332)+0.24)),
'$\widetilde{m}$='+'{0:.2f}'.format(bias[i])+' $s$='+'{0:.2f}'.format(scatter[i]),
size=70, bbox=bbox_props)
else:
plt.figtext(0.185, (1-((i*0.332)+0.24)),
'$\widetilde{m}$='+'{0:.3f}'.format(bias[i])+' $s$='+'{0:.3f}'.format(scatter[i]),
size=70, bbox=bbox_props)
cbar_ax = fig.add_axes([0.9, 0.1, 0.02, 0.83])
fig.colorbar(points,cax=cbar_ax)
cbar = fig.colorbar(points, cax=cbar_ax, extend='neither', spacing='proportional', orientation='vertical', format="%.0f")
cbar.set_label('SNR', size=65)
cbar.ax.tick_params(labelsize=50,width=1,length=10)
cbar_ax.set_yticklabels(['','100','','150','','200','','$>$250'])
plt.tight_layout()
fig.subplots_adjust(right=0.8)
plt.show()
# -
# <caption><center> <u><font color='purple'>**Figure 3**</u>:<br> In the next part, we will call the pictures from left to right: Anchor (A), Positive (P), Negative (N)</center></caption>
#
#
# ### 1.2 - The Triplet Loss
#
# For an image $x$, we denote its encoding $f(x)$, where $f$ is the function computed by the neural network.
#
# <img src="images/f_x.png" style="width:380px;height:150px;">
#
# <!--
# We will also add a normalization step at the end of our model so that $\mid \mid f(x) \mid \mid_2 = 1$ (means the vector of encoding should be of norm 1).
# !-->
#
# Training will use triplets of images $(A, P, N)$:
#
# - A is an "Anchor" image--a picture of a person.
# - P is a "Positive" image--a picture of the same person as the Anchor image.
# - N is a "Negative" image--a picture of a different person than the Anchor image.
#
# These triplets are picked from our training dataset. We will write $(A^{(i)}, P^{(i)}, N^{(i)})$ to denote the $i$-th training example.
#
# You'd like to make sure that an image $A^{(i)}$ of an individual is closer to the Positive $P^{(i)}$ than to the Negative image $N^{(i)}$ by at least a margin $\alpha$:
#
# $$\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 + \alpha < \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$$
#
# You would thus like to minimize the following "triplet cost":
#
# $$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \underbrace{\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2}_\text{(1)} - \underbrace{\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2}_\text{(2)} + \alpha \large ] \small_+ \tag{3}$$
#
# Here, we are using the notation "$[z]_+$" to denote $max(z,0)$.
#
# Notes:
# - The term (1) is the squared distance between the anchor "A" and the positive "P" for a given triplet; you want this to be small.
# - The term (2) is the squared distance between the anchor "A" and the negative "N" for a given triplet; you want this to be relatively large, so it makes sense to have a minus sign preceding it.
# - $\alpha$ is called the margin. It is a hyperparameter that you should pick manually. We will use $\alpha = 0.2$.
#
# Most implementations also normalize the encoding vectors to have norm equal one (i.e., $\mid \mid f(img)\mid \mid_2$=1); you won't have to worry about that here.
#
# **Exercise**: Implement the triplet loss as defined by formula (3). Here are the 4 steps:
# 1. Compute the distance between the encodings of "anchor" and "positive": $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$
# 2. Compute the distance between the encodings of "anchor" and "negative": $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$
# 3. Compute the formula per training example: $ \mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2 + \alpha$
# 4. Compute the full formula by taking the max with zero and summing over the training examples:
# $$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2+ \alpha \large ] \small_+ \tag{3}$$
#
# Useful functions: `tf.reduce_sum()`, `tf.square()`, `tf.subtract()`, `tf.add()`, `tf.maximum()`.
# For steps 1 and 2, you will need to sum over the entries of $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$ and $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$ while for step 4 you will need to sum over the training examples.
# +
# GRADED FUNCTION: triplet_loss
def triplet_loss(y_true, y_pred, alpha = 0.2):
"""
Implementation of the triplet loss as defined by formula (3)
Arguments:
y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
y_pred -- python list containing three objects:
anchor -- the encodings for the anchor images, of shape (None, 128)
positive -- the encodings for the positive images, of shape (None, 128)
negative -- the encodings for the negative images, of shape (None, 128)
Returns:
loss -- real number, value of the loss
"""
anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
### START CODE HERE ### (≈ 4 lines)
# Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
# Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
# Step 3: subtract the two previous distances and add alpha.
basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
# Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
loss = tf.reduce_sum(tf.maximum(basic_loss, 0))
### END CODE HERE ###
return loss
# -
with tf.Session() as test:
tf.set_random_seed(1)
y_true = (None, None, None)
y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),
tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),
tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))
loss = triplet_loss(y_true, y_pred)
print("loss = " + str(loss.eval()))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **loss**
# </td>
# <td>
# 528.143
# </td>
# </tr>
#
# </table>
# ## 2 - Loading the trained model
#
# FaceNet is trained by minimizing the triplet loss. But since training requires a lot of data and a lot of computation, we won't train it from scratch here. Instead, we load a previously trained model. Load a model using the following cell; this might take a couple of minutes to run.
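# Note: FRmodel is assumed to have been created in an earlier cell of the original notebook
# (the assignment builds the Inception-based face recognition network there) before it is compiled below.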
FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])
load_weights_from_FaceNet(FRmodel)
# Here are some examples of distances between the encodings of three individuals:
#
# <img src="images/distance_matrix.png" style="width:380px;height:200px;">
# <br>
# <caption><center> <u> <font color='purple'> **Figure 4**:</u> <br> <font color='purple'> Example of distance outputs between three individuals' encodings</center></caption>
#
# Let's now use this model to perform face verification and face recognition!
# ## 3 - Applying the model
# Back to the Happy House! Residents are living blissfully since you implemented happiness recognition for the house in an earlier assignment.
#
# However, several issues keep coming up: The Happy House became so happy that every happy person in the neighborhood is coming to hang out in your living room. It is getting really crowded, which is having a negative impact on the residents of the house. All these random happy people are also eating all your food.
#
# So, you decide to change the door entry policy, and not just let random happy people enter anymore, even if they are happy! Instead, you'd like to build a **Face verification** system so as to only let people from a specified list come in. To get admitted, each person has to swipe an ID card (identification card) to identify themselves at the door. The face recognition system then checks that they are who they claim to be.
# ### 3.1 - Face Verification
#
# Let's build a database containing one encoding vector for each person allowed to enter the happy house. To generate the encoding we use `img_to_encoding(image_path, model)` which basically runs the forward propagation of the model on the specified image.
#
# Run the following code to build the database (represented as a python dictionary). This database maps each person's name to a 128-dimensional encoding of their face.
database = {}
database["danielle"] = img_to_encoding("images/danielle.png", FRmodel)
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
database["tian"] = img_to_encoding("images/tian.jpg", FRmodel)
database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
database["kian"] = img_to_encoding("images/kian.jpg", FRmodel)
database["dan"] = img_to_encoding("images/dan.jpg", FRmodel)
database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
database["bertrand"] = img_to_encoding("images/bertrand.jpg", FRmodel)
database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
database["felix"] = img_to_encoding("images/felix.jpg", FRmodel)
database["benoit"] = img_to_encoding("images/benoit.jpg", FRmodel)
database["arnaud"] = img_to_encoding("images/arnaud.jpg", FRmodel)
# Now, when someone shows up at your front door and swipes their ID card (thus giving you their name), you can look up their encoding in the database, and use it to check if the person standing at the front door matches the name on the ID.
#
# **Exercise**: Implement the verify() function which checks if the front-door camera picture (`image_path`) is actually the person called "identity". You will have to go through the following steps:
# 1. Compute the encoding of the image from image_path
# 2. Compute the distance between this encoding and the encoding of the identity image stored in the database
# 3. Open the door if the distance is less than 0.7, else do not open.
#
# As presented above, you should use the L2 distance (np.linalg.norm). (Note: In this implementation, compare the L2 distance, not the square of the L2 distance, to the threshold 0.7.)
# +
# GRADED FUNCTION: verify
def verify(image_path, identity, database, model):
"""
Function that verifies if the person on the "image_path" image is "identity".
Arguments:
image_path -- path to an image
identity -- string, name of the person you'd like to verify the identity. Has to be a resident of the Happy house.
database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).
model -- your Inception model instance in Keras
Returns:
dist -- distance between the image_path and the image of "identity" in the database.
door_open -- True, if the door should open. False otherwise.
"""
### START CODE HERE ###
# Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. (≈ 1 line)
encoding = img_to_encoding(image_path, FRmodel)
# Step 2: Compute distance with identity's image (≈ 1 line)
dist = np.linalg.norm(encoding-database[identity])
# Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)
if dist < 0.7:
print("It's " + str(identity) + ", welcome home!")
door_open = True
else:
print("It's not " + str(identity) + ", please go away")
door_open = False
### END CODE HERE ###
return dist, door_open
# -
# Younes is trying to enter the Happy House and the camera takes a picture of him ("images/camera_0.jpg"). Let's run your verification algorithm on this picture:
#
# <img src="images/camera_0.jpg" style="width:100px;height:100px;">
verify("images/camera_0.jpg", "younes", database, FRmodel)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **It's younes, welcome home!**
# </td>
# <td>
# (0.65939283, True)
# </td>
# </tr>
#
# </table>
# Benoit, who broke the aquarium last weekend, has been banned from the house and removed from the database. He stole Kian's ID card and came back to the house to try to present himself as Kian. The front-door camera took a picture of Benoit ("images/camera_2.jpg"). Let's run the verification algorithm to check if Benoit can enter.
# <img src="images/camera_2.jpg" style="width:100px;height:100px;">
verify("images/camera_2.jpg", "kian", database, FRmodel)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **It's not kian, please go away**
# </td>
# <td>
# (0.86224014, False)
# </td>
# </tr>
#
# </table>
# ### 3.2 - Face Recognition
#
# Your face verification system is mostly working well. But since Kian got his ID card stolen, when he came back to the house that evening he couldn't get in!
#
# To reduce such shenanigans, you'd like to change your face verification system to a face recognition system. This way, no one has to carry an ID card anymore. An authorized person can just walk up to the house, and the front door will unlock for them!
#
# You'll implement a face recognition system that takes as input an image, and figures out if it is one of the authorized persons (and if so, who). Unlike the previous face verification system, we will no longer get a person's name as another input.
#
# **Exercise**: Implement `who_is_it()`. You will have to go through the following steps:
# 1. Compute the target encoding of the image from image_path
# 2. Find the encoding from the database that has smallest distance with the target encoding.
# - Initialize the `min_dist` variable to a large enough number (100). It will help you keep track of what is the closest encoding to the input's encoding.
# - Loop over the database dictionary's names and encodings. To loop use `for (name, db_enc) in database.items()`.
# - Compute L2 distance between the target "encoding" and the current "encoding" from the database.
# - If this distance is less than the min_dist, then set min_dist to dist, and identity to name.
# +
# GRADED FUNCTION: who_is_it
def who_is_it(image_path, database, model):
"""
Implements face recognition for the happy house by finding who is the person on the image_path image.
Arguments:
image_path -- path to an image
database -- database containing image encodings along with the name of the person on the image
model -- your Inception model instance in Keras
Returns:
min_dist -- the minimum distance between image_path encoding and the encodings from the database
identity -- string, the name prediction for the person on image_path
"""
### START CODE HERE ###
## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
encoding = img_to_encoding(image_path, model)
## Step 2: Find the closest encoding ##
# Initialize "min_dist" to a large value, say 100 (≈1 line)
min_dist = 100
# Loop over the database dictionary's names and encodings.
for (name, db_enc) in database.items():
db_enc = np.array(db_enc)
# Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
dist = np.linalg.norm(encoding-db_enc)
# If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
if dist < min_dist:
min_dist = dist
identity = name
### END CODE HERE ###
if min_dist > 0.7:
print("Not in the database.")
else:
print ("it's " + str(identity) + ", the distance is " + str(min_dist))
return min_dist, identity
# -
# Younes is at the front door and the camera takes a picture of him ("images/camera_0.jpg"). Let's see if your who_is_it() algorithm identifies Younes.
who_is_it("images/camera_0.jpg", database, FRmodel)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **it's younes, the distance is 0.659393**
# </td>
# <td>
# (0.65939283, 'younes')
# </td>
# </tr>
#
# </table>
# You can change "`camera_0.jpg`" (picture of younes) to "`camera_1.jpg`" (picture of bertrand) and see the result.
# Your Happy House is running well. It only lets in authorized persons, and people don't need to carry an ID card around anymore!
#
# You've now seen how a state-of-the-art face recognition system works.
#
# Although we won't implement it here, here're some ways to further improve the algorithm:
# - Put more images of each person (under different lighting conditions, taken on different days, etc.) into the database. Then given a new image, compare the new face to multiple pictures of the person, as in the sketch below. This would increase accuracy.
# - Crop the images to just contain the face, and less of the "border" region around the face. This preprocessing removes some of the irrelevant pixels around the face, and also makes the algorithm more robust.
#
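# A minimal sketch (an assumption, not part of the original assignment) of the first idea above,
# keeping several encodings per person and comparing a new image against all of them:
#
# ```python
# def verify_multi(image_path, identity, multi_database, model, threshold=0.7):
#     # multi_database is a hypothetical dict mapping each name to a *list* of encodings
#     encoding = img_to_encoding(image_path, model)
#     min_dist = min(np.linalg.norm(encoding - db_enc) for db_enc in multi_database[identity])
#     return min_dist, min_dist < threshold
# ```
#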
# <font color='blue'>
# **What you should remember**:
# - Face verification solves an easier 1:1 matching problem; face recognition addresses a harder 1:K matching problem.
# - The triplet loss is an effective loss function for training a neural network to learn an encoding of a face image.
# - The same encoding can be used for verification and recognition. Measuring distances between two images' encodings allows you to determine whether they are pictures of the same person.
# Congrats on finishing this assignment!
#
# ### References:
#
# - Florian Schroff, Dmitry Kalenichenko, James Philbin (2015). [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/pdf/1503.03832.pdf)
# - Yaniv Taigman, Ming Yang, Marc'Aurelio Ranzato, Lior Wolf (2014). [DeepFace: Closing the gap to human-level performance in face verification](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf)
# - The pretrained model we use is inspired by Victor Sy Wang's implementation and was loaded using his code: https://github.com/iwantooxxoox/Keras-OpenFace.
# - Our implementation also took a lot of inspiration from the official FaceNet github repository: https://github.com/davidsandberg/facenet
#
| 23,953 |
/curriculum/week-05/1.2-knn-classification-imputation-lab/starter-code/1.2-knn-classification-imputation-lab-starter.ipynb
|
8e57d88c7ab282b671b5366765ab917e4868959b
|
[] |
no_license
|
austinmwhaley/DSI-SF-4-austinmwhaley
|
https://github.com/austinmwhaley/DSI-SF-4-austinmwhaley
| 0 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 15,724 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [dsi]
# language: python
# name: Python [dsi]
# ---
# # KNN for classification and imputation
#
# In this lab you'll practice using KNN for classification first, then explore how it can be used for effective variable imputation.
# ---
#
# ### 1. Load packages
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from sklearn.neighbors import KNeighborsClassifier
# -
import imp
plotter = imp.load_source('plotter', '/Users/austinwhaley/Desktop/DSI-SF-4-austinmwhaley/utils/plotting/knn_plotter.py')
from plotter import KNNBoundaryPlotter
# ---
#
# ### 3. Load datasets
#
affair = pd.read_csv('/Users/austinwhaley/Desktop/DSI-SF-4-austinmwhaley/datasets/affairs/affair.csv')
churn = pd.read_csv('/Users/austinwhaley/Desktop/DSI-SF-4-austinmwhaley/datasets/cell_phone_churn/cell_phone_churn.csv')
coffee = pd.read_csv('/Users/austinwhaley/Desktop/DSI-SF-4-austinmwhaley/datasets/coffee_preferences/dat12-coffee-preferences.csv')
# ---
#
# ### 4. Encode affairs vs. not in affair dataset
#
# This will be your binary target class variable.
affair.head()
affair.nbaffairs.unique()
affair.drop(affair.columns[0], axis=1, inplace=True)
affair.dropna(inplace=True)
affair.nbaffairs = affair.nbaffairs.map(lambda x: 1 if x >= 1 else 0)
# ---
#
# ### 5. Clean and convert string variables
affair.head(1)
affair.sex = affair.sex.map(lambda x: 1 if x == 'male' else 0)
affair.child = affair.child.map(lambda x: 0 if x == 'no' else 1)
# ---
#
# ### 6. Fit a `KNeighborsClassifier` with `weights='uniform'` and `n_neighbors=3`
#
# You should choose **2 predictor variables** to predict had affair vs. not
# +
from sklearn.neighbors import KNeighborsClassifier
X = affair[['age','religious']]
y = affair.nbaffairs.values
knn_uniform_n3 = KNeighborsClassifier(n_neighbors=3, weights='uniform')
knn_uniform_n3.fit(X, y)
# -
person9 = X.iloc[[9], :]  # double brackets keep a 2-D, single-row frame so sklearn treats it as one sample
print('baseline:', 1 - np.mean(y))
print(knn_uniform_n3.predict(person9))
print(knn_uniform_n3.predict_proba(person9))
# ---
#
# ### 7. Cross-validate the classifier with `StratifiedKFold`
#
#
# +
from sklearn.model_selection import StratifiedKFold

#ysimple = np.array([1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])

cv_folds = StratifiedKFold(n_splits=5)

scores = []
for i, (train, test) in enumerate(cv_folds.split(X, y)):
    print('fold, ', i)
    X_train, y_train = X.iloc[train, :], y[train]
    X_test, y_test = X.iloc[test, :], y[test]

    model = KNeighborsClassifier(n_neighbors=3, weights='uniform')
    model.fit(X_train, y_train)

    test_accuracy = model.score(X_test, y_test)
    scores.append(test_accuracy)

print(scores)
print(np.mean(scores))
# -
# ---
#
# ### 8. Do the same but with `n_neighbors=11`
#
# Use the same predictor variables and cv folds.
# ---
#
# ### 9. Cross-validate a model with `n_neighbors=11` and `weights='distance'`
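# A minimal sketch (one possible answer, not the official solution) for sections 8 and 9. It reuses
# the X and y defined in section 6 and the same 5-fold stratified splits, trying n_neighbors=11 with
# uniform and then distance weighting.
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=5)
for wts in ['uniform', 'distance']:
    fold_scores = []
    for train, test in skf.split(X, y):
        knn11 = KNeighborsClassifier(n_neighbors=11, weights=wts)
        knn11.fit(X.iloc[train, :], y[train])
        fold_scores.append(knn11.score(X.iloc[test, :], y[test]))
    print(wts, np.mean(fold_scores))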
# ---
#
# ### 10. [Optional] Explore the model visually with the `KNNBoundaryPlotter`
# ---
#
# ### 11. With the churn dataset, find the optimal neighbors and weighting to predict churn using gridsearch
#
# Show the cross-validated accuracy of the model.
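# A minimal sketch (one possible approach, not the official solution) for section 11. The column
# names are assumptions: a binary 'churn' target column plus numeric feature columns in the churn
# dataframe loaded above.
from sklearn.model_selection import GridSearchCV
churn_features = churn.select_dtypes(include=[np.number]).dropna()
churn_features = churn_features.drop(columns=['churn'], errors='ignore')
churn_target = churn.loc[churn_features.index, 'churn'].astype(int)   # assumed target column
param_grid = {'n_neighbors': list(range(1, 31, 2)), 'weights': ['uniform', 'distance']}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring='accuracy')
grid.fit(churn_features, churn_target)
print(grid.best_params_)
print(grid.best_score_)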
# ---
#
# ## Variable imputation with KNeighbors
#
# You can actually do both classification _and_ regression with KNN. It is quite flexible due to its simplicity. One of its most useful features is the ability to perform effective variable imputation.
#
# ---
#
# ### 12. Look at the coffee data, count the missing values
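# For example (a minimal sketch), pandas can count the missing ratings per column:
coffee.isnull().sum()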
# ---
#
# ### 13. For each of the missing columns, build a `KNeighborsClassifier` to predict rating for that column based on the other columns
#
# Another great benefit of KNN is the ease with which it can do multi-class problems like this.
#
# [Note: there is a more complicated way to do this, but I am doing it the simple way in the solutions.]
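# A minimal sketch of the simple approach described above (not the official solution). Rating columns
# are assumed to be the numeric columns of the dataframe; each column with gaps is predicted from the
# other rating columns, which are crudely mean-filled so the classifier always sees complete inputs.
rating_cols = coffee.select_dtypes(include=[np.number]).columns
coffee_imputed = coffee.copy()
for col in rating_cols:
    if coffee[col].notnull().all():
        continue
    other = rating_cols.drop(col)
    features = coffee[other].fillna(coffee[other].mean())
    known = coffee[col].notnull()
    imputer = KNeighborsClassifier(n_neighbors=3)
    imputer.fit(features[known], coffee.loc[known, col].astype(int))
    coffee_imputed.loc[~known, col] = imputer.predict(features[~known])
coffee_imputed.isnull().sum()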
| 4,097 |
/Lab for 1-20-23.ipynb
|
59dc4ebdc61a23075593323b9b979063f6fcc779
|
[] |
no_license
|
cwcurtis/Math-340-Notebooks
|
https://github.com/cwcurtis/Math-340-Notebooks
| 3 | 12 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,848 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="8if-ZJmp7jRb"
# 4-1. 들어가며
# 학습 목표
# 이번 시간의 목표입니다.
#
# 로이터 뉴스 데이터 이해하기
# F1-score와 confusion matrix 출력해보기
# 여러 머신러닝 모델에 대해서 성능 비교해보기
# 단어 수에 따른 성능 비교해보기
# 준비물
# 터미널을 열어 실습에 필요한 디렉토리를 생성합니다.
#
# ```
# $ mkdir -p ~/aiffel/reuters_classifiaction
# ```
#
# 머신러닝을 이용한 텍스트 분류
# 텍스트 분류(Text Classification)란 주어진 텍스트를 사전 정의된 클래스(Pre-defined Class)들로 분류하는 자연어 처리 태스크입니다. 이 태스크는 자연어 처리 태스크 중 가장 기본이 되면서, 비지니스 영역에서도 가장 수요가 높은 편입니다.
#
# 우리가 평소에 쉽게 접할 수 있는 텍스트 분류의 영역으로는 예를 들어 '스팸 메일 자동 분류', '사용자의 리뷰로부터 긍정, 부정을 판단하는 감성 분류', '포털 사이트가 언론사의 뉴스들을 문화, 정치 등 자동으로 분류하여 보여주는 뉴스 카테고리 분류' 등이 있습니다.
#
# 이러한 분류들을 AI 알고리즘을 통해 수행한다고 하면 일반적으로 아래와 같은 과정을 거칩니다.
#
# 
#
# 주어진 문장 또는 문서를 벡터화하고, 이를 AI 모델에 입력값으로 넣어주면 모델은 예측한 카테고리를 리턴하게 되죠. 만약, 딥러닝 모델을 사용하고 있다면 벡터화 방법으로는 워드 임베딩을 사용하고 RNN, CNN, BERT와 같은 딥러닝 모델을 사용해서 클래스를 예측할 수 있을 거예요.
#
# 하지만 AI 알고리즘에는 딥러닝이 아닌 다른 우수한 수많은 머신러닝 알고리즘들이 존재합니다. 오늘은 딥러닝이 아니라 여러 머신러닝 알고리즘을 사용해서 텍스트를 분류해 보겠습니다. 딥러닝을 사용하지 않는 상황에서 텍스트를 벡터화하여 좋은 feature를 뽑아내기 위하여, 이전 노드에서 다루었던 내용들이 어떻게 활용되는지 경험해 볼수 있을 것입니다.
#
# 텍스트 분류에도 여러 종류가 있는데 대표적으로 클래스가 2개인 경우를 이진 분류(Binary Classification)라고 하고, 클래스 3개 이상인 경우를 다중 클래스 분류(Multiclass Classification)라고 합니다. 메일이 스팸인지 아닌지를 결정하는 스팸 메일 분류기가 이진 분류의 대표적인 예일 것입니다. 오늘 우리가 하게 될 텍스트 분류는 다중 클래스 분류(Multiclass Classification)에 속하는데 구체적인 내용은 데이터를 살펴보면서 이해해 보겠습니다.
# + [markdown] id="8vO6ZMUM8ke7"
# 4-2. 로이터 뉴스 데이터 (1) 데이터 확인하기
# 우리가 사용할 데이터는 로이터 뉴스 데이터입니다. 이 데이터는 총 46개의 클래스로 구성되며, 해당 뉴스가 어느 카테고리에 속하는지를 예측하기 위한 데이터입니다. 텐서플로우 데이터셋에서 제공하고 있는 데이터로 아주 쉽게 다운로드가 가능합니다.
# + id="_qsbqhFz7cSl"
from tensorflow.keras.datasets import reuters
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# + [markdown] id="S-0OhRpq8q4K"
# 훈련 데이터와 테스트 데이터 로드하기
# 텐서플로우 데이터셋으로부터 로이터 뉴스 데이터를 받아옵니다.
# + colab={"base_uri": "https://localhost:8080/"} id="wXdPxklk8qYi" outputId="c311f0f8-1fff-4714-99fe-0ec6feb34210"
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000, test_split=0.2)
# + [markdown] id="C9WEjEzS82-o"
# num_words는 이 데이터에서 빈도수 기준으로 상위 몇 번째 단어까지 사용할 것인지 조절합니다. 각 단어는 고유한 번호가 정해져 있는 상태이고, 이를 통해서 사용할 단어의 수를 정하는 것이죠.
#
# 이 데이터의 단어들은 등장 빈도수가 높은 순서대로 낮은 정수가 맵핑되어져 있습니다. 예를 들어서 50번 단어보다는 10번 단어가 데이터 전체에서 빈도수가 등장 빈도 순위로 40등이 더 높은 단어입니다. 위에서 num_words=10000은 1~10,000번 단어만 사용한다는 의미입니다. 등장 빈도수가 높은 단어들만 사용하겠다는 의미지요.
#
# 그런데 주의할 점은 데이터를 받아올 때, num_words의 인자로 10,000을 기재한다고 해서 10,000보다 높은 정수가 맵핑된 단어들이 받아온 데이터에서 사라지는 게 아니라는 겁니다. num_words로 주어진 값보다 큰 번호를 가졌던 단어들은 특정 번호로 전부 맵핑이 됩니다. 이는 OOV 문제라는 자연어 처리의 대표적인 문제와 연관이 있는데요. 이에 대해서는 뒤에서 언급하겠습니다.
#
# test_split = 0.2를 하게 되면 전체 데이터 중에서 80%를 훈련 데이터, 20%를 테스트 데이터로 사용한다는 의미입니다. x_train과 x_test가 각각 훈련용 뉴스 데이터와 테스트용 뉴스 데이터, y_train과 y_test는 각각 훈련용 뉴스 데이터의 레이블, 테스트용 뉴스 데이터의 레이블이 되는 것이지요.
#
# 이렇게 받아온 데이터가 어떤 구성을 가지고 있는지 출력해 볼까요?
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="fA825dXp8z_g" outputId="f8a6c323-f2e6-40bd-f8b9-f507d21bfb20"
print('훈련 샘플의 수: {}'.format(len(x_train)))
print('테스트 샘플의 수: {}'.format(len(x_test)))
# + [markdown] id="e7MHk8gt8_LZ"
# 데이터 출력해보기
# 훈련용 뉴스는 8,982개. 그리고 테스트용 뉴스는 2,246개가 있네요. 8:2의 비율을 가지는데, 첫 번째 훈련용 뉴스와 첫 번째 테스트용 뉴스를 출력해 보겠습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="Oztxnh1v85HK" outputId="122288ee-17b8-454d-9809-75a2aa144979"
print(x_train[0])
print(x_test[0])
# + [markdown] id="xQPoKnNu9E8I"
# 우리는 뉴스 데이터를 다루기로 했는데, 실제 출력해보면 텍스트가 아니라 숫자 시퀀스가 출력됩니다. 사실 각 단어가 빈도수가 높은 순서대로 낮은 정수가 맵핑되어져 있다고 했었는데, 이미 뉴스 데이터를 다운받을 때는 단어가 아니라 해당 번호로 변환이 되어서 출력이 되고 있습니다.
#
# 대부분의 자연어 처리에서는 텍스트를 숫자로 수치화하는 과정이 필요한데, 텐서플로우 데이터셋에서는 이미 전처리를 한 데이터를 제공해주는 셈입니다.
#
# 이제 레이블도 출력해 볼까요? 첫 번째 훈련용 뉴스와 테스트용 뉴스의 레이블을 출력해봅시다.
# + colab={"base_uri": "https://localhost:8080/"} id="l_KQG-Za9CsI" outputId="94d06213-d06c-4df9-cf7c-82f99cfbde71"
print(y_train[0])
print(y_test[0])
# + [markdown] id="Aahc6YOa9P0M"
# 둘 다 숫자 3이 출력되네요. 각 뉴스는 정수로 된 레이블을 가지고 있군요. 이 레이블은 숫자 0부터 시작되므로, 모든 레이블 중 최댓값을 구하고 1을 더하면 현재 클래스의 개수를 볼 수 있어요.
# + colab={"base_uri": "https://localhost:8080/"} id="1l8ugiYF9Mf0" outputId="1344257e-a30b-40da-b274-91d47f3b3cb9"
num_classes = max(y_train) + 1
print('클래스의 수 : {}'.format(num_classes))
# + [markdown] id="PVlB0mie9VZv"
# 이 뉴스 데이터는 총 46개의 클래스를 가지고 있습니다. 이 정도면 적지는 않은 클래스라서 높은 정확도를 얻는 일이 쉽지는 않을 것 같은 예감이 드네요.
#
# 데이터 분포 확인해보기
# 자연어 처리 과정에서 문장, 문서들의 길이는 보통 다릅니다. 우리가 평소에 보는 뉴스 기사들이 길이가 항상 똑같지는 않죠. 이 데이터도 마찬가지예요. 모든 뉴스 데이터들은 길이가 다 다를 거예요. 뉴스 데이터의 길이 분포를 볼까요?
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="Y59yts8B9Tv1" outputId="7fea9259-e24b-4bdf-dc06-fa5c3c235fc1"
print('훈련용 뉴스의 최대 길이 :{}'.format(max(len(l) for l in x_train)))
print('훈련용 뉴스의 평균 길이 :{}'.format(sum(map(len, x_train))/len(x_train)))
plt.hist([len(s) for s in x_train], bins=50)
plt.xlabel('length of samples')
plt.ylabel('number of samples')
plt.show()
# + [markdown] id="1JDOrsgA9a3X"
# 길이가 가장 긴 뉴스의 길이는 2,376입니다. 평균 길이는 145구요. 아무래도 길이가 가장 긴 뉴스의 길이가 다른 뉴스에 비해 유독 긴 편인 것 같습니다. 그리고 시각화된 그래프의 분포로 봤을 때 500~1,000 사이의 길이를 가지는 뉴스도 일부 있어 보이죠?
#
# 우리가 맞춰야 하는 클래스의 분포를 확인해볼까요? 모델의 성능에 영향을 줄 수 있는 요소이기 때문에 실제로 모든 클래스가 다 존재는 하는 것인지, 또 어떤 클래스가 유독 많은지, 이런 것들을 사전에 확인해 보아야 합니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 391} id="f4N69GMz9Xaf" outputId="8d88c9c8-7a2b-431e-81ec-b45e681f61d6"
fig, axe = plt.subplots(ncols=1)
fig.set_size_inches(12,5)
sns.countplot(y_train)
# + [markdown] id="nF38CUuN9iqm"
# 이 뉴스 데이터는 3번, 4번 클래스가 대부분을 차지하고 있습니다. 그 뒤로는 19번, 16번, 1번, 11번 등이 높은 분포를 가지고 있네요. 시각화를 통해 어떤 클래스가 많고, 적은 것을 쉽게 확인할 수는 있었지만 수치적으로 정확히 몇 개인지도 출력해보겠습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="HFOnj_dx9e3V" outputId="2d61eed9-6133-4e54-ea1b-ad2ee75a24ca"
unique_elements, counts_elements = np.unique(y_train, return_counts=True)
print("각 클래스 빈도수:")
print(np.asarray((unique_elements, counts_elements)))
# + [markdown] id="-QaxFuRN9rE5"
# 3번, 4번 클래스의 경우에는 각각 3,159개와 1,949개가 존재합니다. 그 뒤로는 549개로 19번 클래스가 세 번째로 많고, 16번 클래스가 444개로 네 번째로 많네요. 이렇게 각 클래스가 몇 개가 존재하는지도 확인해 봤어요.
# + [markdown] id="nPOG6jsN-EQU"
# 4-3. 로이터 뉴스 데이터 (2) 데이터 복원하기
# 원본 뉴스 데이터로 복원해보기
# 이번에는 조금은 일반적이지 않은 전처리를 해볼 겁니다! 이 데이터는 이미 어느 정도 전처리가 되어서 각 단어가 정수 시퀀스로 변환된 채 우리에게 제공되고 있다고 언급했었어요. 하지만, 일반적인 상황이라면 텍스트를 숫자로 수치화하는 과정을 우리의 힘으로 처음부터 해야 합니다.
#
# 그래서 우리는 여기서 정수 시퀀스로 변환된 데이터를 '굳이' 다시 텍스트로 돌려보겠습니다.
#
# 아래와 같이 로이터 뉴스 데이터는 '단어'를 key값으로, 고유한 '정수'를 value로 가지는 dictionary를 제공합니다. 이를 word_index로 저장해보겠습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="lOg6Ok439k2m" outputId="08f03247-aa52-4014-e1c5-a8d568572789"
word_index = reuters.get_word_index(path="reuters_word_index.json")
# + [markdown] id="O7k8tdfF-I-J"
# 이게 이 데이터의 단어장(Vocabulary) 입니다. 이제 word_index[] 안에 단어를 입력하면 이 단어가 어떤 정수와 맵핑이 되는지를 알 수 있습니다. 예를 들어 단어 'the'는 어떤 정수와 맵핑이 될까요?
# + colab={"base_uri": "https://localhost:8080/"} id="Kz5CZH05-Gd2" outputId="fda4bb0b-527d-44a1-b81e-66c53cc53959"
word_index['the']
# + [markdown] id="9cDJb5mz-NVE"
# 어 'the'는 정수 1과 맵핑이 됩니다. 그렇다면, 단어 'it'은 어떤 정수와 맵핑이 될까요?
# + colab={"base_uri": "https://localhost:8080/"} id="0V2LatLo-LHy" outputId="94d49f38-5e38-4499-d376-4b5d59379329"
word_index['it']
# + [markdown] id="bHHTycKc-TsW"
# 단어 'it'은 정수 13과 맵핑이 됩니다. 다른 단어들도 시도해 보세요! 그런데 지금 우리가 해야 할 일은 현재 갖고 있는 정수 시퀀스로 되어 있는 데이터를 텍스트 형태로 되돌려야 하는 일이므로 사실 word_index보다는 정수로부터 단어를 얻을 수 있는 index_word가 필요합니다.
#
# 그런데 이 데이터에는 숨겨진 비밀이 하나 있는데, 뉴스 데이터 안에서 단어 'the'는 사실 1번이 아니라 4번 단어입니다. 그리고 단어 'it'은 사실 13번 단어가 아니라 16번 단어입니다. 다른 단어들도 마찬가지로 word_index에 입력으로 했을 때, 얻는 숫자보다는 +3을 한 숫자가 원래 고유한 숫자입니다. 이건 로이터 뉴스 데이터가 정의한 일종의 약속입니다.
#
# reuters.get_word_index에는 실제 단어에 맵핑한 정수에 -3을 한 정수를 입력해 놓았거든요. 그렇기 때문에 word_index에서 index_word를 만들 때, 각 정수에 +3을 해주어야 합니다.
#
#
# + id="auJ1FU39-QaY"
index_to_word = {index + 3 : word for word, index in word_index.items()}
# + [markdown] id="SnrnSyKM-YiI"
# 숫자 4와 16을 넣어서 the와 it이 출력되는지 확인해볼까요?
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="LuFnhe4C-Vh1" outputId="f08619da-64d5-4f02-9411-f907e904e125"
print(index_to_word[4])
print(index_to_word[16])
# + [markdown] id="kN4FhMH7-fcN"
# 이 데이터를 위해 약속된 비밀은 더 있습니다. 0번, 1번, 2번은 사실 각각
# ```
# <pad>, <sos>, <unk>
# ```
# 라는 자연어 처리를 위한 특별한 토큰들을 위해 맵핑되어진 번호입니다. 그래서 만들어진 index_to_word에 추가적으로 이 작업을 해주어야 진짜 index_to_word가 완성됩니다.
# + id="g1VMD9gm-bPJ"
# index_to_word에 숫자 0은 <pad>, 숫자 1은 <sos>, 숫자 2는 <unk>를 넣어줍니다.
for index, token in enumerate(("<pad>", "<sos>", "<unk>")):
index_to_word[index]=token
# + [markdown] id="dLS-T_dK-6BE"
# 자, 이제 index_to_word를 통해서 첫 번째 훈련용 뉴스 기사를 원래 텍스트로 복원해보겠습니다!
# + colab={"base_uri": "https://localhost:8080/"} id="nHX6qfXu-4Sj" outputId="3d74c3fd-b4f4-43bc-eb1c-aed352859a3c"
print(' '.join([index_to_word[index] for index in x_train[0]]))
# + colab={"base_uri": "https://localhost:8080/"} id="w6HAq6juJ5ca" outputId="2455b836-0489-48c2-c5a1-ba5d64717596"
print(x_train[0])
# + [markdown] id="TIkTYOQF_HAR"
# 첫 번째 훈련용 뉴스 기사가 정수 시퀀스에서 텍스트로 다시 원복되었습니다. 이 데이터도 어느 정도 전처리가 된 상태라서, 자연스럽게 읽히지는 않습니다. 하지만 문맥을 가진 텍스트이기 때문에, 이 데이터를 가지고 머신러닝을 시작해도 별 무리는 없어 보입니다.
#
# 원복된 첫 번째 훈련용 뉴스 기사에 특별 토큰인 \<sos>와 \<unk>가 보이는데, \<sos>는 텍스트 분류를 위한 토큰이 아니므로 크게 신경을 쓰지 않아도 됩니다. 하지만 \<unk>는 자연어 처리 전반에서 쓰이는 특별 토큰이므로 이에 대해서는 이해할 필요가 있습니다.
#
# OOV 문제와 UNK 토큰
# 기계가 알고 있는 단어들의 집합을 단어장(Vocabulary)이라고 합니다. 그리고 기계가 미처 배우지 못한 모르는 단어가 등장하게 되면 이 단어를 OOV(Out-Of-Vocabulary) 또는 UNK(Unknown)이라고 표현합니다. 어떤 단어가 기계가 모르는 단어로 판단되면, 기계는 해당 단어를 전부 \<unk>라는 일관된 특별 토큰으로 변환하는 식으로 처리합니다.
#
# 앞서 우리가 가장 맨 처음 사용했던 코드를 상기해봅시다.
# ```
# (x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000, test_split=0.2)
# ```
#
# num_words = 10000는 사실 정수 번호가 10,000이 넘는 단어들을 자동으로 \<unk> 토큰으로 처리합니다. 이 \<unk>은 로이터 뉴스 데이터에서는 정수 2번을 가지도록 약속되어져 있으므로 데이터를 로드하는 과정에서 정수 번호가 10,000이 넘는 단어들은 전부 정수 1로 변환되어서 로드가 되었던 것입니다. 아래 퀴즈에 답해보세요.
#
#
# Q1. 로이터 뉴스 데이터 중 다음과 같은 정수 시퀀스가 있습니다.
# ```
# [4, 587, 23, 133, 6, 30, 515]
# ```
# 현재 우리가 가진 index_to_word를 사용하여 이 문장을 텍스트 시퀀스로 변환해 보세요.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="6ZKoTRzJ_Dm0" outputId="c7fe78f6-fd36-4561-d52e-bef087129c1d"
ts = [4, 587, 23, 133, 6, 30, 515]
result=''
for w in ts:
result += index_to_word[w]
result += ' '
result
# + [markdown] id="6n2pAU8f_6gr"
# 정답 문장
# the transaction is expected to be completed
# 코드
# ```
# print(' '.join([index_to_word[index] for index in [4, 587, 23, 133, 6, 30, 515]]))
# ```
#
# Q2. 데이터를 로드하기 전 로이터 뉴스 데이터에는 다음과 같은 정수 시퀀스가 있었다고 해봅시다.
#
# ```
# [4, 12000, 23, 133, 6, 30, 515]
# ```
# 그런데 만약 우리가 로이터 뉴스 데이터를 아래와 같은 코드로 데이터를 받아왔다고 해보겠습니다.
#
# ```
# (x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000, test_split=0.2)
# ```
# 실제로 로드한 데이터에서 우리가 저 문장을 출력하면 다른 정수 시퀀스가 출력되는데요. 해당 정수 시퀀스를 추측해 보세요.
#
# 예시답안
# ```
# [4, 1, 23, 133, 6, 30, 515]
# ```
# 12,000은 10,000을 넘는 숫자로 OOV에 해당되므로 인 1로 변환되어 데이터가 로드됩니다.
#
# 이제 전체 훈련용 뉴스 데이터와 전체 테스트용 뉴스 데이터를 텍스트 데이터로 변환해 보겠습니다.
# + id="nfzGOlP0_ter"
decoded = []
for i in range(len(x_train)):
t = ' '.join([index_to_word[index] for index in x_train[i]])
decoded.append(t)
x_train = decoded
# + id="qT2uibJAG4Ok"
decoded = []
for i in range(len(x_test)):
t = ' '.join([index_to_word[index] for index in x_test[i]])
decoded.append(t)
x_test = decoded
# + [markdown] id="IYPIUda5KLY_"
# 각각 5개씩 출력해보고, 제대로 변환이 되었는지 확인해 보겠습니다.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="sHyaB0GgKIXO" outputId="44c74c38-fbd7-4f64-e082-ff71966b6321"
x_train[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="oShPV_EbKQqH" outputId="1d6b3f6d-68da-4484-9e41-e96f4281d318"
x_test[:5]
# + [markdown] id="SISeyM7LKXem"
# 4-4. 벡터화 하기
# 원활한 실습을 위해 아래의 도구들을 먼저 임포트하겠습니다.
# + id="1gMa9nYCKTQe"
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# + [markdown] id="3Y95SfIgKcSQ"
# 앞서 본 텍스트 분류의 과정을 표현한 그림을 다시 보겠습니다.
#
# 
#
# 텍스트 데이터가 있을 때, 모델의 입력으로 넣기 위해서는 우선 각 문서를 벡터화할 필요가 있습니다. 항상 그런 것은 아니지만, 일반적으로 텍스트 분류를 할 모델로 인공 신경망을 사용하는 경우, 벡터화 방법 또한 인공 신경망을 사용하는 것이 보편적입니다. Word Embedding, Document Embedding, Contextual Embedding 등 다양한 벡터화 방법이 존재하는데요.
#
# 이번 실습에서는 딥러닝이 아닌 머신러닝 방법을 사용하여 텍스트 분류를 진행할 예정이니만큼, 벡터화 방법도 인공 신경망이 아닌 방법을 사용하겠습니다. 저희가 사용할 벡터화 방법은 Bag of Words 가설을 기반으로 하는 DTM, TF-IDF 행렬입니다.
#
# Bag of Words 가설과 이를 통해 만드는 DTM에 대한 정의는 아래의 링크를 통해 알아보겠습니다.
#
# [위키독스: 문서 단어 행렬(DTM)](https://wikidocs.net/24559)
#
#
# Q3. (O/X 퀴즈) 총 3개의 문서가 있습니다. 단어 단위로 토크나이징을 하였을 때, 첫 번째 문서의 길이가 3, 두 번째 문서의 길이가 2, 세 번째 문서의 길이가 4로 나왔습니다. 이 세 개의 문서로부터 만든 DTM의 열의 개수는 9입니다. (O/X)
#
# 예시답안
# X
# DTM의 열의 개수는 중복을 제거한 단어들의 집합인 단어장(Vocabulary)의 크기를 가지므로 반드시 9라고 할 수는 없습니다. 문서 내 단어가 중복되어 있다면 9보다 작을 수 있습니다.
#
# Q4. DTM을 기반으로 문서를 군집하거나, 분류를 한다면 같은 단어가 많이 등장한 문서들일수록 그 유사도가 높다고 판단할 수 있을 것입니다. 군집이나 분류 시에 이러한 가정을 사용할 때 성능을 저하시킬 수 있는 요소가 있다면 무엇이 있을까요?
#
# 예시답안
# 불용어는 모든 문서에 자주 등장하지만 실제로 중요한 의미를 가지지는 않으므로 이러한 가정에서는 성능 저하의 원인이 될 수 있습니다.
#
#
# Document Term Matrix, DTM은 사이킷런의 CountVectorizer()를 통해서 생성할 수 있습니다.
#
# DTM을 생성하고, DTM의 크기를 확인해보겠습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="j6RKRh08Kf_z" outputId="b647b975-7465-4274-a185-b5b11b4eff20"
dtmvector = CountVectorizer()
x_train_dtm = dtmvector.fit_transform(x_train)
print(x_train_dtm.shape)
# + [markdown] id="BcVOteogLNJQ"
# 훈련용 뉴스의 수가 총 8,982개였기 때문에 DTM의 행의 개수는 8,982개가 됩니다. 그런데 이상한 점은 앞에서 데이터를 로드할 때, num_words=10,000이라는 값을 사용했음에도 DTM 열의 개수는 이보다 현저하게 적은 9,670개밖에 되지 않습니다.이는 DTM이 자체적인 규칙에 따라서 불필요하다고 판단하는 토큰들을 제거하기 때문입니다.
#
# DTM 행렬을 이어서 TF-IDF 행렬을 만들어보겠습니다. TF-IDF 행렬은 퀴즈 4에서 알아본 DTM의 단점을 보정해주는 효과를 가지고 있습니다. TF-IDF 행렬에 대한 설명은 아래의 글을 참고하겠습니다.
#
# [위키독스: TF-IDF](https://wikidocs.net/31698)
#
# Q5. (O/X 퀴즈) TF-IDF 행렬은 모든 문서에 걸쳐서 중요하다고 판단되는 단어에 가중치를 주는 효과를 가지고 있습니다. 만약, 모든 문서에 걸쳐서 자주 등장하는 단어라면 TF-IDF는 다른 단어들보다 상대적으로 높은 가중치를 주게 됩니다.
#
# 예시답안
# X
# 모든 문서에 걸쳐서 자주 등장한다면 다른 단어들보다 중요도가 낮은 단어로 판단합니다.
#
# + colab={"base_uri": "https://localhost:8080/"} id="alWtU3MtLFyg" outputId="ea80c3d5-b2ea-4870-ff13-5c7197501c1c"
tfidf_transformer = TfidfTransformer()
tfidfv = tfidf_transformer.fit_transform(x_train_dtm)
print(tfidfv.shape)
# + [markdown] id="utmJkyjMLnbC"
# #4-5. 나이브 베이즈 분류기
#
# + id="dlDXvTfDLia7"
from sklearn.naive_bayes import MultinomialNB #다항분포 나이브 베이즈 모델
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import ComplementNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score #정확도 계산
# + [markdown] id="T0dtmmm_LyQ1"
# 먼저 아래 영상을 통해서 나이브 베이즈 분류기에 대해 간단히 알아보겠습니다.
#
# https://youtu.be/3JWLIV3NaoQ
#
#
# Q6. 모든 메일에는 같은 단어가 중복으로 등장하지 않는다고 가정하였을 때, 베이즈 정리만을 이용하여 coupon이라는 단어가 들어갔을 때 해당 메일이 스팸 메일일 확률을 구해보세요. 가정은 아래와 같습니다!
#
#
# ```
# 총 8개의 메일 중에 4개의 메일이 스팸 메일입니다.
#
# 전체 메일 중에서 coupon이라는 단어가 들어간 메일은 3개입니다.
#
# 모든 스팸 메일 중에서 단어 coupon이 들어간 메일은 2개입니다.
#
# 예시답안
#
# 총 8개의 메일 중 4개의 메일이 스팸 메일이므로 P(spam) = 4/8 = 1/2
#
# 총 8개의 메일 중 3개의 메일이 coupon이라는 단어를 포함하므로 P(coupon) = 3/8
#
# 총 4개의 스팸 메일 중 2개의 메일이 coupon이라는 단어를 포함하므로 P(coupon | spam) = 2/4 = 1/2
#
#
# P(coupon | spam)과 P(spam)를 곱한 후에 P(coupon)로 나눠줍니다.
# 정답은 2/3 즉, 66.7%입니다.
# ```
#
# 나이브 베이즈 분류기는 사이킷런의 MultinomialNB()를 통해 사용할 수 있습니다.
#
# 사이킷런이 제공하는 머신러닝 모델들은 공통적으로 fit()이라는 함수를 제공하고 있는데요.
#
# 훈련 데이터와 해당 훈련 데이터에 대한 레이블을 인자로 사용하면 모델이 이를 학습합니다.
#
# + colab={"base_uri": "https://localhost:8080/"} id="2yOTCs1aLvvx" outputId="224649ca-2ce6-4bac-8382-45dc89fa65bc"
mod = MultinomialNB()
mod.fit(tfidfv, y_train)
# + [markdown] id="9zAXeZ2QMXtw"
# 테스트 데이터에 대한 정확도를 측정하기 위해서는 훈련 데이터와 동일한 전처리를 거쳐야 합니다. 다시 말해 테스트 데이터도 TF-IDF 행렬로 변환해 주어야 합니다. 그리고 해당 행렬과 predict() 함수를 통해 예측값을 얻어 정확도를 측정합니다.
# + colab={"base_uri": "https://localhost:8080/"} id="cOt38LRHMYEQ" outputId="f6d7dcbc-e504-4c85-f7d5-63640fc92346"
x_test_dtm = dtmvector.transform(x_test) #테스트 데이터를 DTM으로 변환
tfidfv_test = tfidf_transformer.transform(x_test_dtm) #DTM을 TF-IDF 행렬로 변환
predicted = mod.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="o3FIWOvINkpa"
# 65%의 정확도를 얻습니다. 사실 그렇게 좋은 성능은 아닙니다. 뒤에서 다른 모델들을 통해서 더 좋은 성능을 얻기 위해서 노력해 봅시다.
#
# 나이브 베이즈 분류기가 임의의 샘플에 대해서 클래스를 정확히 예측하는지 테스트를 해보겠습니다. 네 번째 샘플(인덱스 상으로는 3)의 원문을 출력하면 다음과 같습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 154} id="IfftDK80Ng__" outputId="ded27de8-9709-41d0-a5e8-75e8c34a8d55"
x_test[3]
# + [markdown] id="U_fOs4PENoKm"
# 이 샘플의 레이블은 몇이었을까요?
# + colab={"base_uri": "https://localhost:8080/"} id="A10UzS00NnBG" outputId="7e5965b8-2d22-4bb9-aeba-34375c5399c9"
y_test[3]
# + [markdown] id="CaJ4rcX2NuS8"
# 이 샘플의 레이블은 4네요. 과연 모델은 제대로 예측하고 있는지 그리고 어느 정도의 확신을 가지고 판단하는지 모델이 결정한 확률을 그래프로 시각화해봅시다.
# + colab={"base_uri": "https://localhost:8080/", "height": 161} id="MgKxIMG1Nq-0" outputId="58dc93b4-f3b4-4849-b304-a8ef49f7a10c"
plt.subplot(211)
plt.rcParams["figure.figsize"] = (10,10)
plt.bar(mod.classes_, mod.predict_proba(tfidfv_test[3])[0])
plt.xlim(-1, 21)
plt.xticks(mod.classes_)
plt.xlabel("Class")
plt.ylabel("Probability")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="7FSdU0jMNwDN" outputId="c184f2f9-1c06-4e54-c785-1c904a23808c"
mod.predict(tfidfv_test[3])
# + [markdown] id="UJ2sEV0IN8Z8"
# 모델은 4번 클래스를 약 90%의 확률로 확신하는데, 10%의 확률로 3번 클래스라고 판단합니다. 90%의 확률로 확신하므로 모델이 예측한 레이블은 4번입니다. 해당 샘플은 정확하게 예측했네요.
#
#
# # 4-6. F1-Score, Confusion Matrix
#
# 오늘의 실습에서는 성능 비교를 위한 척도로 정확도(Accuracy)를 사용합니다. 하지만 Accuracy 외에 또 다른 방법인 F1-score도 존재하는데요.
# + id="WgqHSe1bN5QO"
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# + [markdown] id="CyqU5xwsOMU-"
# Precision, Recall & F1 Score
# 아래의 글을 통해서 Precision과 Recall 그리고 F1 score에 대한 설명을 읽어봅시다.
#
# 분류성능평가지표 - Precision(정밀도), Recall(재현율) and Accuracy(정확도)
# Q7. Accuracy는 가장 직관적으로 모델의 성능을 나타낼 수 있는 평가 지표입니다. 하지만, [ ]인 경우를 제대로 고려하지 못하기 때문에 [ ]인 경우에는 이를 고려한 f1-score를 사용하기도 합니다. [ ]에 들어갈 설명은 무엇일까요?
#
# label 불균형
#
# 사이킷런의 metrics 패키지에서는 정밀도, 재현율, F1점수를 구하는 classification_report() 함수를 제공합니다. 이 함수는 각각의 클래스를 양성(positive) 클래스로 보았을 때의 정밀도, 재현율, F1점수를 각각 구하고 그 평균값으로 전체 모델의 성능을 평가합니다.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="6HmqHpT_OKAT" outputId="977ed11e-f4a3-4438-803a-b425567cdb91"
print(classification_report(y_test, mod.predict(tfidfv_test)))
# + [markdown] id="mGa2t_LjOfqe"
# 최종 결과에서 각각이 의미하는 바는 다음과 같습니다.
#
# macro: 단순평균
# weighted: 각 클래스에 속하는 표본의 개수로 가중평균
# accuracy: 정확도. 전체 학습 데이터의 개수에서 클래스를 정확하게 맞춘 개수의 비율.
# Confusion Matrix
# Confusion Matrix는 사실 생소한 개념이라기보다는 앞서 f1-score를 이해하기 위해 읽었던 위의 글의 가장 처음에 있던 행렬입니다. Confusion matrix를 아래의 동영상을 통해 이해해 봅시다.
#
# https://youtu.be/M0GBAzFV9_A
#
#
# 지금까지 배운 내용들을 아래의 링크를 통해 간단히 정리해 보겠습니다!
#
# [Confusion Matrix 혼동 행렬](https://mjdeeplearning.tistory.com/31)
# Q8. TP와 FN에 대해서 각각 설명해보세요.
# 예시답안
# TP는 True로 예측하고 실제 값도 True, FN은 False로 예측하고 실제는 True
#
# 아래의 함수를 통해서 혼동 행렬(confusion matrix)을 시각화할 수 있습니다.
#
#
#
# + id="cfm-0EPKOXfn"
def graph_confusion_matrix(model, x_test, y_test):#, classes_name):
df_cm = pd.DataFrame(confusion_matrix(y_test, model.predict(x_test)))#, index=classes_name, columns=classes_name)
fig = plt.figure(figsize=(15,15))
heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=12)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=12)
plt.ylabel('label')
plt.xlabel('predicted value')
# + colab={"base_uri": "https://localhost:8080/", "height": 887} id="TVDLJ2-ROyJy" outputId="5554ef6c-31c8-4c94-cadc-ab49407cefd1"
graph_confusion_matrix(mod, tfidfv_test, y_test)
# + [markdown] id="IjWHYlwyO8Ps"
# # 4-7. 다양한 머신러닝 모델 사용해보기 (1)
# 앞서 사용한 나이브 베이즈 분류기(MultinomialNB)로는 65%의 정확도밖에 얻지 못했습니다. 사실 클래스의 개수가 46개인 것을 감안해도 그리 좋은 성능은 아닙니다. 좀 더 좋은 성능을 얻기 위해서 여러 다른 모델들을 시도해 봅시다.
#
# Complement Naive Bayes Classifier(CNB)
# 나이브 베이지안 분류기는 조건부로 독립적이라는 가정을 하기 때문에, 문서가 특정 분류에 속할 실제 확률로 사용할 때 문제가 발생할 수 있습니다. 바로 많은 샘플(sample)이 특정 클래스에 치우쳐져 있을 경우, 결정 경계의 가중치가 한쪽으로 치우쳐져 모델이 특정 클래스를 선호할 수 있다는 점인데요. 앞서 우리는 로이터 뉴스 데이터에서 3번, 4번 클래스가 다른 클래스에 비해 상대적으로 많은 클래스를 갖고 있음을 확인했었죠.
#
# 이렇게 데이터가 불균형할 경우를 대비해서 나이브 베이즈 분류기를 보완한 것이 컴플리먼트 나이브 베이즈 분류기입니다. 컴플리먼트 나이브 베이즈 분류기는 데이터의 불균형을 고려하여 가중치를 부여하는 특징을 가지고 있습니다. 컴플리먼트 나이브 베이즈 분류기는 앞서 사용한 나이브 베이즈 분류기 즉, MultinomialNB보다 성능이 일반적으로 더 좋다고 할 수 있습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="G7XmXLJ0O00D" outputId="dae0b880-a448-4d8b-b7c1-8f2f4a5b95d3"
cb = ComplementNB()
cb.fit(tfidfv, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="gvNIXC7hPAJy" outputId="29098d0c-e21f-4dab-a10e-1669a6bfc21a"
predicted = cb.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="spFhYLi3PJbO"
# 로지스틱 회귀(Logistic Regression)
# 다음으로는 가장 널리 알려진 두 개의 선형 분류 알고리즘인 로지스틱 회귀와 서포트 벡터 머신을 통해서 로이터 뉴스를 분류해 봅시다.
#
# 로지스틱 회귀는 소프트맥스(softmax) 함수를 사용한 다중 클래스 분류 알고리즘을 지원합니다. 다중 클래스 분류를 위한 로지스틱 회귀를 소프트맥스 회귀(Softmax Regression)라고도 합니다. 주의할 점은 이름은 회귀지만, 실제로는 분류를 수행한다는 점입니다.
#
# 
#
#
# 소프트맥스 함수는 클래스가 N개일 때, N차원의 벡터가 각 클래스가 정답일 확률을 표현하도록 정규화를 해주는 함수입니다. 예를 들어 위의 그림은 4차원의 벡터를 입력으로 받으면서 3개의 클래스를 가지는 경우의 소프트맥스 회귀의 동작 과정을 보여주고 있습니다. 3개의 클래스 중 1개의 클래스를 예측해야 하므로 소프트맥스 회귀의 출력은 3차원의 벡터고, 각 벡터의 차원은 특정 클래스일 확률입니다. 그리고 오차와 실제값의 차이를 줄이는 과정에서 가중치와 편향이 학습됩니다.
#
# 사이킷런에서 소프트맥스 회귀는 LogisticRegression()을 통해서 구현할 수 있습니다.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="oP4TEIvQPGJA" outputId="fa684958-d68d-4786-f7c5-ff2c88dc9753"
lr = LogisticRegression(C=10000, penalty='l2')
lr.fit(tfidfv, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="uv1rPoc6Pr6K" outputId="0f5a7571-1ca9-434b-97c6-ec5159f75ae9"
predicted = lr.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="syTvP215PyqS"
# 80%의 정확도를 얻었습니다. 지금까지의 모델 중 가장 높은 정확도를 얻었습니다.
#
# 선형 서포트 벡터 머신
# 서포트 벡터 머신은 대표적인 선형 분류 알고리즘입니다.
#
# 아래의 영상을 통해 서포트 벡터 머신에 대해서 이해해보겠습니다.
#
# https://youtu.be/y4CYcpRiNsw
#
# Q9. 서포트 벡터 머신에서 서포트 벡터란 어떤 벡터를 의미하는지 설명해보세요.
#
# 많은 선형 분류 모델은 태생적으로는 이진 분류만을 지원하는 이진 분류 모델입니다. (위에서 설명한 로지스틱 회귀는 예외입니다.) 서포트 벡터 머신의 구현체인 사이킷런의 LinearSVC 또한 태생적으로는 이진 분류를 위한 모델입니다. 그런데 이진 분류 알고리즘을 다중 클래스 분류 알고리즘으로 사용하는 방법이 있습니다. 바로 일대다(one-vs.-rest 또는 one-vs.-all) 방법입니다. 일대다 방식은 각 클래스를 다른 모든 클래스와 구분하도록 이진 분류 모델을 학습시킵니다.
#
# 결국 클래스의 수만큼 이진 분류 모델이 만들어집니다. 예측할 때는 만들어진 모든 이진 분류기가 작동하여 가장 높은 점수를 내는 분류기의 클래스를 예측값으로 선택합니다. 서포트 벡터 머신을 사용하여 로이터 뉴스를 분류해봅시다.
# + colab={"base_uri": "https://localhost:8080/"} id="VwVpg6ADPwGw" outputId="42528d92-620c-433d-c6c6-086745260f52"
lsvc = LinearSVC(C=1000, penalty='l1', max_iter=500, dual=False)
lsvc.fit(tfidfv, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="nJsTvvUxQAtY" outputId="0ec352b0-7107-48fb-b0b8-9acefc6f59e8"
predicted = lsvc.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="WB212H7EQLb5"
# # 4-8. 다양한 머신러닝 모델 사용해보기 (2)
# 결정 트리(Decision Tree)
# 결정 트리는 분류와 회귀 문제에 널리 사용하는 모델입니다. 기본적으로 결정 트리는 결정에 다다르기 위해 예/아니오 질문을 이어 나가면서 학습합니다. 이 질문은 스무고개 놀이의 질문과 비슷합니다. 아래의 영상을 통해서 결정 트리에 대해서 이해해봅시다.
#
# https://youtu.be/n0p0120Gxqk
#
# 사이킷런에서는 DecisionTreeClassifier()를 사용해서 결정 트리를 구현할 수 있습니다. 결정 트리의 깊이는 max_depth라는 인자를 통해서 정해줄 수 있습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="9v0o2_ePQJXj" outputId="3f91da2f-9e19-470e-efa1-713b0a818db5"
tree = DecisionTreeClassifier(max_depth=10, random_state=0)
tree.fit(tfidfv, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="pKI04FXWQR5t" outputId="86855106-9c7b-457a-9147-505f97d10065"
predicted = tree.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="61edsMQPQXIs"
# 지금까지의 모델 중 가장 낮은 정확도인 62%를 얻습니다. 사실 트리 계열의 모델들은 고차원이고 희소한 데이터에 대해서는 성능이 나오지 않는다는 특징이 있습니다. DTM이나 TF-IDF 행렬의 경우 고차원이면서 대부분의 값이 0인 희소한 데이터이므로 트리 계열의 모델보다는 선형 분류 모델을 통해 접근하는 것이 더 나은 접근일 수 있습니다.
#
# ## 랜덤 포레스트(Random Forest)
#
# 앙상블(Ensemble)이란 여러 머신러닝 모델을 연결하여 더 강력한 모델을 만드는 기법입니다. 뒤에서 보팅(Voting)이라는 앙상블 기법을 알아볼 텐데, 모델 자체가 앙상블을 사용하는 앙상블 모델들도 존재합니다. 랜덤 포레스트와 그래디언트 부스팅 트리는 결정 트리를 사용하는 앙상블 모델입니다.
#
# 랜덤 포레스트에 대해서 아래의 영상을 통해서 이해해보겠습니다.
#
# https://youtu.be/nZB37IBCiSA
#
# Q10. 위 영상에서 언급된 결정 트리의 단점을 랜덤 포레스트는 해결할 수 있다고 합니다. 영상에서 언급된 단점이 무엇이었는지 언급해보고 랜덤 포레스트가 어떻게 그 문제를 해결할 수 있는지 토의해봅시다.
#
# 예시답안
# 결정 트리는 훈련 데이터에 과적합(Overfitting) 되는 경향이 있습니다. 랜덤 포레스트는 이 문제를 앙상블로 해결합니다. 가령, 서로 다른 방향으로 과적합 된 트리들을 조합하면 오히려 모델 전체에서는 과적합을 피할 수 있다는 것이죠.
#
# 랜덤 포레스트로 모델을 훈련시키고, 테스트 데이터에 대해서 정확도를 평가해 봅시다.
# + colab={"base_uri": "https://localhost:8080/"} id="Gn7Vc07IQVJ1" outputId="08b1cab1-5165-473f-f0bc-6f3400fcf7ec"
forest = RandomForestClassifier(n_estimators=5, random_state=0)
forest.fit(tfidfv, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="ssamqlRPQkUM" outputId="6ba86574-5dab-4b45-9a05-6dada4711a56"
predicted = forest.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="qZCaUw10Qp9I"
# 그래디언트 부스팅 트리(GradientBoostingClassifier)
# 그래디언트 부스팅 트리는 여러 개의 결정 트리를 묶어 만드는 앙상블 모델입니다. 그레디언트 부스팅은 랜덤 포레스트와 다르게 이전 트리의 오차를 보완하는 방식으로 순차적으로 트리를 만듭니다.
#
# 그래디언트 부스팅 트리는 일부 특성을 무시한다는 특징을 가지고 있습니다. 그래서 보통 랜덤 포레스트를 먼저 사용해보고, 성능이나 예측 시간 면에서 만족스럽지 않은 경우에 그래디언트 부스팅 트리를 시도해보는 것이 좋습니다.
#
# 일반적으로 1 ~ 5 정도의 깊지 않은 트리를 사용하므로 메모리도 적게 사용하고 예측도 빠릅니다.
#
# 정확도도 준수하고, 특히 예측 속도가 빠르다는 점에서 데이터 경진 대회에서 많은 우승을 했던 모델입니다. 다만, 훈련 시간의 속도가 좀 오래 걸린다는 단점이 있으며 트리 기반 모델의 특성으로 인해서 희소한 고차원 데이터에 대해서는 잘 동작하지 않는다는 단점이 있습니다.
#
# 현재 우리가 사용하는 TF-IDF 행렬은 희소하고 고차원 데이터지만, 결정 트리나 랜덤 포레스트보다 더 높은 성능을 얻을 수 있을지 한번 테스트해 볼까요?
#
# 저는 아래 코드 실행에 약 12분 정도 소요되었습니다. verbose=3을 GradientBoostingClassifier안에 인자로 넣어 진행 상황을 확인할 수 있으니 참고해 주세요!
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="YN5fV3VEQmQf" outputId="8acd051e-4abc-4d00-8afd-02d479d505e4"
grbt = GradientBoostingClassifier(random_state=0) # verbose=3
grbt.fit(tfidfv, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="Ya0F3B8eQsnJ" outputId="deccf4b9-80a6-4ad5-bb0e-22edfa3b2b9d"
predicted = grbt.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="DEodkJ3xQwKZ"
# 76%의 정확도로 결정 트리와 랜덤 포레스트와 비교하여 좋은 성능을 얻었습니다.
#
#
# ## 보팅(Voting)
# 데이터 경진 대회 캐글(Kaggle)에서 상위권을 차지한 나오는 많은 솔루션들이 앙상블(Ansemble learning)이라는 방법을 사용합니다. 그중에서 오늘은 투표를 통해 결과를 도출하는 보팅(Voting)이라는 방법에 대해서 이해해 봅시다.
#
# https://youtu.be/y4Wh0E1d4oE
#
# Q11. 보팅은 하드 보팅과 소프트 보팅 두 가지로 나누어집니다. 하드 보팅은 결과물에 대한 최종값을 투표해서 결정합니다. 그렇다면, 소프트 보팅의 결정 방식에 대해 설명해보세요.
#
# 예시답안
# 소프트 보팅은 최종 결과물이 나올 확률값을 다 더해서 최종 결과물에 대한 각각의 확률을 구한 뒤 최종값을 도출해 냅니다.
#
# 이전에 사용했던 모델인 로지스틱 회귀, CNB, 그래디언트 부스팅 트리 세 가지를 사용하여 소프트 보팅을 하였을 때의 성능을 비교해봅시다.
# + colab={"background_save": true} id="8ywVln5-Qumx" outputId="94229101-5d46-4e68-a81f-f15415a923bc"
voting_classifier = VotingClassifier(estimators=[
('lr', LogisticRegression(C=10000, penalty='l2')),
('cb', ComplementNB()),
('grbt', GradientBoostingClassifier(random_state=0))
], voting='soft', n_jobs=-1)
voting_classifier.fit(tfidfv, y_train)
# + colab={"background_save": true} id="b4jjURb2Q-RT" outputId="40f173dd-88b2-4db9-c24a-0169b8f297c6"
predicted = voting_classifier.predict(tfidfv_test) #테스트 데이터에 대한 예측
print("정확도:", accuracy_score(y_test, predicted)) #예측값과 실제값 비교
# + [markdown] id="Y_ALOdHSRBQL"
# 오늘의 실습 중 가장 높은 정확도를 보였던 소프트맥스 회귀보다 좀 더 높은 정확도를 얻었습니다.
# + [markdown] id="mViBCyKDRLoE"
# 4-9. Project: Experimenting with the vocabulary size
# So far we have focused on raising performance by changing models and combining them. But improving performance is not limited to tuning the model. Data preprocessing directly affects model performance. In particular, for Bag-of-Words representations such as the DTM or TF-IDF, how you decide the number of words to use can affect performance.
#
# Using too many words, including unimportant ones, can hurt performance, and using too few words can hurt it as well. Moreover, a given vocabulary size can be an advantage or a disadvantage depending on which model you use.
#
# Let's test how model performance changes with the number of words.
# ```
# (x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000, test_split=0.2)
# ```
#
# We saw earlier that num_words controls how many words are used. When the words are ranked by frequency, only as many words as the integer passed to num_words are kept, and all remaining words are treated as \<unk>.
#
# Check the accuracy of the models used so far for the following two cases.
#
# 1. Use all words
# ```
# (x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=None, test_split=0.2)
# ```
# 2. Use only the 5,000 most frequent words
# ```
# (x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=5000, test_split=0.2)
# ```
#
# 3. Choose the number of words yourself
# Vary the num_words value given as 5000 above and check the performance. Study how the models behave as the vocabulary size changes. Trying at least three different settings is recommended (a sketch of how such an experiment could be organized is given in the cell below).
#
# Models to use
#
# Naive Bayes classifier, CNB, logistic regression, support vector machine, decision tree, random forest, gradient boosting tree, voting
#
# 4. Compare with a deep learning model
# With the best model and vocabulary size found above, compare the result against another model of your choice. Pick one of the deep learning models used for tasks such as sentiment analysis (an RNN or a 1-D CNN), train it on today's dataset, and compare the results. For a fair comparison, do not use pretrained models such as Word2Vec here.
# + id="hCe54lBpQ_5q"
| 72,895 |
/src/GO SCI/.ipynb_checkpoints/load_data-checkpoint.ipynb
|
88f6afcd61a6975a564df51bd4503e9e7e26a081
|
[] |
no_license
|
Ryuchen/RON_Model
|
https://github.com/Ryuchen/RON_Model
| 10 | 2 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 16,629 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # INSTALL
# Installing `JupyterLibrary` will bring along Robot Framework and SeleniumLibrary. Jupyter components, like `notebook`, `jupyterlab` and `nteract_on_jupyter`, and browser executors (e.g. `chromedriver`, `geckodriver`) and various utilities (e.g. `nodejs`) are up to you, depending on what you want to test. Here are some examples.
# ## `pip`
# ```bash
# pip install robotframework-jupyterlibrary
# ```
# ## TODO `conda`
# ## `master`
# `JupyterLibrary` is under active development, and is heavily invested in `conda` because of the complexity of managing browser execution dependencies. But `conda` (rightly) makes it hard to install Random Repos from the Internet, so you'll need a bit of `pip`, too.
#
# Here's a complete setup:
# ```shell
# conda create \
# -n testing-jupyter \ # as good a name as any
# python=3 # 2020 is right around the corner
#
# conda activate testing-jupyter # get on the right PATH
#
# conda install \
# -c conda-forge \ # can't get all these from Austin
# jupyterlab \ # mostly this
# robotframework-seleniumlibrary \ # includes robotframework... and selenium
# geckodriver \ # moz:\\a FTW
# python-chromedriver-binary # for the rest of the marketshare
#
# pip install --no-deps \ # don't want any surprises
# nteract_on_jupyter \ # now with more ✨
# git+http://github.com/robots-from-jupyter/robotframework-jupyterlibrary#egg=JupyterLibrary
# ```
# Also take a gander at this project's `environment.yml` or `anaconda-project.yml` at that URL there.
# ## DEV
# - get Firefox
# - get Miniconda
# - clone
#
# git clone http://github.com/robots-from-jupyter/robotframework-jupyterlibrary
#
# - update and activate
#
# conda env update
# conda activate robotframework-jupyterlibrary
#
# - then
#
# pip install -e . --no-deps --ignore-installed
#
# - run the tests
#
# python -m scripts.atest
| 2,298 |
/final_submission_FedotovD.ipynb
|
5b442ddff1bd8f89b520e1f22711d4b9605de898
|
[] |
no_license
|
FedotovD/MERC2017
|
https://github.com/FedotovD/MERC2017
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 16,238 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import seaborn as sns
import pandas as pd
import numpy as np
import scipy as sp
from scipy import stats as stats
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
# %matplotlib inline
# -
df_leaderboard = pd.DataFrame(pd.read_csv('./Data/predict-west-nile-virus-publicleaderboard.csv'))
df_leaderboard.head(10)
df_leaderboard.describe()
import datetime as dt
st_date = dt.date(2015,4,20)
df_cleaned = df_leaderboard[(df_leaderboard.Score < 10e6)]
df_cleaned['SubmissionDate'] = pd.to_datetime(df_cleaned['SubmissionDate'])
df_cleaned['DaysFromStart'] = (df_cleaned['SubmissionDate'].dt.date- st_date)
df_cleaned['DaysFromStart'] = df_cleaned['DaysFromStart'].dt.days
df_cleaned.dtypes
X = df_cleaned.loc[:,['DaysFromStart']]
y= df_cleaned.Score
type(X)
sns.distplot(df_cleaned.Score)
plt.plot(df_cleaned.DaysFromStart, df_cleaned.Score)
sorted_scors = list(df_cleaned.Score.sort_values())
count = len(sorted_scors)
count
sorted_scors[int(.6 * count)]
# +
closest = min(sorted_scors, key=lambda x: abs(x-0.766))
sorted_scors.index(closest) / count
# -
# ## let's try this - kmeans
# +
#https://stackoverflow.com/questions/37374983/get-data-points-from-seaborn-distplot
# -
from sklearn.cluster import KMeans
short_data = X
short_data['Score'] = y
alphabet=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
checkn = 4
kmm = KMeans(n_clusters=checkn, random_state=0).fit(short_data)
kmm_model=KMeans(n_clusters=checkn, random_state=0)
kmm_model.fit(short_data)
# +
k_means_list = [kmm.cluster_centers_[i][1] for i in range(checkn)]
k_means_list = sorted(k_means_list)
k_means_dict = {}
for i in range(len(k_means_list)):
k_means_dict[k_means_list[i]] = alphabet[i]
k_means_dict
# -
_temp_list = [abs(45000 - i) for i in k_means_dict.keys()]
k_means_dict[k_means_list[_temp_list.index(min(_temp_list))]]
def get_k(number):
_temp_list = [abs(number - i) for i in k_means_dict.keys()]
return k_means_dict[k_means_list[_temp_list.index(min(_temp_list))]]
# +
short_data['k_cat'] = short_data.Score
short_data['k_cat'] =short_data['k_cat'].apply(lambda x: get_k(x))
df_graphable = pd.concat([short_data, pd.get_dummies(short_data['k_cat'])], axis=1)
# -
df_temp = df_graphable.groupby(['k_cat']).mean()
df_temp['k_catz'] = df_temp.index.values
sns.boxplot(x='k_cat', y='Score', data=df_graphable.sort_values(by='Score'))
df_graphable = df_graphable.join(df_cleaned.TeamId, how='left')
top_teams = list(df_graphable.loc[:, ['TeamId', 'Score']].groupby('TeamId').max().sort_values(by='Score', ascending = False).iloc[:10, 0:0].index)
_mid_hi = int(len(df_graphable.TeamId.value_counts())/2) + 5
_mid_lo = int(len(df_graphable.TeamId.value_counts())/2) - 5
mid_teams = list(df_graphable.loc[:, ['TeamId', 'Score']].groupby('TeamId').max().sort_values(by='Score', ascending = False).iloc[_mid_lo:_mid_hi, 0:0].index)
bottom_teams = list(df_graphable.loc[:, ['TeamId', 'Score']].groupby('TeamId').min().sort_values(by='Score', ascending = False).iloc[-10:, 0:0].index)
both_teams = [a for a in mid_teams if a in top_teams or a in bottom_teams]
df_graphable['RankGroup'] = df_graphable['TeamId'].apply(lambda x:
'Middle' if x in mid_teams
else ('High' if x in top_teams
else ('Low' if x in bottom_teams
else 'Other' )))
df_graphable.RankGroup.value_counts()
# +
#Saves on next line
# -
df_graphable.loc[:, ['TeamId', 'RankGroup', 'DaysFromStart', 'Score', 'k_cat', 'a', 'b', 'c', 'd']].to_csv('./Data/CategorizedData9.csv', index=False)
| 4,331 |
/EDA.ipynb
|
b41d4bc65a0b44f1118e587393b89cbf069f603d
|
[] |
no_license
|
akrawat912/911_Analysis_Streamlit-Heroku-
|
https://github.com/akrawat912/911_Analysis_Streamlit-Heroku-
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 67,447 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### CNN:
# - Powerful deep networks that are widely used in image-related tasks like image recognition and segmentation
# - The inputs to these networks are images
# ### Problem with multilayer perceptron:
# - ***Overfitting*** due to the large number of parameters (millions) for medium and large images
# - Fails to handle variance in images -- translation, rotation, illumination, size, etc.
# ### CNN Working:
# - It works like template matching
# - A small grid or box iterates over the image (sliding window), and when the desired piece of the image falls inside that iterating box, the corresponding neurons fire
# - So it can detect an object even if it is rotated, if we are given only a small part of the image, or if the image is slightly shifted in any direction
# - Activation map
# - Filter
# - resultant image dimension
# ### Resultant image :
# - image = n * n
# - Filter = F * F
# #### Then, Convolved feature will have dimension:
# - dimension =(n-F+1,n-F+1)
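# A quick sanity check of this formula (a tiny illustrative example, not part of the notebook's pipeline):
# +
# Example: a 28 x 28 image convolved with a 5 x 5 filter (stride 1, no padding)
n, F = 28, 5
out_dim = n - F + 1
print((out_dim, out_dim))   # (24, 24)
# -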
# #### Implementing convolution & filters
# - When the feature is present in a part of an image, the convolution operation between the filter and that part of the image results in a real number with a higher value
# - If the feature is not present, the resulting value will be low
import numpy as np
import matplotlib.pyplot as plt
import cv2
# +
img_ = cv2.imread(r"C:\Users\Asus\Desktop\thumb-1920-568857.jpg")
# -
plt.imshow(img_) ##default mode is BGR
img_ = cv2.cvtColor(img_,cv2.COLOR_BGR2RGB)
img_ = cv2.resize(img_,(100,100))
plt.imshow(img_)
plt.axis('off')
def drawImg(img,title="Image"):
plt.imshow(img,cmap='gray')
plt.axis("off")
plt.style.use('seaborn')
plt.title(title+str(img.shape))
plt.show()
# ### For a multicolor image, we require a 3D filter and a 3D image convolution
# - For simplification, we can use grayscale image which will require only 2D computation
img_gray = cv2.cvtColor(img_,cv2.COLOR_BGR2GRAY)
drawImg(img_gray)
# ### So, What is Convolution in this case?
#
# - The step of getting activation map by applying a filter over an entire image is called convolution
# +
### Use hardcoded filter here
def convolution(img,img_filter):
W = img.shape[0]
H = img.shape[1]
## filter propertiy
F = img_filter.shape[0]
new_image = np.zeros((W-F+1,H-F+1))
for row in range(W-F+1): ## These 2 loops iterate over new image
for col in range(H-F+1):
for i in range(F): ## These 2 loops iterate over filter
for j in range(F):
new_image[row][col] += img[row+i][col+j]*img_filter[i][j]
if new_image[row][col] > 255:
new_image[row][col] = 255
elif new_image[row][col] < 0:
new_image[row][col] = 0
return new_image ## this is our activation map
# +
blur_filter = np.ones((3,3))/9.0
print(blur_filter)
output1 = convolution(img_gray,blur_filter)
drawImg(output1) ## blured (filtered)
drawImg(img_gray) # original image
# -
# ### Reason for getting blurred:
# - When we apply the filter, each pixel value is multiplied by 1/9
# - The result is the local average of the neighborhood, which blurs the image
# +
### Edge filter
edge_filter = np.array([[2,0,-2],[2,0,-2],[2,0,-2]])
# -
output2 = convolution(img_gray,edge_filter)
plt.imshow(output2)
# ### Extracting various features of image using filters
# ### Convolution Layer
#
# - Contains various filters
# - Each filter extracts a different kind of feature and gives one activation map
# - Multiple activation maps are stacked to form the output volume
# - ==> A CNN layer takes a volume as input and produces an output volume of a different shape
# ### Buzz Words:
# - Convolution layer
# - Valid vs Same Convolution
# - Padding
# - Stride
# - Filters/Kernels
# - Pooling (Average/ Maxpooling)
# Same conv : output size == input size
# ### Stride :
# - Filters can have different sizes and movements
# - The stride defines how the filter moves across the image
# - The number of pixels we skip each time is called the ***stride***
# - For a large stride, the output volume will be smaller, since less information is collected
#
#
# - H_ = (H-F) / S(h) + 1 : S(h) = Stride along horizontal dir
#
#
# - W_ = (W-F) / S(w) + 1 : S(w) = Stride along Vertical dir
#
#
#
# - If we use no padding (P = 0), it is called ***valid convolution***
#
# ### Padding:
# - Addition of rows and columns of zeros on every side of the image so that the output dimension does not shrink after convolution
#
# - for example: without padding the output dimension is N-F+1
#
# - But we pad the image to get an output dimension of N
#
# - we require the output dimension after padding to be N
# - with padding P, the output dimension = N+2P-F+1
#
# - ex: for a padding of 2 and a filter dimension of 5, the output dimension = N+4-5+1, which is equal to N (the original size)
#
# - H_ = (H-F+2(P)) / S(h) + 1 : S(h) = Stride along horizontal dir
#
#
# - W_ = (W-F+2(P)) / S(w) + 1 : S(w) = Stride along Vertical dir
#
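# A small helper (an illustrative sketch, not used elsewhere in this notebook) that evaluates these formulas for a given image size, filter size, padding and stride:
# +
def conv_output_shape(H, W, F, P=0, S=1):
    # H_out = (H - F + 2P) / S + 1, and likewise for W
    H_out = (H - F + 2 * P) // S + 1
    W_out = (W - F + 2 * P) // S + 1
    return H_out, W_out

print(conv_output_shape(28, 28, 5))            # valid convolution: (24, 24)
print(conv_output_shape(28, 28, 5, P=2))       # same convolution:  (28, 28)
print(conv_output_shape(28, 28, 5, P=2, S=2))  # with stride 2:     (14, 14)
# -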
# ### Implementation of padding
# +
# np.pad?
# -
print(img_.shape)
drawImg(img_)
### Padding
pad_img = np.pad(img_,((10,10),(20,20),(0,0)),'constant',constant_values=0)
drawImg(pad_img)
# ### Pooling Layers
# ##### General CNN architecture
#
#
# [Conv Layer => RELU => POOLING] => FC => Softmax
#
# - RELU = Activation function
# -
# - Pooling is performed after the convolution operation
# - Two types: average pooling and max pooling
#
# = Max pooling:
# - No filter is present here; a window slides over the entire image
# - It keeps the maximum value in that window and moves to the next position
#
# = Avg pooling:
# - No filter is present here; a window slides over the entire image
# - It keeps the average value in that window and moves to the next position
#
# - It helps reduce computation by discarding 75% of neurons (for 2x2 filter with stride of 2)
# - Makes feature detectors more robust
# - No parameters for learning , only hyperparameters such as size and type of pooling
# ### Implementation of pooling
# +
X = np.array([[1,0,2,3],
[4,6,6,8],
[3,1,1,0],
[1,2,2,4]])
def pooling(X,mode = "max"):
stride = 2
f = 2
H,W = X.shape
H0 = int((H-f)/stride) +1
W0 = int(((W-f)/stride)) +1
output = np.zeros((H0,W0))
for r in range(H0):
for c in range(W0):
r_start = r*stride
r_end = r_start + f
c_start = c*stride
c_end = c_start + f
X_slice = X[r_start:r_end,c_start:c_end]
if mode == "max":
output[r][c] = np.max(X_slice)
else:
                output[r][c] = np.mean(X_slice)
return output
# -
pool_out = pooling(X)
print(pool_out)
# ### Dropout - A Regularization
# - This technique can be used for both CNNs and MLPs
# - We fix a probability, then randomly turn off neurons with that probability during training
# - A dropped neuron does not pass any value to the next layer
# - By randomly turning off neurons we effectively train many different sub-models
# - The result is an average of all these models
# - generally, p = 0.3 to 0.5
#
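# A minimal sketch of how dropout is added between layers in Keras (the layer sizes and the 784-dimensional input here are arbitrary choices for illustration):
# +
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

drop_model = Sequential()
drop_model.add(Dense(128, activation='relu', input_dim=784))
drop_model.add(Dropout(0.5))   # randomly turns off 50% of these units during training
drop_model.add(Dense(10, activation='softmax'))
drop_model.summary()
# -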
# ## CNN using Keras on the fashion dataset
| 7,676 |
/.ipynb_checkpoints/exploratory_data_analysis_jonas-checkpoint.ipynb
|
a8522209d6b13421304ccc8651845f6448cb2b32
|
[] |
no_license
|
metzj/Road_Identification
|
https://github.com/metzj/Road_Identification
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 6,845 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis
# + active=""
# Exploratory data analysis strongly inspired by 'https://www.kaggle.com/ekami66/detailed-exploratory-data-analysis-with-python'. The steps to procede are:
# -Data Exploration
# --Purely visual description (DONE)
# --Transform images into scalar values (DONE)
# --Find Correlations (TO DO)
#
# -Data Cleaning
# --Remove/Adapt Features (TO DO)
# --Test Correlations (TO DO)
# -
# ## Visual Description
# + active=""
# We first do a quick visual description of the training data set. There are two sets of 90 different images. The first set corresponds to the satellite images of what seems to be a city. These images are colored so when transforming those into scalars, there will be three different values (RGB ?). The second set correspond to the black and white version of the first set with white where there is road and black everywhere else.
#
# The distinct elements that can be found in the pictures are houses, trees, buildings, parking lots, stadiums, train tracks, wastelands, highways and water. The detailed analysis can be found in the file 'VisualExploratoryDataAnalysis.xlsx'.
#
# The first conclusion that can be drawn from this analysis is that the pictures were taken in a dense urban area. A high percentage of the pictures contain houses (89%), larger buildings (36%) or even parking lots (39%). Moreover, it is clear that certain elements will make detecting the road harder: trees that cover parts of or even large portions of the road are heavily represented (91%), parking lots have roads inside them, pavement is omnipresent, there are large portions of non-road surface (water: 4%, stadium-like structures: 9%, buildings: 36%), certain portions of wasteland (5%) have the same colour as the roads, and last but not least there are train tracks (12%), road-like structures that are not roads, and highway portions (5%), road-like structures that are in fact much larger roads!
#
# From this first quick analysis we can find three different groups:
# -The group of pictures presenting only houses and trees: Group 1 = 45%;
# -The group of pictures presenting houses, trees, larger buildings and parking lots: Group 2 = 14%;
# -The group of very peculiar pictures that don't correspond at all with the others: Group 0 = 8%.
# -
# ## Transform images into scalar values
# + active=""
# Before trying to approach the data analysis using correlation functions, we first need to convert our images from a picture representation to an RGB-scalar representation. We do this with the help of the segment_aerial_image.ipynb notebook given with the project description.
#
# Whether we have to use the three different values separately or take their mean is still to be determined.
# -
# %matplotlib inline
import matplotlib.image as mpimg
import numpy as np
import matplotlib.pyplot as plt
import os,sys
from PIL import Image
# +
# Helper functions
def load_image(infilename):
data = mpimg.imread(infilename)
return data
def img_float_to_uint8(img):
rimg = img - np.min(img)
rimg = (rimg / np.max(rimg) * 255).round().astype(np.uint8)
return rimg
# Concatenate an image and its groundtruth
def concatenate_images(img, gt_img):
nChannels = len(gt_img.shape)
w = gt_img.shape[0]
h = gt_img.shape[1]
if nChannels == 3:
cimg = np.concatenate((img, gt_img), axis=1)
else:
gt_img_3c = np.zeros((w, h, 3), dtype=np.uint8)
gt_img8 = img_float_to_uint8(gt_img)
gt_img_3c[:,:,0] = gt_img8
gt_img_3c[:,:,1] = gt_img8
gt_img_3c[:,:,2] = gt_img8
img8 = img_float_to_uint8(img)
cimg = np.concatenate((img8, gt_img_3c), axis=1)
return cimg
def img_crop(im, w, h):
list_patches = []
imgwidth = im.shape[0]
imgheight = im.shape[1]
is_2d = len(im.shape) < 3
for i in range(0,imgheight,h):
for j in range(0,imgwidth,w):
if is_2d:
im_patch = im[j:j+w, i:i+h]
else:
im_patch = im[j:j+w, i:i+h, :]
list_patches.append(im_patch)
return list_patches
# +
# Loaded a set of images
root_dir = "training/"
image_dir = root_dir + "images/"
files = os.listdir(image_dir)
n = min(100, len(files)) # Load maximum 100 images
print("Loading " + str(n) + " images")
imgs = [load_image(image_dir + files[i]) for i in range(n)]
# +
rimgs = img_float_to_uint8(imgs)
print(np.shape(rimgs))
# rimgs contains 100 picture of size 400*400 pixels in RGB values!
# -
# ## Find Correlations
| 4,814 |
/raw_data_processing.ipynb
|
da65dd12e534220ea9229b674f1ac7cdd2be60d2
|
[] |
no_license
|
skyu0221/Solar-RL
|
https://github.com/skyu0221/Solar-RL
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 42,245 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Market Segmentation
# ## Hierarchical Agglomerative Clustering
# ### Import Data
# +
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import KernelDensity
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 40)
pd.set_option('display.width', 1000)
# -
# ### Scale Data
# +
# Read CSV to DataFrame
artists = pd.read_csv('Data/artists_train.csv')
# Drop unnamed column
artists.drop(columns=['Unnamed: 0', 'Unnamed: 0.1'], inplace=True)
# Create id_name_genre column
#artists['id_name'] = artists['artist_id'] + ', ' + artists['artist_name'].fillna('None')
# Preview dataframe
print(artists.info())
artists.head()
# -
# ### Normalize Data
# +
# Standardize with scalers
# Define X
X_train = artists.drop(columns=['artist_id', 'artist_name'])
# Standardize with Standard Scaler
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
X_train_sscaled = ss.fit_transform(X_train)
X_train_sscaled = pd.DataFrame(X_train_sscaled, columns=X_train.columns)
# Standardize with MinMax Scaler
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
X_train_mmscaled = mms.fit_transform(X_train)
X_train_mmscaled = pd.DataFrame(X_train_mmscaled, columns=X_train.columns)
# -
# HAC
agg_clust = AgglomerativeClustering(n_clusters=10)
agg_clust
assigned_clust = agg_clust.fit_predict(X_train_sscaled)
# Visualizing how HAC works
from plot_agg_alg import plot_agglomerative_algorithm
plot_agglomerative_algorithm()
from plot_agg import plot_agglomerative
plot_agglomerative()
# +
# Dendrogram
from scipy.cluster.hierarchy import dendrogram, ward
linkage_array = ward(X_train_sscaled)  # use the scaled feature matrix defined above
dendrogram(linkage_array)
ax = plt.gca()
bounds = ax.get_xbound()
ax.plot(bounds, [16, 16], '--', c='k')
ax.plot(bounds, [9, 9], '--', c='k')
ax.text(bounds[1], 16, ' 2 clusters', va='center', fontdict={'size': 12})
ax.text(bounds[1], 9, ' 3 clusters', va='center', fontdict={'size': 12})
plt.xlabel("Data index")
plt.ylabel("Cluster distance")
# Create data - 6 Cluster Example
from sklearn.datasets import make_blobs
k = 6
m = 400
X, y = make_blobs(n_samples= m, n_features=2, centers=k, cluster_std=0.8, random_state = 1234)
plt.scatter(X[:, 0], X[:, 1], c = y, s = 10);
# Try different linkage settings in HAC algorithm
agg_comp = AgglomerativeClustering(linkage ="complete", n_clusters=6)
agg_avg = AgglomerativeClustering(linkage ="average", n_clusters=6)
agg_ward = AgglomerativeClustering(linkage ="ward", n_clusters=6)
as_comp = agg_comp.fit_predict(X)
as_avg = agg_avg.fit_predict(X)
as_ward = agg_ward.fit_predict(X)
# Visualize predictions
plt.scatter(X[:, 0], X[:, 1], c = as_comp, s = 10);
plt.scatter(X[:, 0], X[:, 1], c = as_avg, s = 10);
plt.scatter(X[:, 0], X[:, 1], c = as_ward, s = 10);
# Dendrogram for Ward cluster
from scipy.cluster.hierarchy import dendrogram, ward
linkage_array = ward(X)
dendrogram(linkage_array)
ax = plt.gca()
bounds = ax.get_xbound()
plt.xlabel("Sample index")
plt.ylabel("Cluster distance");
# Make visualization more interpretable
plt.title('Hierarchical Clustering Dendrogram (truncated)')
dendrogram(linkage_array, truncate_mode='lastp', p=12)
plt.xlabel('cluster size')
plt.ylabel('distance')
plt.show()
# Run k-means to compare
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters = 6)
k_means.fit(X)
y_hat = k_means.predict(X)
plt.scatter(X[:, 0], X[:, 1], c = y_hat, s = 10)
cl_centers = k_means.cluster_centers_
plt.scatter(cl_centers[:, 0], cl_centers[:, 1], c='black', s=40);
# Evaluation
labels_kmeans = k_means.labels_
labels_comp = agg_comp.labels_
labels_avg = agg_avg.labels_
labels_ward = agg_ward.labels_
# Adjusted Rand Index
# Bounded between -1 and 1.
# Closer to 1 is good, while closer to -1 is bad.
from sklearn import metrics
metrics.adjusted_rand_score(labels_kmeans, y)
metrics.adjusted_rand_score(labels_ward, y)
metrics.adjusted_rand_score(labels_avg, y)
metrics.adjusted_rand_score(labels_comp, y)
# Fowlkes Mallows Score
# Bounded between 0 and 1. Closer to 1 is better.
metrics.fowlkes_mallows_score(labels_kmeans, y)
metrics.fowlkes_mallows_score(labels_ward, y)
metrics.fowlkes_mallows_score(labels_avg, y)
metrics.fowlkes_mallows_score(labels_comp, y)
# Calinski-Harabaz Index
# This score is not bounded. The higher, the better.
metrics.calinski_harabaz_score(X, labels_kmeans)
metrics.calinski_harabaz_score(X,labels_ward)
metrics.calinski_harabaz_score(X,labels_avg)
metrics.calinski_harabaz_score(X,labels_comp)
# Silhouette Coefficient
# Bounded at -1 and 1.
# Closer to -1 suggests incorrect clustering.
# Closer to +1 shows that each cluster is very dense.
metrics.silhouette_score(X, labels_kmeans)
metrics.silhouette_score(X, labels_ward)
metrics.silhouette_score(X,labels_avg)
metrics.silhouette_score(X, labels_comp)
# -
#
| 5,201 |
/TSFEL_HAR_Example.ipynb
|
b05184c393542d78a90ecc8287f024b097c9660a
|
[
"MIT"
] |
permissive
|
ghayth82/tsfel
|
https://github.com/ghayth82/tsfel
| 1 | 0 | null | 2019-02-22T10:38:42 | 2019-01-28T16:38:33 | null |
Jupyter Notebook
| false | false |
.py
| 195,936 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: ds19
# language: python
# name: ds19
# ---
# +
# Day 4
# +
# Input range 172851-675869
# -
import numpy as np
# +
def checkOrder(num):
if "".join(sorted(num)) == num:
return True
def checkSamenum(num):
for num_1, num_2 in zip(num, num[1:]):
if num_1 == num_2:
return True
def construct_passwords(input_range):
begin_range = int(input_range.split("-")[0])
ending_range = int(input_range.split("-")[1])
valid_passwords = []
count_p=0
count_repeat = 0
# password contains 6 digits
#password = np.zeros(6)
for i in range(begin_range, ending_range):
password = str(i)
# check if the digits are in ascending order
if checkOrder(password):
if checkSamenum(password):
count_p += 1
valid_passwords.append(password)
#print("count:",count_p)
if countRepeatingNum(password):
count_repeat +=1
return count_p, count_repeat
# +
import time
start = time.time()
inputs = "172851-675869"
n_pass,n_repeat = construct_passwords(inputs)
end = time.time()
print(end - start)
# -
n_pass
n_repeat
# +
# Part 2:
# -
def countRepeatingNum(num):
count = 1
repeatDigit = False
for num1,num2 in zip(num, num[1:]):
if num1 == num2:
count += 1
else:
if count == 2:
return True
count = 1
#repeatDigit = False
return count == 2
| 1,804 |
/A#4.ipynb
|
5f399a01e9df757b74ffa80ea01560eee0640920
|
[] |
no_license
|
alirezasalamat/AI-CarPricePrediction
|
https://github.com/alirezasalamat/AI-CarPricePrediction
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,398,455 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning
#
# ### FALL-99 A#4
# ### Bahar Emami Afshar
# ### STD number: 810197662
# +
import pandas as pd
import numpy as np
train_df = pd.read_csv("./house-prices-advanced-regression-techniques/train.csv")
train_df
# -
# # Phase 0: Visualization and EDA
# # 1.
#
train_df.info()
train_df.describe()
# # 2.
# ## missing value percent for each attribute is as below:
# The dataset contains some categorical features for which NaN is a meaningful value, and these should not be mistaken for missing values, so we first replace them with another string and then compute the missing-value percentages.
# +
meaning_ful_nan_cols = ["Alley","BsmtQual","BsmtCond","BsmtExposure","BsmtFinType1","BsmtFinType2"
,"GarageType","GarageFinish","GarageQual","GarageCond","FireplaceQu","PoolQC"
,"Fence","MiscFeature"]
for col in meaning_ful_nan_cols:
train_df[col] = train_df[col].fillna("not")
missing_values = train_df.isna().sum()/len(train_df)*100
print(missing_values[missing_values != 0].sort_values())
missing_values[missing_values != 0].sort_values().plot.bar()
# -
# # 3.
# #### Correlation between two features means they are related to each other; the closer the absolute correlation value gets to 1, the more similar the two features are.
# So here is what we are going to do:
# for each pair of non-target features whose absolute correlation is near 1, we drop one of the two features,
# e.g. GarageYrBlt and YearBuilt, GarageCars and GarageArea.
#
# For each feature, we drop those whose absolute correlation with the target is near zero, because they are not related enough to the target,
# e.g. BsmtFinSF2, Id, BsmtHalfBath, LowQualFinSF, MiscVal, YrSold.
# The best features are those with an absolute correlation near 1 with the target; a programmatic sketch of this selection appears after the list below.
# #### so best features are:
#
# 1. OverallQual
# 2. GrLivArea
# 3. 1stFlrSF
# 4. TotalBsmtSF
# 5. GarageCars
# 6. GarageArea
# 7. YearBuilt
#
#
#
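# The same selection can also be done programmatically. The sketch below only prints the candidate columns; the 0.05 and 0.8 thresholds are illustrative choices, not values fixed by the analysis above.
# +
corr_matrix = train_df.corr()
target_corr = corr_matrix["SalePrice"].abs()

# Features with almost no linear relation to the target
weak = target_corr[target_corr < 0.05].index.tolist()
print("near-zero correlation with SalePrice:", weak)

# Pairs of non-target features that are highly correlated with each other
strong_pairs = [(a, b) for a in corr_matrix.columns for b in corr_matrix.columns
                if a < b and a != "SalePrice" and b != "SalePrice"
                and abs(corr_matrix.loc[a, b]) > 0.8]
print("highly correlated pairs (drop one of each):", strong_pairs)
# -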
# +
corr = train_df.corr()
corr.style.background_gradient(cmap ='coolwarm')
# -
#
# # 4.
# A feature that has an exponential relation with the target will have a linear relation with log(target), so by transforming the target to log(target) we hope to simplify its relation with the features and find more suitable features for learning. However, in this case transforming the target to log(target) did not change the correlations.
train_df['SalePrice'] = np.log(train_df['SalePrice'])
corr = train_df.corr()
corr.style.background_gradient(cmap ='coolwarm')
# # 5.
# Correlation by itself is not enough, because it cannot measure the relationship with categorical variables correctly, so we need more feature engineering.
#
# # 6.
# #### hexbin diagram for each attribute is plotted as below:
best_cols = ["OverallQual","GrLivArea","1stFlrSF","TotalBsmtSF","GarageCars","GarageArea","YearBuilt"]
for x in best_cols:
train_df.plot.hexbin(x= x,y="SalePrice", gridsize=20)
# #### scatter diagram for each attribute is plotted as below:
# +
for x in best_cols:
train_df.plot.scatter(x= x,y="SalePrice", c='DarkBlue')
# -
# # 7.
#
# Utilities: Type of utilities available
#
# Neighborhood: Physical locations within Ames city limits
#
# OverallQual: Rates the overall material and finish of the house
#
# FullBath: Full bathrooms above grade
#
best_cols = ["Utilities","Neighborhood","OverallQual","FullBath"]
for x in best_cols:
train_df.plot.scatter(x= x,y="SalePrice", c='DarkBlue')
# # 8.
# More columns can be omitted based on the correlation map.
# # Phase 1: Preprocessing
# # 1.
#
# 1. Filling NaN values with the column mean can be useful in small datasets with few attributes, where we do not want to drop any feature. However, it can introduce error into our predictions, since the mean is not the true value of the missing cell.
#
# 2. Deleting columns that have NaN values can be useful in huge datasets with a lot of features to train on, where dropping a few columns will not affect the result. But in small datasets where a considerable number of columns contain NaN values, deleting them makes the model too shallow and it will not fit the data well, because it has too few features to learn from.
#
# Another way of dealing with NaN values is deleting the rows that contain them. This approach works well on huge datasets in which dropping a number of rows will not noticeably shrink the training data. A small sketch of the three options follows.
#
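# A small sketch of the three options on a toy frame (illustrative only; the handling actually applied to this dataset follows below):
# +
toy = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [4.0, 5.0, 6.0]})

filled       = toy.fillna(toy.mean())   # option 1: impute with the column mean
dropped_cols = toy.dropna(axis=1)       # option 2: drop columns containing NaN
dropped_rows = toy.dropna(axis=0)       # option 3: drop rows containing NaN
print(filled, dropped_cols, dropped_rows, sep='\n\n')
# -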
# # 2.
# The features that contain missing values are listed below:
#
# 1. Electrical 0.068493%
# 2. MasVnrType 0.547945%
# 3. MasVnrArea 0.547945%
# 4. GarageYrBlt 5.547945%
# 5. LotFrontage 17.739726%
#
# we fill the first four and drop the last one.
# +
train_df = train_df.drop(columns=["LotFrontage"])
train_df = train_df.fillna(train_df.mean())
categorical_nan_cols = ["Electrical","MasVnrType"]
print(train_df[categorical_nan_cols].mode())
train_df.loc[:,"Electrical"].fillna("SBrkr",inplace = True)
train_df.loc[:,"MasVnrType"].fillna("None",inplace = True)
# -
# # 3.
# The goal of normalization is to change the values of numeric columns in the dataset to use a common scale, without distorting differences in the ranges of values or losing information.
#
# Normalization avoids problems such as combining multiple features with considerably different ranges, by creating new values that maintain the general distribution and ratios of the source data while keeping all numeric columns used in the model on a common scale.
#
# Normalization usually means scaling a variable to values between 0 and 1, while standardization transforms the data to have a mean of zero and a standard deviation of 1.
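# A quick illustration of that difference on a toy column (a sketch only; the scaling actually used below is sklearn's l2 normalize):
# +
from sklearn.preprocessing import MinMaxScaler, StandardScaler

toy_col = np.array([[1.0], [2.0], [3.0], [10.0]])
print(MinMaxScaler().fit_transform(toy_col).ravel())    # values scaled into [0, 1]
print(StandardScaler().fit_transform(toy_col).ravel())  # mean 0, standard deviation 1
# -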
# +
#normalization part
from sklearn import preprocessing
num_cols = list(train_df._get_numeric_data().columns)
categorical_cols = [x for x in list(train_df.columns) if x not in num_cols]
norm_array = preprocessing.normalize(train_df[num_cols],norm='l2',copy= False)
numerical_df = pd.DataFrame(norm_array,columns = num_cols)
numerical_df
# -
# # 4. categorical features
# many machine learning algorithms including ones we have used in this project can't work with categorical features. so we have to encode them into numeric values.
# we can eaither drop categorical variables or encod them using appropriate techniques.
# droping columns is not always the best choice as the dropped column may have important effect on our prediction and by droping them we will result in a not accurate prediction.
#
# techniques for encoding categorical varables to numerics are:
# 1. if the order of categorical variable is important we can use Ordinal encoding label ech value with an integer, using a map.
# 2. if the order is not important and values are not compareble to each other we can use One-Hot or Label-encoding.
#
# since our categorical features are from second type, we use on hot to encode them.
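# For contrast, a tiny sketch of both encodings on a made-up column (our data uses the one-hot path below):
# +
toy_cat = pd.DataFrame({'quality': ['low', 'high', 'medium', 'low']})

# Ordinal encoding: only appropriate when the order is meaningful
order_map = {'low': 0, 'medium': 1, 'high': 2}
print(toy_cat['quality'].map(order_map).tolist())

# One-hot encoding: for unordered categories
print(pd.get_dummies(toy_cat['quality']))
# -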
# +
#one hot
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
onehot = encoder.fit_transform(train_df[categorical_cols]).toarray()
encoder_cols = encoder.get_feature_names(categorical_cols)
categorical_df = pd.DataFrame(onehot,columns = encoder_cols)
categorical_df
# -
# # 5.
# According to the heatmap obtained in question 3 of phase 0, it seems useful to drop features that have a near-zero correlation with the target and to drop one of each pair of features with a near-one correlation with each other.
#
# +
train_df = pd.concat([numerical_df, categorical_df], axis=1)
drop_cols = ["BsmtFinSF2","Id","BsmtHalfBath","LowQualFinSF","MiscVal","YrSold"] #zero corrolary with target
drop_cols_2 =["GarageCars","GarageYrBlt"]#near one corrolary with another feature
train_df = train_df.drop(columns=drop_cols+drop_cols_2)
train_df
# -
# # 6.
# The test-set ratios tried were 0.2 and 0.25; 0.25 gave better predictions.
# We can split the data into train and test sets either in order or randomly; here, splitting in order gives better results.
# +
from sklearn.model_selection import train_test_split
cols = list(train_df.columns)
print(train_df.shape)
X = train_df.iloc[:,:-1]
y = train_df["SalePrice"]
print(X.shape,y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# +
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
def preprocess(train_df,nan_values="fill",categorical="encode",normalize=True,
drop_cols=True,test_size=0.2,order_split=False):
y = train_df["SalePrice"]
df = train_df.drop(columns=["SalePrice"],errors ='ignore')
meaning_ful_nan_cols = ["Alley","BsmtQual","BsmtCond","BsmtExposure","BsmtFinType1","BsmtFinType2"
,"GarageType","GarageFinish","GarageQual","GarageCond","FireplaceQu","PoolQC"
,"Fence","MiscFeature"]
for col in meaning_ful_nan_cols:
df[col] = df[col].fillna("not")
nan_columns = df.isna().any()
columns_with_nan = df.columns[nan_columns].tolist()
if nan_values == "drop":
df = df.drop(columns=["LotFrontage"],errors ='ignore')
df = df.drop(columns=columns_with_nan,errors ='ignore')
if nan_values == "fill":
df = df.fillna(df.mean())
df.loc[:,"Electrical"].fillna("SBrkr",inplace = True)
df.loc[:,"MasVnrType"].fillna("None",inplace = True)
num_cols = list(df._get_numeric_data().columns)
numerical_df = df[num_cols]
categorical_cols = [x for x in list(df.columns) if x not in num_cols]
categorical_df = df[categorical_cols]
if normalize:
norm_array = preprocessing.normalize(df[num_cols],norm='l2',copy= False)
numerical_df = pd.DataFrame(norm_array,columns = num_cols)
if categorical == "drop":
categorical_df = categorical_df.drop(columns=categorical_cols,errors ='ignore')
elif categorical == "encode":
encoder = OneHotEncoder()
onehot = encoder.fit_transform(df[categorical_cols]).toarray()
encoder_cols = encoder.get_feature_names(categorical_cols)
categorical_df = pd.DataFrame(onehot,columns = encoder_cols)
df = pd.concat([numerical_df, categorical_df], axis=1)
if drop_cols:
drop_cols_1 = ["BsmtFinSF2","Id","BsmtHalfBath","LowQualFinSF","MiscVal","YrSold"] #zero corrolary with target
drop_cols_2 =["GarageCars","GarageYrBlt"]#near one corrolary with another feature
df = df.drop(columns=drop_cols_1+drop_cols_2,errors ='ignore')
cols = list(df.columns)
X = df
if order_split:
size = int(len(df)*(1-test_size))
X_train = df.iloc[:size,:]
y_train = y.iloc[:size]
X_test = df.iloc[size:,:]
y_test = y.iloc[size:]
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
return X_train, X_test, y_train, y_test
# -
# # Phase 2: Model Training, Evaluation and Hyperparameter Tuning
def train_and_test(model,X_train, X_test, y_train, y_test):
model.fit(X_train, y_train)
y_pred= model.predict(X_test)
mae = mean_absolute_error(np.array(y_test), y_pred)
rmse = mean_squared_error(np.array(y_test), y_pred,squared = False)
return mae,rmse
# # KNN
# +
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
df = pd.read_csv("./house-prices-advanced-regression-techniques/train.csv")
X_train, X_test, y_train, y_test = preprocess(df,nan_values="fill",categorical="encode",
normalize=True,drop_cols=True,test_size=0.25,order_split=False)
# KNeighborsRegressor has no random_state or n_estimators parameters, so the grid only covers valid ones
param_grid_knn = {
    'n_neighbors': range(3, 50),
    'weights': ['uniform', 'distance']
}
knn = KNeighborsRegressor()
grid_search = GridSearchCV(estimator = knn, param_grid = param_grid_knn,cv = 3, n_jobs = -1, verbose = 2)
mae,rmse = train_and_test(knn,X_train, X_test, y_train, y_test)
mae,rmse
# -
# # trying different preprocessing methods
#
#
# +
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression

df = pd.read_csv("./house-prices-advanced-regression-techniques/train.csv")
result = pd.DataFrame(columns=["model","test/train ratio","normalize","categorical values"
,"missing values","split with order","mae","rmse"])
agents = [KNeighborsRegressor(n_neighbors=20),DecisionTreeRegressor(max_depth = 22),LinearRegression()]
nan_values = ["fill","drop"]
categorical_var = ["encode","drop"]
normalize = drop_cols = order_split = [True,False]
test_ratio = [0.2,0.25]
for nan in nan_values:
for c_var in categorical_var:
for norm in normalize:
for order in order_split:
for ratio in test_ratio:
for model in agents:
X_train, X_test, y_train, y_test = preprocess(df,nan_values=nan,categorical=c_var,
normalize=norm,test_size=ratio,order_split = order)
mae,rmse = train_and_test(model,X_train, X_test, y_train, y_test)
result = result.append({"model":model,"test/train ratio":ratio,"normalize":norm,
"categorical values":c_var,"missing values":nan,
"split with order":order,"mae":mae,"rmse":rmse},
ignore_index=True)
# +
result.sort_values(by = ["mae","rmse"])[:25]
# -
# # Decision Tree
# +
from sklearn.tree import DecisionTreeRegressor
X_train, X_test, y_train, y_test = preprocess(df,nan_values="fill",categorical="drop",
normalize=False,drop_cols=True,test_size=0.25,order_split=False)
# DecisionTreeRegressor has no n_estimators parameter, so the grid only covers valid ones
param_grid = {
    'random_state': [0, 42, 60],
    'max_depth': range(5, 70)
}
clf = DecisionTreeRegressor()
grid_search = GridSearchCV(estimator = clf, param_grid = param_grid,cv = 3, n_jobs = -1, verbose = 2)
mae,rmse = train_and_test(clf,X_train, X_test, y_train, y_test)
mae,rmse
# -
# # Linear Regression
# +
from sklearn.linear_model import LinearRegression
X_train, X_test, y_train, y_test = preprocess(df,nan_values="drop",categorical="encode",
normalize=False,drop_cols=True,test_size=0.25,order_split=True)
param_grid = {
'fit_intercept':[True,False],
'n_jobs': [20,100, 200, 300, 1000]
}
reg = LinearRegression()
grid_search = GridSearchCV(estimator = reg, param_grid = param_grid,cv = 3, n_jobs = -1, verbose = 2)
mae,rmse = train_and_test(reg,X_train, X_test, y_train, y_test)
print("mae: ",mae,"rmse: ",rmse)
# -
# ## Overfitting vs Underfitting
# A model is underfitting the training data when the model performs poorly on the training data. This is because the model is unable to capture the relationship between the features and the target values.
#
# A model is overfitting the training data when it performs well on the training data but does not perform well on the evaluation data. This is because the model is memorizing the data it has seen and is unable to generalize to unseen examples.
#
# All three trained models perform acceptably on both the training and the validation data, so they are not overfitted. Changing the order of the training data changes how the models fit, so they are not underfitted either; a quick train/test error comparison is sketched below.
#
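# A simple check is to compare train and test errors for the same model; a large gap would indicate overfitting. The sketch below reuses whichever train/test split is currently in memory and the DecisionTreeRegressor imported above; max_depth=22 is just an example value.
# +
check_model = DecisionTreeRegressor(max_depth=22)
check_model.fit(X_train, y_train)
train_mae = mean_absolute_error(y_train, check_model.predict(X_train))
test_mae = mean_absolute_error(y_test, check_model.predict(X_test))
print("train MAE:", train_mae, "test MAE:", test_mae)
# -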
# # Phase 3: Ensemble Methods
# # Random Forest
# +
# SalePrice is continuous, so we need the regressor rather than the classifier
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
from sklearn.model_selection import GridSearchCV
X_train, X_test, y_train, y_test = preprocess(df,nan_values="fill",categorical="drop",
normalize=False,drop_cols=True,test_size=0.25,order_split=False)
param_grid = {
'random_state': [0,42,60],
'max_depth': range(5,70),
'n_estimators': [20,100, 200, 300, 1000]
}
clf = RandomForestRegressor()
grid_search = GridSearchCV(estimator = clf, param_grid = param_grid,cv = 3, n_jobs = -1, verbose = 2)
mae,rmse = train_and_test(clf,X_train, X_test, y_train, y_test)
print("mae: ",mae,"rmse: ",rmse)
# -
# # Voting Regression
# +
from sklearn.ensemble import VotingRegressor
X_train, X_test, y_train, y_test = preprocess(df,nan_values="drop",categorical="encode",
normalize=False,drop_cols=False,test_size=0.25,order_split=True)
param_grid = {
'weights': [[1,1,1],[5,15,1],[3,15,2],[7,4,10],[5,1,10],[18,13,14]]
}
r1 = KNeighborsRegressor(n_neighbors=22)
r2 = DecisionTreeRegressor(max_depth= 17)
r3 = LinearRegression()
vreg = VotingRegressor([('KNN', r1), ('DT', r2),('LR',r3)])
grid_search = GridSearchCV(estimator = vreg, param_grid = param_grid,cv = 3, n_jobs = -1, verbose = 2)
mae,rmse = train_and_test(vreg,X_train, X_test, y_train, y_test)
print("mae: ",mae,"rmse: ",rmse)
# -
# # 3.
# Voting regression ensembles a number of agents, so if one agent fails to predict a sample well, the others can compensate. If we combine agents that each have acceptable results on their own, the overall result improves; but if all of those agents fail on a sample, voting regression will not help.
| 17,488 |
/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
b924be322bc6a173857b0854f1fb6c42b7fe4318
|
[] |
no_license
|
zsun1990/mnist_classification
|
https://github.com/zsun1990/mnist_classification
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,210 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
np.zeros((3,3,3))
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import pandas as pd
# -
# %pylab inline
digits = datasets.load_digits()
X = digits.data
y = digits.target
# +
n = len(X)
X_train=np.array(X[:int(0.75*n)])
X_test=np.array(X[int(0.75*n):])
y_train=np.array(y[:int(0.75*n)])
y_test=np.array(y[int(0.75*n):])
# -
neigh = KNeighborsClassifier(n_neighbors=1)
neigh.fit(X_train, y_train)
predictions = neigh.predict(X_test)
score = 1.*sum([y!=t for (y, t) in zip(y_test, predictions)])/len(y_test)
print score
with open("knn_ans1.txt", "w") as fout:
fout.write(str(score))
from sklearn import ensemble
rf = ensemble.RandomForestClassifier(n_estimators = 1000)
rf.fit(X_train, y_train)
pred_rf = rf.predict(X_test)
# +
score_rf = 1.*sum([y!=t for (y, t) in zip(y_test, pred_rf)])/len(y_test)
print score_rf
with open("knn_ans2.txt", "w") as fout:
fout.write(str(score_rf))
# -
# ...anonymized patient data <br>
# source: https://repositoriodatasharingfapesp.uspdigital.usp.br/
# ### Hospital Sírio Libanes
# %%time
hsl_exames = pyarrow_read_csv(path_data + "FAPESP/HSL_Exames_3.csv",sep="|")
print(hsl_exames.shape)
hsl_pacientes = pyarrow_read_csv(path_data + "FAPESP/HSL_Pacientes_3.csv",sep="|")
print(hsl_pacientes.shape)
hsl_desfechos = pyarrow_read_csv(path_data + "FAPESP/HSL_Desfechos_3.csv",sep="|")
print(hsl_desfechos.shape)
# ___
# # Analyzing the databases
# ### Adjusting the Fleury data
# +
hsl = hsl_exames.merge(hsl_desfechos,
on = ["ID_PACIENTE","ID_ATENDIMENTO"],
how = 'left').merge(hsl_pacientes,
on = "ID_PACIENTE",
how = 'left')
describe(hsl)
# -
hsl_ = hsl.copy()
# #### DT_COLETA
# DT_COLETA is a date field and must be converted to datetime
hsl_.DT_COLETA = pd.to_datetime(hsl_.DT_COLETA,errors='coerce')
print("Number of records that could not be converted:",hsl_.DT_COLETA.isna().sum())
# #### DE_ORIGEM
# <font color = 'red'><b> Check whether there is some way to reduce this volume of DE_ORIGEM values</b></font>
#
# According to the data dictionary, the format should consist of 4-character codes, but that is not what we see here
hsl_.query("'Unidades de Internação' == DE_ORIGEM").groupby(["DE_ORIGEM","DE_CLINICA"]).ID_PACIENTE.count()
hsl_.DE_ORIGEM.value_counts().to_dict()
# #### CD_UNIDADE
# There are some empty values for CD_UNIDADE
hsl_.CD_UNIDADE.value_counts()
# Some units are empty, so we replace them with NaN and convert the column to a categorical type
hsl_.CD_UNIDADE = hsl_.CD_UNIDADE.replace({"":np.nan})
hsl_.CD_UNIDADE = pd.Categorical(hsl_.CD_UNIDADE,categories=hsl_.CD_UNIDADE.value_counts().index)
# #### DE_VALOR_REFERENCIA
# There are some empty reference values
hsl_.DE_VALOR_REFERENCIA.value_counts()
# Take these blank values and convert them to NaN
hsl_.DE_VALOR_REFERENCIA = hsl_.DE_VALOR_REFERENCIA.replace({"":np.nan})
# #### DT_ATENDIMENTO
hsl_.DT_ATENDIMENTO = pd.to_datetime(hsl_.DT_ATENDIMENTO,errors = 'coerce')
print("Quantidade de registros que não puderam ser convertidos:",hsl_.DT_ATENDIMENTO.isna().sum())
#Alguns registros não puderam ser convertidos
#Motivo: os valores já era registros faltantes antes da conversão
pd.DataFrame({"dt_convertida": hsl_.DT_ATENDIMENTO , "dt_original": hsl.DT_ATENDIMENTO}).query("dt_convertida != dt_convertida").dt_original.value_counts(dropna=False)
# #### DE_TIPO_ATENDIMENTO
#São apenas 5 possíveis, então, vamos converter para dados categoricos
hsl_.DE_TIPO_ATENDIMENTO.value_counts(dropna = False)
hsl_.DE_TIPO_ATENDIMENTO = pd.Categorical(hsl_.DE_TIPO_ATENDIMENTO,categories=hsl_.DE_TIPO_ATENDIMENTO.value_counts().index)
# #### DE_CLINICA
hsl_.DE_CLINICA.value_counts(dropna=False)
# Converting to a categorical type (the categories must come from DE_CLINICA itself, not DE_TIPO_ATENDIMENTO)
hsl_.DE_CLINICA = pd.Categorical(hsl_.DE_CLINICA,categories=hsl_.DE_CLINICA.value_counts().index)
# #### IC_SEXO
# #### AA_NASCIMENTO
# #### CD_PAIS
# #### CD_UF
# #### CD_MUNICIPIO
# #### CD_CEPREDUZIDO
# #### DE_RESULTADO
# According to the data dictionary:
#
# <i>"If DE_ANALITO requires a numeric value: NNNN if integer, or NNNN,NNN if it has decimal places.
# If DE_ANALITO requires a qualitative value: a string with a restricted domain.
# If DE_ANALITO is based on microscopic observation: a free-text string."</i>
#
# So let's make this split
hsl_.DE_RESULTADO.value_counts()
def func_filtro_numerico_DE_RESULTADO(x):
if((re.search("[0-9]+",x.lower()) == None) & (re.search("[0-9]+,[0-9]+",x.lower()) == None)):
return False
elif(re.search("[0-9]+,[0-9]+",x.lower()) != None):
if(re.search("[0-9]+,[0-9]+",x.lower()).span()[1] - re.search("[0-9]+,[0-9]+",x.lower()).span()[0] == len(x)):
return True
else:
return False
else:
if(re.search("[0-9]+",x.lower()).span()[1] - re.search("[0-9]+",x.lower()).span()[0] == len(x)):
return True
else:
return False
filtro_numerico_DE_RESULTADO = hsl_.DE_RESULTADO.apply(func_filtro_numerico_DE_RESULTADO)
hsl_["DE_RESULTADO_NUMERICO"] = pd.to_numeric(hsl_.DE_RESULTADO[filtro_numerico_DE_RESULTADO].str.replace(",","."),errors="coerce")
hsl_["DE_RESULTADO_NAO_NUMERICO"] = hsl_.DE_RESULTADO[~filtro_numerico_DE_RESULTADO]
# #### DE_ANALITO
hsl_.DE_ANALITO.value_counts()
# #### DE_EXAME
hsl_.DE_EXAME.value_counts()
# -
# #### Question 6: On average, by how much does life expectancy increase every year around the world?
# On average, life expectancy increases by 0.3259038 years every year around the world.
# #### Question 7: Do you reject the null hypothesis of no relationship between year and life expectancy? Why?
from statsmodels.formula.api import ols
regression = ols(formula='lifeExp ~ year', data=data).fit()
regression.f_pvalue
# We reject the null hypothesis because the p-value is less than 0.05; in fact, it is extremely small (about 7.54679e-80). The p-value is the probability of obtaining results at least as extreme as those observed, assuming that the null hypothesis is correct (that there is no relationship between year and life expectancy). A p-value of 0.05 is used as the cutoff for significance,
# such that a small p-value (typically < 0.05) indicates strong evidence against the null hypothesis, and thus we reject it.
# #### Exercise 3: Make a violin plot of residuals vs. year for the linear model from Exercise 2.
# +
# Residual = Observed value - Predicted value
y_pred = reg.predict(X)
data['actual'] = y
data['predicted'] = y_pred
data['residual'] = data['actual'] - data['predicted']
data.head()
# -
plt.figure(figsize=(12, 6))
sbn.violinplot(data['year'], data['residual'], data=data, palette="pastel")
plt.title("Residuals vs. Year")
plt.xlabel("Year")
plt.ylabel("Residuals")
plt.show()
# #### Question 8: Does the plot of Exercise 3 match your expectations (as you answered Question 4)?
# Yes, the violin plot above does match the expectations I made earlier. The shape of the violins are very similar to the lifeExp vs. year violin plot, and the violins are descending over time. The violins are also clustered around 0, meaning that there are observed values that are both greater and less than the predicted values.
# #### Exercise 4: Make a boxplot (or violin plot) of model residuals vs. continent.
plt.figure(figsize=(12, 6))
sbn.violinplot(data['continent'], data['residual'], data=data, palette="pastel")
plt.title("Residuals vs. Continent")
plt.xlabel("Continent")
plt.ylabel("Residuals")
plt.show()
# #### Question 9: Is there a dependence between model residual and continent? If so, what would that suggest when performing a regression analysis of life expectancy across time?
# Yes, there does appear to be a dependence between model residual and continent, as the violins (distributions of residuals) are different for each continent. Because there is a variation in residuals as a function of continent, we know the residuals to be dependent, suggesting that performing a regression analysis of life expectancy across time is not only dependent on year, but also on continent as well.
# #### Exercise 5: As in the Moneyball project, make a scatter plot of life expectancy vs. year, grouped by continent, and add a regression line.
# +
lrm_dict = {}
lrm_str_dict = {}
for continent, groups in data.groupby('continent'):
groups.plot(kind='scatter',x='year',y='lifeExp', title=continent, marker='.',
xlim=(xMin,xMax), ylim=(yMin,yMax), figsize=(12,6))
X = np.array(groups['year']).reshape(-1, 1)
y = np.array(groups['lifeExp']).reshape(-1, 1)
reg = LinearRegression()
reg.fit(X, y)
y_pred = reg.predict(X)
coef = reg.coef_[0][0]
y_int = reg.intercept_[0]
lrm = 'Life Expectancy = '+str(round(y_int, 6))+' + '+str(round(coef, 6))+' * Year'
lrm_dict[continent] = round(coef, 6)
lrm_str_dict[continent] = [lrm]
plt.plot(X, y_pred, color = 'red', label=lrm)
plt.ylim(yMin, yMax)
plt.legend(loc='lower right')
plt.title("Predicted Life Expectency vs. Year for "+continent)
plt.xlabel("Year")
plt.ylabel("Life Expectancy")
plt.show()
# Display Linear Regression Model of [Life Expectancy vs. Year] for each Continent
lrm_df = pd.DataFrame.from_dict(lrm_str_dict, orient='index', columns=['Linear Regression Model'])
display(lrm_df)
# -
# #### Question 10: Based on this plot, should your regression model include an interaction term for continent and year? Why?
# Yes, our regression model should include an interaction term for continent and year, as the coefficients for the linear models for each continent are all different. For instance, we can see that the regression line for Asia is much steeper than Africa's regression line. In our models, life expectancies are increasing at an average of 0.453122/year for Asia and 0.289529/year for Africa.
#
# In order to fit a more accurate regression model, we need to account for the differences among continents and we can do so by incorporating an interaction term for continent and year.
# #### Exercise 6: Fit a linear regression model for life expectancy including a term for an interaction between continent and year.
# +
dummies = data['continent'].str.get_dummies(" ")
# interaction term = year x continent
dummies2 = dummies.copy()
for i, row in data.iterrows():
dummies2.at[i, row['continent']] = row['year']
dummies2 = dummies2.rename(columns={"Africa": "Africa_IT", "Americas": "Americas_IT", "Asia": "Asia_IT",
"Europe": "Europe_IT", "Oceania": "Oceania_IT"})
df = pd.concat([data, dummies, dummies2], axis=1, sort=False)
df.head()
# -
# When defining dummy variables, a common mistake is to define too many variables. If a categorical variable can take on k values, we only need k - 1 dummy variables as a kth dummy variable is redundant; it carries no new information.
#
# Having a kth dummy variable also creates a severe multicollinearity problem for the analysis. Using k dummy variables when only k - 1 dummy variables are required is known as the dummy variable trap.
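# In pandas this is commonly handled with `drop_first=True`, which keeps only k - 1 indicator columns (shown here purely as an illustration; the model below instead keeps Americas as the baseline by listing the other four continents explicitly):
# +
# 5 continents -> 4 dummy columns; the dropped level becomes the baseline
print(pd.get_dummies(data['continent'], drop_first=True).head())
# -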
# +
X = df[['year','Africa','Asia','Europe','Oceania','Africa_IT','Asia_IT','Europe_IT','Oceania_IT']]
y = df['lifeExp']
reg = LinearRegression()
reg.fit(X, y)
y_pred = reg.predict(X)
c = reg.coef_
y_int = reg.intercept_
# Display coefficients
coef_df = pd.DataFrame(c, X.columns, columns=['coefficient'])
display(coef_df)
# Display y-intercept
y_int_df = pd.DataFrame([[y_int]], columns=['y-intercept'])
y_int_df.set_index('y-intercept', inplace=True)
display(y_int_df)
# Display Linear Regressiom Model with Interaction Terms
# mlr = 'y = '+str(round(y_int,2))+' + '+str(round(c[0],2))+'*year + '+str(round(c[1],2))+'*\u03B4Africa + '+str(round(c[2],2))+'*\u03B4Asia + '+str(round(c[3],2))+'*\u03B4Europe + '+str(round(c[4],2))+'*\u03B4Oceania\n + '+str(round(c[5],2))+'*\u03B4Africa*year + '+str(round(c[6],2))+'*\u03B4Asia*year + '+str(round(c[7],2))+'*\u03B4Europe*year + '+str(round(c[8],2))+'*\u03B4Oceania*year'
# print(mlr)
# -
# $$y = \beta_{0} + \beta_{1}year + \beta_{2}\delta_{Africa} + \beta_{3}\delta_{Asia}
# + \beta_{4}\delta_{Europe} + \beta_{5}\delta_{Oceania} + \beta_{6}\delta_{Africa}year
# + \beta_{7}\delta_{Asia}year + \beta_{8}\delta_{Europe}year + \beta_{9}\delta_{Oceania}year$$
# When we replace the coefficients 𝛽0,𝛽1,..,𝛽k in the multiple linear regression model with the coefficients that we found from fitting our data after including the interaction terms, we have the following as our regression model:
# $$y = -663.106 + 0.368year + 138.848(\delta_{Africa}) - 173.785(\delta_{Asia})
# + 295.695(\delta_{Europe}) + 321.198(\delta_{Oceania}) - 0.078\delta_{Africa}\times{year}
# + 0.085\delta_{Asia}\times{year} - 0.146\delta_{Europe}\times{year} -0.157\delta_{Oceania}\times{year}$$
# Where 𝛿𝐴𝑓𝑟𝑖𝑐𝑎, 𝛿𝐴𝑠𝑖𝑎, 𝛿𝐸𝑢𝑟𝑜𝑝𝑒, and 𝛿𝑂𝑐𝑒𝑎𝑛𝑖𝑎 are our dummy variables, equaling either 1 or 0 (1 indicates if a data entry is in that continent, else 0).
# #### Question 11: Are all parameters in the model significantly different from zero? If not, which are not significantly different from zero?
# Judging by the summary below, the p-values of the parameters are all very close to 0, so the parameters are significantly different from zero.
regression = ols(formula='lifeExp ~ year + continent + year*continent', data=data).fit()
regression.summary()
# #### Question 12: On average, by how much does life expectancy increase each year for each continent? (Provide code to answer this question by extracting relevant estimates from model fit)
# +
# Extract the coefficients for each continent
# Estimates = Year + Continent Interaction Term
year_coef = coef_df['coefficient']['year']
estimates = coef_df + year_coef
estimates = estimates.iloc[5:]
estimates.loc['Americas_IT'] = year_coef
estimates
# -
# On average, we find that life expectancy increases for each continent at the following rates per year:
# <ul>
# <li>Africa: 0.289529 per year</li>
# <li>Asia: 0.453122 per year</li>
# <li>Europe: 0.221932 per year</li>
# <li>Oceania: 0.210272 per year</li>
# <li>Americas: 0.367651 per year</li>
# </ul>
# #### Exercise 8: Make a residuals vs. year violin plot for the interaction model. Comment on how well it matches assumptions of the linear regression model. Do the same for a residuals vs. fitted values model.
# +
# Residual = Observed value - Predicted value
y_pred = reg.predict(X)
df['actual_IM'] = y
df['predicted_IM'] = y_pred
df['residual_IM'] = df['actual_IM'] - df['predicted_IM']
data.head()
plt.figure(figsize=(12, 6))
sbn.violinplot(df['year'], df['residual_IM'], data=df, palette="pastel")
plt.title("Interaction Model Residuals vs. Year")
plt.xlabel("Year")
plt.ylabel("Interaction Model Residuals")
plt.show()
# -
# The residuals are unimodal, zero-centered, and close to symmetrical, indicating a well-fitted linear regression model.
# ## Part 2: Classification
# #### Problem 1: Implement the gradient descent algorithm (either batch or stochastic versions) for multiple linear regression. I.e., extend the version of the algorithm in the lecture notes to multiple parameters.
# <i>Gradient Descent Update Equation for Logistic Regression:</i>
# $$ \beta^{k+1} = \beta^k + \alpha \sum_{i=1}^{n} (y_i - p_i(\beta^k))x_i $$
#
# <i>where (from the definition of log-odds):</i>
# $$ p_i(\beta^k) = \frac{e^{f_i(\beta^k)}}{1 + e^{f_i(\beta^k)}} $$
#
# <i>and</i>
# $$ f_i(\beta^k) = \beta^k_0 + \beta^k_1x_{i1} + \beta^k_2x_{i2} +...+ \beta^k_px_{ip} $$
def grad_descent(X, y, T, alpha):
m, n = X.shape # m = #examples, n = #features
theta = np.zeros(n) # initialize parameters
f = np.zeros(T) # track loss over time
g = 0
for i in range(T):
# loss for current parameter vector theta
f[i] = 0.5*np.linalg.norm(X.dot(theta) - y)**2
        # gradient of f at theta (the direction of steepest ascent)
g = X.T.dot(X.dot(theta) - y)
# step down the gradient
theta = theta - alpha*g
return theta, f
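# For reference, the gradient step used above (`g = X.T.dot(X.dot(theta) - y)`) comes from differentiating the squared-error loss tracked in `f`:
# $$ f(\theta) = \frac{1}{2}\lVert X\theta - y \rVert^{2}, \qquad \nabla f(\theta) = X^{T}(X\theta - y) $$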
# #### Problem 2: Derive the above update equation. Write the derivation in a markdown ipynb cell. (+5 points of extra credit)
# <i>Derived Gradient Descent Update Equation for Logistic Regression:</i>
# $$ \beta^{k+1} = \beta^k + \alpha \sum_{i=1}^{n} \left[y_i - \frac{e^{\beta^k \cdot x_i}}{1 + e^{\beta^k \cdot x_i}}\right] x_i $$
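# One way to fill in the derivation (a sketch): maximizing the log-likelihood of the logistic model by gradient ascent yields exactly this update.
# $$ \ell(\beta) = \sum_{i=1}^{n}\left[y_i \log p_i(\beta) + (1 - y_i)\log\big(1 - p_i(\beta)\big)\right], \qquad p_i(\beta) = \frac{e^{\beta \cdot x_i}}{1 + e^{\beta \cdot x_i}} $$
# Since $\frac{\partial}{\partial \beta}\log p_i = (1 - p_i)x_i$ and $\frac{\partial}{\partial \beta}\log(1 - p_i) = -p_i x_i$, the gradient is
# $$ \nabla_{\beta}\,\ell(\beta) = \sum_{i=1}^{n}\big(y_i - p_i(\beta)\big)x_i $$
# and a gradient-ascent step of size $\alpha$ gives $\beta^{k+1} = \beta^{k} + \alpha \sum_{i=1}^{n}\big(y_i - p_i(\beta^{k})\big)x_i$.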
# #### Problem 3: Implement the gradient descent algorithm (either batch or stochastic versions) for multiple logistic regression. I.e., modify your code in problem 1 for the logistic regression update equation.
# +
def log_grad_descent(X, Y, T, alpha):
    # Initialize weights and the per-sample residuals y_i - p_i(theta)
    m, n = X.shape
    theta = np.zeros(n)
    f = np.zeros(m)
    for t in range(T):
        # Accumulate the gradient over all samples (batch update), as in the
        # update equation above: sum_i (y_i - p_i(theta)) * x_i
        g = np.zeros(n)
        for i in range(m):
            f[i] = compute_loss(X[i], Y[i], theta)
            g += f[i] * X[i]
        # Take a step in the direction of the gradient (ascent on the log-likelihood)
        theta = theta + alpha * g
        # Rescale theta to unit length to keep its magnitude bounded between updates
        theta = theta / np.linalg.norm(theta)
    return theta, f

def compute_loss(x, y, theta):
    # Residual y_i - p_i(theta), where p_i is the logistic (sigmoid) probability
    p = np.exp(np.dot(theta, x)) / (1 + np.exp(np.dot(theta, x)))
    return y - p
# -
# #### Problem 4: To test your programs, simulate data from the linear regression and logistic regression models and check that your implementations recover the simulation parameters properly.
# We use the following functions to simulate data for our testing:
# +
from sklearn import datasets
# Generate data for linear regression:
gen_data_x, gen_data_y = datasets.make_regression(n_samples=100, n_features=20, noise = 1.5)
# Generate data for logistic regression. This is similar to linear, only now values are either 0 or 1.
log_gen_data_x, dump_y = datasets.make_regression(n_samples=100, n_features=20, noise = 1.5)
log_gen_data_y = [0 if i>0 else 1 for i in dump_y]
# +
(theta, f) = grad_descent(gen_data_x, gen_data_y, 5, 5)
reg = LinearRegression()
reg.fit(gen_data_x, gen_data_y)
plt.plot(reg.coef_, theta, 'o')
plt.xlabel("Simulation Parameters")
plt.ylabel("Estimated Parameters")
plt.show()
# +
(theta, f) = log_grad_descent(log_gen_data_x, log_gen_data_y, 5, 5)
reg = LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial')
reg.fit(log_gen_data_x, log_gen_data_y)
true = reg.coef_[0]
plt.plot(true, theta, 'o')
plt.xlabel("Simulation Parameters")
plt.ylabel("Estimated Parameters")
plt.show()
# -
# #### Try it out!
# +
from sklearn.model_selection import cross_val_predict
from sklearn import tree
from sklearn import metrics
from sklearn import svm
from scipy import stats
# Import dataset (classification)
iris = datasets.load_iris()
gen_data_x, gen_data_y = datasets.make_regression(n_samples=20, n_features=20, noise = 1.5)
# Simulate data for logistic regression
X, gen_y = datasets.make_regression(n_samples=100, n_features=20, noise = 1.5)
y = [0 if i>20 else 1 for i in gen_y]
# +
# Gradient descent scores
(theta, f) = grad_descent(X, y, 20, 20)
reg = LinearRegression()
reg.fit(X, y)  # fit on the same simulated data used for gradient descent above
plt.plot(reg.coef_, theta, 'o')
plt.xlabel("Simulation Parameters")
plt.ylabel("Estimated Parameters")
plt.show()
# -
# Scores when using gradient descent
(theta,loss) = log_grad_descent(X, y, 20, 20)
estimated = np.zeros(len(y))
for i in range(len(X[0])):
estimated += X[:, i] * theta[i]
estimated += loss
# +
# Decision Tree Model
model = tree.DecisionTreeClassifier()
# Get cross-validated predictions for each sample (cv=10 means ten-fold)
scores1 = cross_val_predict(model, X, y, cv=10)
stats.ttest_rel(scores1, estimated)
# +
# Linear SVM Model
model2 = svm.LinearSVC()
# Get cross-validated predictions for each sample (cv=10 means ten-fold)
scores2 = cross_val_predict(model2, X, y, cv=10)
stats.ttest_rel(scores2, estimated)
# -
| 20,209 |
/labs/mxboard/mxboard_cifar10.ipynb
|
3c91329ca219a467b7f5e1028157b246f370faac
|
[] |
no_license
|
ThomasDelteil/GluonBootcamp
|
https://github.com/ThomasDelteil/GluonBootcamp
| 2 | 0 | null | 2018-08-01T20:39:03 | 2018-07-31T10:39:37 | null |
Jupyter Notebook
| false | false |
.py
| 14,935 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Environment (conda_mxb_1_tb1.6)
# language: python
# name: conda_mxb_1_tb1.6
# ---
# !rm -rf ./logs
# +
import os
import subprocess
import signal
class TensorBoardServer():
def __init__(self):
pass
def start(self):
self.process = subprocess.Popen("tensorboard --logdir ./logs --host 127.0.0.1 --port 6006",
shell=True, preexec_fn=os.setsid)
def stop(self):
os.killpg(self.process.pid, signal.SIGTERM)
tb_server = TensorBoardServer()
tb_server.start()
# -
import datetime
import math
from mxboard import SummaryWriter
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
import os
# ## Data
# +
batch_size = 128
transform_fn = transforms.Compose([
transforms.ToTensor()
])
train_dataset = mx.gluon.data.vision.CIFAR10(train=True).transform_first(transform_fn)
train_dataloader = mx.gluon.data.DataLoader(train_dataset, batch_size)
# -
test_dataset = mx.gluon.data.vision.CIFAR10(train=False).transform_first(transform_fn)
test_dataloader = mx.gluon.data.DataLoader(test_dataset, batch_size)
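# A quick sanity check of the input pipeline (a sketch): with `ToTensor`, each batch should come out as `(batch_size, 3, 32, 32)` float32 images and `(batch_size,)` labels.
# +
for data_batch, label_batch in train_dataloader:
    print(data_batch.shape, data_batch.dtype, label_batch.shape)
    break
# -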
# ## Model
# +
class BasicBlock(nn.HybridBlock):
"""
Pre-activation Residual Block with 2 convolution layers.
"""
def __init__(self, channels, stride=1, dim_match=True):
super(BasicBlock, self).__init__()
self.stride = stride
self.dim_match = dim_match
with self.name_scope():
self.bn1 = nn.BatchNorm(epsilon=2e-5)
self.conv1 = nn.Conv2D(channels=channels, kernel_size=3, padding=1, strides=stride, use_bias=False)
self.bn2 = nn.BatchNorm(epsilon=2e-5)
self.conv2 = nn.Conv2D(channels=channels, kernel_size=3, padding=1, strides=1, use_bias=False)
if not self.dim_match:
self.conv3 = nn.Conv2D(channels=channels, kernel_size=1, padding=0, strides=stride, use_bias=False)
def hybrid_forward(self, F, x):
act1 = F.relu(self.bn1(x))
act2 = F.relu(self.bn2(self.conv1(act1)))
out = self.conv2(act2)
if self.dim_match:
shortcut = x
else:
shortcut = self.conv3(act1)
return out + shortcut
class ResNet(nn.HybridBlock):
def __init__(self, num_classes):
super(ResNet, self).__init__()
with self.name_scope():
net = self.net = nn.HybridSequential()
# data normalization
net.add(nn.BatchNorm(epsilon=2e-5, scale=True))
# pre-stage
net.add(nn.Conv2D(channels=16, kernel_size=3, strides=1, padding=1, use_bias=False))
# Stage 1 (4 total)
net.add(BasicBlock(16, stride=1, dim_match=False))
for _ in range(3):
net.add(BasicBlock(16, stride=1, dim_match=True))
# Stage 2 (4 total)
net.add(BasicBlock(32, stride=2, dim_match=False))
for _ in range(3):
net.add(BasicBlock(32, stride=1, dim_match=True))
# Stage 3 (4 in total)
net.add(BasicBlock(64, stride=2, dim_match=False))
for _ in range(3):
net.add(BasicBlock(64, stride=1, dim_match=True))
# post-stage (required as using pre-activation blocks)
net.add(nn.BatchNorm(epsilon=2e-5))
net.add(nn.Activation('relu'))
net.add(nn.GlobalAvgPool2D())
net.add(nn.Dense(num_classes))
def hybrid_forward(self, F, x):
out = x
for i, b in enumerate(self.net):
out = b(out)
return out
# -
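# Before training, a cheap shape check (a sketch): push a dummy CIFAR-10 sized batch through the network and confirm it produces one logit per class.
# +
_check_net = ResNet(num_classes=10)
_check_net.initialize(mx.init.Xavier())
_dummy_batch = mx.nd.random.uniform(shape=(2, 3, 32, 32))
print(_check_net(_dummy_batch).shape)  # expected: (2, 10)
# -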
# # Training
def markdown_table(data):
content = ""
content += "Key | Value" + "\n"
content += "-----|-----" + "\n"
for key, value in data.items():
content += "{} | {}".format(key, value) + "\n"
return content
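# For example, the helper renders a plain dict as a small markdown table, which TensorBoard's text plugin will display as a formatted table:
print(markdown_table({"batch_size": 128, "optimizer": "SGD"}))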
# +
def accuracy(output, label):
output_argmax = output.argmax(axis=1).astype('int32')
label_argmax = label.astype('int32')
equal = output_argmax==label_argmax
accuracy = mx.nd.mean(equal.astype('float32')).asscalar()
return accuracy
def evaluate_accuracy(valid_data, model, ctx):
acc = 0.
count = 0
for batch_idx, (data, label) in enumerate(valid_data):
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
output = model(data)
acc = acc + accuracy(output, label)
count += 1
return acc / count
# -
def train_resnet(train_dataloader, test_dataloader, optimizer, description):
run_id = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + "/" + description
writer = SummaryWriter(logdir=os.path.join("./logs/cifar10", run_id))
ctx = mx.gpu()
kvstore = "device"
net = ResNet(num_classes=10)
# lazy initialize parameters
net.initialize(mx.init.Xavier(), ctx=ctx)
trainer = mx.gluon.Trainer(params=net.collect_params(), optimizer=optimizer, kvstore=kvstore)
train_metric = mx.metric.Accuracy()
loss_fn = mx.gluon.loss.SoftmaxCrossEntropyLoss()
run_description = markdown_table({
"batch_size": train_dataloader._batch_sampler._batch_size,
"optimizer": type(optimizer),
"optimizer_momentum": optimizer.momentum,
"optimizer_wd": optimizer.wd
})
writer.add_text(tag='run_description', text=run_description, global_step=0)
num_epochs = 10
for epoch in range(1, num_epochs + 1):
for batch_idx, (data_batch, label_batch) in enumerate(train_dataloader, start=1):
# move to required context (e.g. gpu)
data_batch = data_batch.as_in_context(ctx)
label_batch = label_batch.as_in_context(ctx)
# take forward and backward pass
with mx.autograd.record():
pred_batch = net(data_batch)
loss = loss_fn(pred_batch, label_batch)
loss.backward()
bs = data_batch.shape[0]
trainer.step(bs)
train_metric.update(label_batch, pred_batch)
# mxboard logging at end of each epoch
## sample of the images passed to network
adj_data_batch = (data_batch - data_batch.min())/(data_batch.max() - data_batch.min())
writer.add_image(tag="batch", image=adj_data_batch, global_step=epoch)
## histograms of input, output and loss
writer.add_histogram(tag='input', values=data_batch, global_step=epoch, bins=100)
writer.add_histogram(tag='output', values=pred_batch, global_step=epoch, bins=100)
writer.add_histogram(tag='loss', values=loss, global_step=epoch, bins=100)
## learning rate
writer.add_scalar(tag="learning_rate", value=trainer.learning_rate, global_step=epoch)
## training accuracy
_, trn_acc = train_metric.get()
writer.add_scalar(tag='accuracy/training', value=trn_acc * 100, global_step=epoch)
## test accuracy
test_acc = evaluate_accuracy(test_dataloader, net, ctx)
writer.add_scalar(tag='accuracy/testing', value=test_acc * 100, global_step=epoch)
print("Completed epoch {}".format(epoch))
writer.close()
return net
lr_schedule = lambda iteration: min(iteration ** -0.5, iteration * 782 ** -1.5)
optimizer = mx.optimizer.SGD(lr_scheduler=lr_schedule)
trained_net = train_resnet(train_dataloader, test_dataloader, optimizer, description="baseline")
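# The schedule above ramps the learning rate up linearly until iteration 782 and then decays it as 1/sqrt(iteration), a Noam-style warmup schedule. A quick way to visualize it (a sketch, assuming matplotlib is available in this environment):
# +
import matplotlib.pyplot as plt

iterations = range(1, 4000)
plt.plot(list(iterations), [lr_schedule(i) for i in iterations])
plt.xlabel("iteration")
plt.ylabel("learning rate")
plt.show()
# -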
# ## Update 1: Shuffle training data
train_dataloader = mx.gluon.data.DataLoader(train_dataset, batch_size, shuffle=True)
optimizer = mx.optimizer.SGD(lr_scheduler=lr_schedule) # reset optimizer state (for momentum, lr schedule, etc)
trained_net = train_resnet(train_dataloader, test_dataloader, optimizer, description="w_shuffle")
# ## Update 2: Increase batch size
# +
batch_size = batch_size * 4
train_dataloader = mx.gluon.data.DataLoader(train_dataset, batch_size, shuffle=True)
test_dataloader = mx.gluon.data.DataLoader(test_dataset, batch_size)
# lr_schedule = lambda iteration: min(iteration ** -0.5, iteration * 782 ** -1.5)
new_lr_schedule = lambda iteration: lr_schedule(iteration*4) * 4
optimizer = mx.optimizer.SGD(lr_scheduler=new_lr_schedule)
trained_net = train_resnet(train_dataloader, test_dataloader, optimizer, description="inc_bs")
# -
# ## Update 3: Normalize data
# +
transform_fn = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010])
])
train_dataset = mx.gluon.data.vision.CIFAR10(train=True).transform_first(transform_fn)
train_dataloader = mx.gluon.data.DataLoader(train_dataset, batch_size, shuffle=True)
test_dataset = mx.gluon.data.vision.CIFAR10(train=False).transform_first(transform_fn)
test_dataloader = mx.gluon.data.DataLoader(test_dataset, batch_size)
optimizer = mx.optimizer.SGD(lr_scheduler=new_lr_schedule)
trained_net = train_resnet(train_dataloader, test_dataloader, optimizer, description="normalized_input")
# -
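# The mean/std constants used above are widely quoted CIFAR-10 normalization values. If you want to derive per-channel statistics yourself, here is a sketch that pools every pixel of the raw training images (the means should land near the values above; published std figures vary slightly depending on the exact convention used):
# +
import numpy as np

raw_train = mx.gluon.data.vision.CIFAR10(train=True)
images = np.stack([raw_train[i][0].asnumpy() for i in range(len(raw_train))])  # (50000, 32, 32, 3), uint8
images = images.astype('float32') / 255.0
print("channel means:", images.mean(axis=(0, 1, 2)))
print("channel stds: ", images.std(axis=(0, 1, 2)))
# -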
tb_server.stop()
| 9,159 |