| content | license | path | repo_name | chain_length |
|---|---|---|---|---|
| 
	<jupyter_start><jupyter_text># checking to make sure the data is indeed different for day/night<jupyter_code>plt.hist(day_argmax.flatten() - night_argmax.flatten(), bins=100);
plt.show()
plt.imshow(day_argmax - night_argmax);
np.mean(day_argmax.flatten() - night_argmax.flatten())<jupyter_output><empty_output><jupyter_text># iterate over some years<jupyter_code>var = 'AFC_num' # [AFC_num, AFC_perc, FRP_mean, FRP_max, FRP_total]
days, nights=[],[]
day_an, night_an = [],[]
for year in range(2003,2019):
    day_files, night_files = get_fire_year_files(raster_folder, var, year)
    day_argmax = nanargmax(day_files)
    night_argmax = nanargmax(night_files)
    
    days.append(day_argmax)
    nights.append(night_argmax)
    day_title = 'FY-{} Month of Maximum MODIS AFC - Daytime'.format(year)
    day_an.append(plot_max_month(day_argmax, raster_folder, day_title))
    
    night_title = 'FY-{} Month of Maximum MODIS AFC - Nighttime'.format(year)
    night_an.append(plot_max_month(night_argmax, raster_folder, night_title))
    
    print('*************************************************************************')
# sanity check that they aren't all the same
ind=0
plt.hist(days[ind].flatten() - days[ind+4].flatten(), bins=100);
plt.show()
plt.imshow(days[ind] - days[ind+1]);
kwargs_write = {'fps':1.0, 'quantizer':'nq'}
imageio.mimsave('./peak_month_night.gif', night_an, fps=1)
imageio.mimsave('./peak_month_day.gif', day_an, fps=1)
# trying to remap to 3 month intervals
var = 'AFC_num' # [AFC_num, AFC_perc, FRP_mean, FRP_max, FRP_total]
days, nights=[],[]
day_an, night_an = [],[]
for year in range(2003,2019):
    day_files, night_files = get_fire_year_files(raster_folder, var, year)
    day_argmax = nanargmax(day_files)
    night_argmax = nanargmax(night_files)
    
    #remap
    day_argmax = remap_months3(day_argmax)
    night_argmax = remap_months3(night_argmax)
    
    days.append(day_argmax)
    nights.append(night_argmax)
    day_title = 'FY-{} Month of Maximum MODIS AFC - Daytime'.format(year)
    day_an.append(plot_max_month_groups(day_argmax, raster_folder, day_title))
    
    night_title = 'FY-{} Month of Maximum MODIS AFC - Nighttime'.format(year)
    night_an.append(plot_max_month_groups(night_argmax, raster_folder, night_title))
    
    print('*************************************************************************')<jupyter_output><empty_output><jupyter_text>## plot average season<jupyter_code>mean_day = np.ma.round(np.ma.array(days).mean(axis=0))
mean_night = np.ma.round(np.ma.array(nights).mean(axis=0))
plot_max_month_groups(mean_day, raster_folder, 'mean day season');
plot_max_month_groups(mean_night, raster_folder, 'mean night season');<jupyter_output><empty_output><jupyter_text>## look at trends in peak month / season<jupyter_code>len(days), len(nights)
var = 'AFC_num' # [AFC_num, AFC_perc, FRP_mean, FRP_max, FRP_total]
raster_folder = r"../vars/grid_0_25_degree_vars/" # data refresh!
days_sum, nights_sum=[],[]
day_an, night_an = [],[]
for year in range(2003,2019):
    day_files, night_files = get_fire_year_files(raster_folder, var, year)
    day_sum = nansum(day_files)
    night_sum = nansum(night_files)
    
    days_sum.append(day_sum)
    nights_sum.append(night_sum)
    day_title = 'FY-{} Sum MODIS AFC - Daytime'.format(year)
    #day_an.append(gen_plot_xarr(day_sum, raster_folder, day_title))
    
    night_title = 'FY-{} Sum AFC - Nighttime'.format(year)
    #night_an.append(gen_plot_xarr(night_sum, raster_folder, night_title))
    
    #print('*************************************************************************')
np.ma.array(days_sum).shape
# total sum:
tot_day_sum = np.nansum(np.ma.array(days_sum), axis=0)
tot_night_sum = np.nansum(np.ma.array(nights_sum), axis=0)
<jupyter_output><empty_output><jupyter_text># Look at Fire Season Length
We define the duration of the fire season as the number of months during which the average monthly overpass-corrected fire counts are at least 10% of the average annual overpass-corrected fire counts.
We can look at this per year: for each row-column entry we calculate the 10% mark and count the number of months that rise above it (a toy check of this rule is sketched after the `calc_fire_season` definition below).<jupyter_code># open and resample to 2.5 deg res
from skimage.transform import resize
with rio.open('../vars/aqua-terra-overpasses-per-day_0.25-degree-grid.tif') as src:
    op_0_25 = src.read()[0]
    print(src.profile)
    
    op_0_25[op_0_25<=0] = 0
with rio.open('../vars/aqua-terra-overpasses-per-day_2.5-degree-grid.tif') as src:
    op_0_2_5 = src.read()[0]
    print(src.profile)
    
    op_0_2_5[op_0_2_5<=0] = 0
    
def calc_fire_season(files_array):
    
    # stack the files
    arr,_ = stack(files_array, nodata=-32768)
    
    # take the mean across the time axis
    p10 = np.nanmean(arr, axis=0)*0.1
    
    # sum across time axis
    fs_arr = arr > p10
    
    
    return fs_arr.sum(axis=0)
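# --- Illustrative aside (not from the original notebook) ---
# A minimal toy check of the season-length rule described above, assuming numpy is already
# imported as np (as elsewhere in this notebook): for one hypothetical pixel, count the months
# whose value exceeds 10% of the across-month mean, mirroring what calc_fire_season does per cell.
_toy_monthly_afc = np.array([0, 1, 2, 40, 80, 120, 90, 30, 5, 1, 0, 0], dtype=float)  # hypothetical monthly counts
_toy_threshold = np.nanmean(_toy_monthly_afc) * 0.1
print('toy fire season length (months):', int((_toy_monthly_afc > _toy_threshold).sum()))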
# need to figure out the best way to get the overpass correction integrated monthly
def calc_fire_season_agg_oc(files_array, agg_fac=1):
    
    # stack the files
    arr,_ = es_stack(files_array, nodata=-32768)
    op_oc = np.ma.masked_equal(op_0_25*30.44/2, 0)
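    # note: 30.44 ≈ average number of days per month, halved to split overpasses between day and
    # night (the author's commented note below flags that a proper per-month treatment is still TODO)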
    arr *= op_oc
    arr = np.ma.masked_equal(arr, -32768)
    
    p10 = np.nanmedian(arr, axis=0)*0.1
    #print(f'p10: {np.nansum(p10)}')
    #arr*= op_oc #op_0_25*30.44/2 # number of days per year, divide by two for night/day. NEED TO DO PER MONTH... avg. days per mo?
    
    arr_list = []
    if agg_fac > 1:
        
        for a in arr:
            a[a.mask] = 0
            temp = block_reduce(a, (agg_fac, agg_fac), func=np.ma.sum)  # use the function's agg_fac, not the notebook-level agg
            arr_list.append(np.ma.masked_equal(temp, 0))
            
        arr = np.array(arr_list)
    
    
    
    #arr[arr == 0] = np.nan
    # take the mean across the time axis
    p10 = np.nanmean(arr, axis=0)*0.1
    #print(f'after agg p10: {np.nansum(p10)}')
    
    # sum across time axis
    fs_arr = arr > p10    
    
    fs_sum = np.ma.sum(fs_arr, axis=0)
    fs_sum = np.ma.masked_equal(fs_sum, 0)
    return fs_sum
    
days_fl, nights_fl=[],[]
day_an, night_an = [],[]
# for year in range(2003,2019):
for year in range(2003,2019): # data update 11/11/2020
    
    day_files, night_files = get_fire_year_files(raster_folder, var, year)
    #day_fl = calc_fire_season(day_files)
    #night_fl = calc_fire_season(night_files)
    
    agg = 4
    day_fl = calc_fire_season_agg_oc(day_files, agg_fac=agg)
    night_fl = calc_fire_season_agg_oc(night_files, agg_fac=agg)
    
    days_fl.append(day_fl)
    nights_fl.append(night_fl)
    
    day_title = 'FY-{} Fire Season Length AFC - Daytime'.format(year)
    #day_an.append(gen_plot_xarr(day_fl, raster_folder, day_title, cmap='Spectral_r'))
    
    night_title = 'FY-{} Fire Season Length - Nighttime'.format(year)
    #night_an.append(gen_plot_xarr(night_fl, raster_folder, night_title, cmap='Spectral_r'))
    
    #print('*************************************************************************')
# debug block reduce
arr,_ = es_stack(day_files, nodata=-32768)
print(type(arr))
op_oc = np.ma.masked_equal(op_0_25*30.44/2, 0)
print(type(op_oc))
arr *= op_oc
print(type(arr))
#arr = np.ma.masked_equal(arr, -32768)
# arr_resized = np.array([ block_reduce(a, (agg_fac, agg_fac), np.nansum) for a in arr])
for a in arr:
    a[a.mask] = 0
    temp = block_reduce(a, (agg, agg), func=np.ma.sum)
    print(type(temp), np.ma.sum(temp), np.sum(a))
-32768*4, -524288/-32768<jupyter_output><empty_output><jupyter_text>try scipy.stats.linregress and apply along axis<jupyter_code>oc = True
# no overpass correction
day_arr = np.array(days_fl)
night_arr = np.array(nights_fl)
from scipy.stats import linregress<jupyter_output><empty_output><jupyter_text>Try as row vectors first<jupyter_code>s,i,r,p,ste, n = [],[],[],[],[],[]
x = np.arange(night_arr.shape[0])
y_arr = np.rollaxis(night_arr,0,3)
y_arr = np.reshape(y_arr, (night_arr.shape[1]*night_arr.shape[2], night_arr.shape[0]))
for y in y_arr:
    _s, _i, _r, _p, _ste = linregress(x,y)
    _n = (~np.isnan(y)).sum()  # count of non-NaN observations (parentheses matter: ~ must apply before .sum())
    
    s.append(_s)
    i.append(_i)
    r.append(_r)
    p.append(_p)
    ste.append(_ste)
    n.append(_n)
s_arr = np.array(s).reshape(night_arr.shape[1:])
p_arr = np.array(p).reshape(night_arr.shape[1:])
gen_plot_xarr(np.where((p_arr < 0.1) & (s_arr > 0), p_arr, np.nan), raster_folder, night_title, cmap='Spectral_r', agg=agg);
gen_plot_xarr(np.where((s_arr > 0), p_arr, np.nan), raster_folder, night_title, cmap='Spectral_r', agg=agg);
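# --- Optional cross-check (a sketch, not part of the original workflow) ---
# np.polyfit fits every pixel at once when y has shape (n_years, n_pixels); coefficient row 0 holds
# the slopes, which should agree with the linregress loop above up to floating-point error.
slopes_vec = np.polyfit(x, y_arr.T, deg=1)[0].reshape(night_arr.shape[1:])
print('max |linregress slope - polyfit slope|:', np.nanmax(np.abs(slopes_vec - s_arr)))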
norm=True
if norm:
    # try to normalize the data
    day_max = day_arr.max()
    day_min = day_arr.min()
    night_max = night_arr.max()
    night_min = night_arr.min()
    tot_min = min(night_min, day_min)
    tot_max = max(night_max, day_max)
    test_day = (day_arr - tot_min) / (tot_max - tot_min)
    test_night = (night_arr - tot_min) / (tot_max - tot_min)
    #test_years -= base_year # normalize
        
s,i,r,p,ste, n = [],[],[],[],[],[]
x = np.arange(night_arr.shape[0])
y_arr = np.rollaxis(test_night,0,3)
y_arr = np.reshape(y_arr, (test_night.shape[1]*test_night.shape[2], test_night.shape[0]))
for y in y_arr:
    _s, _i, _r, _p, _ste = linregress(x,y)
    #_n = ~np.isnan(y).sum()
    _n = (y>0).sum()
    
    s.append(_s)
    i.append(_i)
    r.append(_r)
    p.append(_p)
    ste.append(_ste)
    n.append(_n)
s_arr = np.array(s).reshape(night_arr.shape[1:])
p_arr = np.array(p).reshape(night_arr.shape[1:])
n_arr = np.array(n).reshape(night_arr.shape[1:])
night_title = 'Night fire season length slope > 0, p < 0.05'
gen_plot_xarr(np.where((p_arr < 0.05) & (s_arr > 0), p_arr, np.nan), raster_folder, night_title, cmap='Spectral_r', agg=agg);
gen_plot_xarr(np.where((p_arr < 0.05) & (s_arr > 0), n_arr, np.nan), raster_folder, "number of data points", cmap='Spectral_r', agg=agg);
night_title = 'Night fire season length slope < 0, p < 0.05'
gen_plot_xarr(np.where((p_arr < 0.05) & (s_arr < 0), p_arr, np.nan), raster_folder, night_title, cmap='Spectral_r', agg=agg);
gen_plot_xarr(np.where((p_arr < 0.05) & (s_arr < 0), n_arr, np.nan), raster_folder, "number of data points", cmap='Spectral_r', agg=agg);
night_title = 'Night fire season length slope > 0'
gen_plot_xarr(s_arr, raster_folder, night_title, cmap='Spectral_r', agg=agg);
gen_plot_xarr(np.where(p_arr != 1, p_arr, np.nan), raster_folder, "p-values", cmap='Spectral_r', agg=agg);
gen_plot_xarr(n_arr, raster_folder, "number of data points", cmap='Spectral_r', agg=agg);<jupyter_output><empty_output><jupyter_text># Overpass corrected
### write out the nighttime data<jupyter_code>if not os.path.exists(r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg'):
    os.makedirs(r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg')
    
# write out all nighttime rasters
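# note: the write_out() helper used below is defined further down in this notebook,
# so these cells assume that definition has already been executed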
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_pvals_fireyear.tif'
write_out(np.where(p_arr != 1, p_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_slope_pos_fireyear.tif'
write_out(np.where(s_arr > 0, s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_slope_neg_fireyear.tif'
write_out(np.where(s_arr < 0, s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_numyears_fireyear.tif'
write_out(n_arr, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.05), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.05), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.1), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_night_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.1), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_night_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.05), s_arr, np.nan) *0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_night_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.05), s_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_night_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.1), s_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_night_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.1), s_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')<jupyter_output><empty_output><jupyter_text>### write out the nighttime data<jupyter_code># write out all nighttime rasters
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_pvals_fireyear.tif'
write_out(np.where(p_arr != 1, p_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_slope_pos_fireyear.tif'
write_out(np.where(s_arr > 0, s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_slope_neg_fireyear.tif'
write_out(np.where(s_arr < 0, s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_numyears_fireyear.tif'
write_out(n_arr, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.05), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.05), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.1), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_night_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.1), s_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_night_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.05), s_arr, np.nan) *0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_night_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.05), s_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_night_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr > 0) & (p_arr <= 0.1), s_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_night_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((s_arr < 0) & (p_arr <= 0.1), s_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')<jupyter_output><empty_output><jupyter_text>### do the same for daytime data<jupyter_code>s_d,i_d,r_d,p_d,ste_d, n_d = [],[],[],[],[],[]
x = np.arange(day_arr.shape[0])
y_arr = np.rollaxis(test_day,0,3)
y_arr = np.reshape(y_arr, (test_day.shape[1]*test_day.shape[2], test_day.shape[0]))
for y in y_arr:
    _s, _i, _r, _p, _ste = linregress(x,y)
    #_n = ~np.isnan(y).sum()
    _n = (y>0).sum()
    
    s_d.append(_s)
    i_d.append(_i)
    r_d.append(_r)
    p_d.append(_p)
    ste_d.append(_ste)
    n_d.append(_n)
sd_arr = np.array(s_d).reshape(day_arr.shape[1:])
pd_arr = np.array(p_d).reshape(day_arr.shape[1:])
nd_arr = np.array(n_d).reshape(day_arr.shape[1:])
if not os.path.exists(r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg'):
    os.makedirs(r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg')
    
# write out all nighttime rasters
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_pvals_day_fireyear.tif'
write_out(np.where(pd_arr != 1, pd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_slope_pos_fireyear.tif'
write_out(np.where(sd_arr > 0, sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_slope_neg_fireyear.tif'
write_out(np.where(sd_arr < 0, sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_numyears_fireyear.tif'
write_out(nd_arr, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.05), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.05), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.1), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\all_day_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.1), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_day_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.05), sd_arr, np.nan) *0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_day_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.05), sd_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_day_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.1), sd_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\overpass_corrected_agg\BIN_all_day_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.1), sd_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out all nighttime rasters
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_pvals_day_fireyear.tif'
write_out(np.where(pd_arr != 1, pd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_slope_pos_fireyear.tif'
write_out(np.where(sd_arr > 0, sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_slope_neg_fireyear.tif'
write_out(np.where(sd_arr < 0, sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_numyears_fireyear.tif'
write_out(nd_arr, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.05), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.05), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.1), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\all_day_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.1), sd_arr, np.nan), fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.05
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_day_slope_pos_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.05), sd_arr, np.nan) *0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_day_slope_neg_PVAL0_05_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.05), sd_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
# write out slopes where p < 0.1
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_day_slope_pos_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr > 0) & (pd_arr <= 0.1), sd_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\BIN_all_day_slope_neg_PVAL0_1_fireyear.tif'
write_out(np.where((sd_arr < 0) & (pd_arr <= 0.1), sd_arr, np.nan)*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
day_title = 'day fire season length slope > 0'
gen_plot_xarr(sd_arr, raster_folder, day_title, cmap='Spectral_r');
gen_plot_xarr(np.where(np.abs(sd_arr) >=0.01, sd_arr, np.nan), raster_folder, day_title, cmap='Spectral_r');
gen_plot_xarr(np.where(pd_arr != 1, pd_arr, np.nan), raster_folder, "p-values", cmap='Spectral_r');
gen_plot_xarr(nd_arr, raster_folder, "number of data points", cmap='Spectral_r');
(sd_arr - s_arr).sum()
night_title = 'night fire season length slope > 0'
gen_plot_xarr(s_arr, raster_folder, night_title, cmap='Spectral_r');
gen_plot_xarr(np.where(np.abs(s_arr) >=0.01, s_arr, np.nan), raster_folder, night_title, cmap='Spectral_r');
gen_plot_xarr(np.where(p_arr != 1, p_arr, np.nan), raster_folder, "p-values", cmap='Spectral_r');
gen_plot_xarr(n_arr, raster_folder, "number of data points", cmap='Spectral_r');
np.abs(n_arr - nd_arr).sum()
# np.apply_along_axis(linregress, 0, np.arange(day_arr.shape[0]), y=day_arr)
temp = np.apply_along_axis(linregress, 0, day_arr, y=np.arange(day_arr.shape[0]))
s_day, i_day, r_day, p_day, stderr_day = temp
gen_plot_xarr(np.where((p_day < 0.05) & (s_day > 0), p_day, np.nan), raster_folder, night_title, cmap='Spectral_r')
night_arr = np.array(nights_fl)
temp_n = np.apply_along_axis(linregress, 0, night_arr, y=np.arange(night_arr.shape[0]))
s_night, i_night, r_night, p_night, stderr_night = temp_n
gen_plot_xarr(np.where((p_night < 0.05) & (s_night > 0), p_night, np.nan), raster_folder, night_title, cmap='Spectral_r')
ex_x = np.arange(night_arr.shape[0])
ex_x = ex_x[np.newaxis, np.newaxis, :]
temp_n = np.apply_along_axis(linregress, 0, ex_x, y=night_arr)
n=10
_,_,_,_=plot_regress_var(np.array(days_fl), np.array(nights_fl),
                 raster_folder, af_var='AFC_num', reg_month='FIRE SEASON LENGTH', reg_var='slope', n_obsv=n, agg_fact=1,
                    absmin=-.06, # was 3000 un normalized
                    absmax=.06,
                    cm='coolwarm',
                    save=False,
                    save_dir="",
                    cartoplot=True,
                    norm=True,
                    min_year=None)
n=10
plot_regress_var(np.array(days_fl), np.array(nights_fl),
                 raster_folder, af_var='AFC_num', reg_month='FIRE SEASON LENGTH', reg_var='slope', n_obsv=n, agg_fact=1,
                    absmin=-0.75, # was 3000 un normalized
                    absmax=0.75,
                    cm='coolwarm',
                    save=False,
                    save_dir="",
                    cartoplot=True,
                    norm=False,
                    min_year=None)<jupyter_output><empty_output><jupyter_text>## check the overpass estimate raster<jupyter_code># open and resample to 2.5 deg res
from skimage.transform import resize
with rio.open('../vars/aqua-terra-overpasses-per-day_0.25-degree-grid.tif') as src:
    op_0_25 = src.read()[0]
    print(src.profile)
    
    op_0_25[op_0_25<=0] = 0
with rio.open('../vars/aqua-terra-overpasses-per-day_2.5-degree-grid.tif') as src:
    op_0_2_5 = src.read()[0]
    print(src.profile)
    
    op_0_2_5[op_0_2_5<=0] = 0
    
plt.figure(figsize=(10,5))
plt.imshow(op_0_25)
plt.colorbar()
plt.show()
plt.figure(figsize=(10,5))
plt.imshow(op_0_2_5)
plt.colorbar()
plt.show()
n=10
dson_slope, nson_slope, nd_slope_ratio, nd_slope_dif= plot_regress_var(np.array(days_fl), np.array(nights_fl),
                                                                         raster_folder, af_var='AFC_num', reg_month='SUM', reg_var='slope', n_obsv=n, agg_fact=1,
                                                                            absmin=-0.03, # was 3000 un normalized
                                                                            absmax=0.01,
                                                                            cm='gnuplot',
                                                                            save=False,
                                                                            save_dir="",
                                                                            cartoplot=True,
                                                                            norm=True,
                                                                            min_year=None)
night_slope_sign = np.sign(nson_slope)
day_slope_sign = np.sign(dson_slope)
night_slope_pos = np.where(night_slope_sign > 0, nson_slope, np.nan)
# plot it
_ = gen_plot_xarr(night_slope_pos, raster_folder, title='Where Night Time Slope Across Fire Years 2003-2018 is Positive\n \
                  Active Fire Counts Summed Across Each Fire Year (overpass corrected)', cmap='coolwarm');
night_slope_sign = np.sign(nson_slope)
day_slope_sign = np.sign(dson_slope)
night_slope_pos = np.where(night_slope_sign > 0, nson_slope, np.nan)
day_slope_pos = np.where(day_slope_sign > 0, dson_slope, np.nan)
night_slope_neg = np.where(night_slope_sign < 0, nson_slope, np.nan)
day_slope_neg = np.where(day_slope_sign < 0, dson_slope, np.nan)
# plot it
_ = gen_plot_xarr(night_slope_pos, raster_folder, title='Where Night Time Slope Across Fire Years 2003-2018 is Positive\n \
                  Active Fire Counts Summed Across Each Fire Year (not overpass corrected)');
# plot it
_ = gen_plot_xarr(day_slope_pos, raster_folder, title='Where DAY Time Slope Across Fire Years 2003-2018 is Positive\n \
                  Active Fire Counts Summed Across Each Fire Year (not overpass corrected)');
# a 0.1 deg latitude is 11km @ equator... so 2.5 deg is 275km. 
# an equal area world projection maps to 278298.727 meters for x-y dimensions (World_Cylindrical_Equal_Area) 2.5 deg
# an equal area world projection maps to 27829.872 meters for x-y dimensions (World_Cylindrical_Equal_Area) 0.25 deg
eq_area = 27829.872
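# quick arithmetic check: 0.25 deg x ~111.32 km/deg at the equator ≈ 27.83 km, matching eq_area above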
# earth surface area with positive night time fire slope:
num_pos_pixels = night_slope_pos[~np.isnan(night_slope_pos)].size
surf_area = num_pos_pixels*eq_area*eq_area
surf_area_km2 = surf_area / 1000 / 1000
print(f'earth surface area with positive NIGHT time fire slope: {surf_area_km2:.2E} km^2')
# land surface area of Earth ≈ 148.847x10^6 km2
earth_land_surf_area_km2 = 148.847e6
print(f'% of land surface: {100*surf_area_km2/earth_land_surf_area_km2:.2f}%')
# a 0.1 deg latitude is 11km @ equator... so 2.5 deg is 275km. 
# an equal area world projection maps to 278298.727 meters for x-y dimensions (World_Cylindrical_Equal_Area) 2.5 deg
# an equal area world projection maps to 27829.872 meters for x-y dimensions (World_Cylindrical_Equal_Area) 0.25 deg
eq_area = 27829.872
# earth surface area with positive DAY time fire slope:
num_pos_pixels = day_slope_pos[~np.isnan(day_slope_pos)].size
surf_area = num_pos_pixels*eq_area*eq_area
surf_area_km2 = surf_area / 1000 / 1000
print(f'earth surface area with positive DAY time fire slope: {surf_area_km2:.2E} km^2')
# land surface area of Earth ≈ 148.847x10^6 km2
earth_land_surf_area_km2 = 148.847e6
print(f'% of land surface: {100*surf_area_km2/earth_land_surf_area_km2:.2f}%')
# a 0.1 deg latitude is 11km @ equator... so 2.5 deg is 275km. 
# an equal area world projection maps to 278298.727 meters for x-y dimensions (World_Cylindrical_Equal_Area) 2.5 deg
# an equal area world projection maps to 27829.872 meters for x-y dimensions (World_Cylindrical_Equal_Area) 0.25 deg
eq_area = 27829.872
# earth surface area with negative DAY time fire slope:
num_pos_pixels = day_slope_neg[~np.isnan(day_slope_neg)].size
surf_area = num_pos_pixels*eq_area*eq_area
surf_area_km2 = surf_area / 1000 / 1000
print(f'earth surface area with negative DAY time fire slope: {surf_area_km2:.2E} km^2')
# land surface area of Earth ≈ 148.847x10^6 km2
earth_land_surf_area_km2 = 148.847e6
print(f'% of land surface: {100*surf_area_km2/earth_land_surf_area_km2:.2f}%')
# a 0.1 deg latitude is 11km @ equator... so 2.5 deg is 275km. 
# an equal area world projection maps to 278298.727 meters for x-y dimensions (World_Cylindrical_Equal_Area) 2.5 deg
# an equal area world projection maps to 27829.872 meters for x-y dimensions (World_Cylindrical_Equal_Area) 0.25 deg
eq_area = 27829.872
# earth surface area with negative NIGHT time fire slope:
num_pos_pixels = night_slope_neg[~np.isnan(night_slope_neg)].size
surf_area = num_pos_pixels*eq_area*eq_area
surf_area_km2 = surf_area / 1000 / 1000
print(f'earth surface area with negative NIGHT time fire slope: {surf_area_km2:.2E} km^2')
# land surface area of Earth ≈ 148.847x10^6 km2
earth_land_surf_area_km2 = 148.847e6
print(f'% of land surface: {100*surf_area_km2/earth_land_surf_area_km2:.2f}%')
# compare histograms
plt.figure(figsize=(20,10))
plt.hist(night_slope_pos.ravel(), bins=100, alpha=0.2, edgecolor='k', label='FY Length Trend Positive')
plt.legend(fontsize=13)
plt.xlabel('Normalized Slope', fontsize=13)
plt.ylabel('# 0.25 Deg Pixels', fontsize=13)
plt.show()
day_files
with rio.open(day_files[3]) as src:
    tform = src.profile['transform']
num_x = night_slope_neg.shape[1]
num_y = night_slope_neg.shape[0]
# incorporate aggregation factor
tlon = np.linspace(tform.c - tform.a, tform.c+num_x*tform.a, num_x)
tlat = np.linspace(tform.f - tform.e, tform.f+num_y*tform.e, num_y)
dplot = night_slope_neg*0+1
nplot = night_slope_pos*0+1
x_dplot = xr.DataArray(dplot, coords=[tlat, tlon], dims=['lat', 'lon'])
x_nplot = xr.DataArray(nplot, coords=[tlat, tlon], dims=['lat', 'lon'])
fig,ax = plt.subplots(figsize=(20,10))
ax = plt.axes(projection=ccrs.EqualEarth())
ax.set_global()
ax.coastlines()
# ax.gridlines()
x_dplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=1, cmap='coolwarm', add_colorbar=False) # blue
x_nplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.1, cmap='coolwarm_r',add_colorbar=False)    # red
# ax.imshow(nplot, alpha=1., cmap='coolwarm') # blue, night
# ax.imshow(dplot, alpha=0.25, cmap='coolwarm_r') # red, day
plt.title('Where NIGHT slope is positive (blue) and negative (red)')
plt.show()
num_x = night_slope_neg.shape[1]
num_y = night_slope_neg.shape[0]
# incorporate aggregation factor
tlon = np.linspace(tform.c - tform.a, tform.c+num_x*tform.a, num_x)
tlat = np.linspace(tform.f - tform.e, tform.f+num_y*tform.e, num_y)
dplot = day_slope_neg*0+1
nplot = day_slope_pos*0+1
x_dplot = xr.DataArray(dplot, coords=[tlat, tlon], dims=['lat', 'lon'])
x_nplot = xr.DataArray(nplot, coords=[tlat, tlon], dims=['lat', 'lon'])
fig,ax = plt.subplots(figsize=(20,10))
ax = plt.axes(projection=ccrs.EqualEarth())
ax.set_global()
ax.coastlines()
# ax.gridlines()
x_nplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.1, cmap='coolwarm',add_colorbar=False)    # blue
x_dplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.1, cmap='coolwarm_r', add_colorbar=False) # red
# ax.imshow(nplot, alpha=1., cmap='coolwarm') # blue, night
# ax.imshow(dplot, alpha=0.25, cmap='coolwarm_r') # red, day
plt.title('Where DAY slope is positive (blue) and negative (red)')
plt.show()
# where night and day slope is positive
num_x = night_slope_neg.shape[1]
num_y = night_slope_neg.shape[0]
# incorporate aggregation factor
tlon = np.linspace(tform.c - tform.a, tform.c+num_x*tform.a, num_x)
tlat = np.linspace(tform.f - tform.e, tform.f+num_y*tform.e, num_y)
dplot = day_slope_pos*0+1
nplot = night_slope_pos*0+1
x_dplot = xr.DataArray(dplot, coords=[tlat, tlon], dims=['lat', 'lon'])
x_nplot = xr.DataArray(nplot, coords=[tlat, tlon], dims=['lat', 'lon'])
fig,ax = plt.subplots(figsize=(20,18))
ax = plt.axes(projection=ccrs.EqualEarth())
ax.set_global()
ax.coastlines()
# ax.gridlines()
# x_dplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.15, cmap='autumn', add_colorbar=False)
# x_nplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.15, cmap='winter',add_colorbar=False)
# night positive
x_nplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.3, cmap='coolwarm', add_colorbar=False) # blue
# day positive
x_dplot.plot(ax=ax, transform=ccrs.PlateCarree(), alpha=0.1, cmap='coolwarm_r',add_colorbar=False)    # red
# ax.imshow(nplot, alpha=1., cmap='coolwarm') # blue, night
# ax.imshow(dplot, alpha=0.25, cmap='coolwarm_r') # red, day
plt.title('Where NIGHT slope is positive (blue) and DAY slope is positive (red)')
plt.show()
# write out the rasters
def write_out(xarr, fname, meta_file):
    
    # load metadata
    with open(meta_file, 'rb') as fp:
        meta = pickle.load(fp)
        
    with rio.open(fname, 'w', **meta) as dst:
        dst.write(xarr[np.newaxis,...].astype(np.float32))
        
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\day_slope_pos_fireyear.tif'
write_out(day_slope_pos*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\day_slope_neg_fireyear.tif'
write_out(day_slope_neg*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\night_slope_pos_fireyear.tif'
write_out(night_slope_pos*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
fn = r'D:\projects\RD\night_fire\figs\global\fire_year\night_slope_neg_fireyear.tif'
write_out(night_slope_neg*0+1, fn, 'climate_grid_meta_0_25_wgs84.pickle')
print('day\tnight')
print(np.nansum(day_slope_pos*0+1), np.nansum(night_slope_pos*0+1))
print(np.nansum(day_slope_neg*0+1), np.nansum(night_slope_neg*0+1))
print(np.nansum(dson_slope*0+1), np.nansum(nson_slope*0+1))<jupyter_output><empty_output><jupyter_text>## using regionmask for data masking (test stuff)<jupyter_code>import regionmask
import xshape
import geopandas as gpd
import xarray as xr
conus = gpd.read_file('../CONUS_boundary/CONUS_boundary.shp')
conus.plot()
conus_wgs84 = conus.to_crs(epsg=4326)
# try to use regionmask to clip xarray
day_files, night_files = get_fire_year_files(raster_folder.replace('2_5', '0_25'), var, 2010)
data = xr.open_rasterio(day_files[1])
data
# construct the region object
numbers = conus_wgs84.index
names = [str(i) for i in conus_wgs84.OBJECTID]
abbrevs = [str(i) for i in conus_wgs84.OBJECTID]
geoms = list(conus_wgs84.geometry)
rmask = regionmask.Regions_cls('USmask', numbers, names, abbrevs, geoms)
# construct the mask, matching spatial coordinate dimensions
data_mask = rmask.mask(data, lon_name='x', lat_name='y')
# mask the data, as well as nodata
nodata=-32768
masked_data = data.where(data_mask>=0)
masked_data = masked_data.where(masked_data!=nodata)
masked_data.plot(figsize=(20,10))
masked_data.min(), masked_data.max()<jupyter_output><empty_output> | 
no_license | /notebooks/fire_season_length-scipy-aggregate.ipynb | joemcglinchy/night_fire | 12 |
| 
<jupyter_start><jupyter_text>Tuples<jupyter_code>tup = (4,5,6)
tup
tuple([1,2,3])
a,b,c = tup
print(a)
seq = [(1,2,3),(4,5,6),(7,8,9)]
for a,b,c in seq:
    print('a={0},b={1},c={2}'.format(a,b,c))
a = range(10)
a = list(a)
a.append(10)
a.insert(2,100)
a
a.pop(1)
a
a = [1,2,5,5,8,7,6,3,1,1]
a.sort()
a
import bisect
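# bisect operates on sorted lists: bisect.bisect returns the index where a value would be
# inserted to keep the list sorted, while bisect.insort actually inserts it in place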
c = [1,2,4,8,5,1,3,4]
c.sort()
c
bisect.bisect(c,5)
bisect.insort(c,7)
c
c[1:5]
some_list = ['foo','bar','baz']
mapping = {}
for i,v in enumerate(some_list):
    mapping[v] = i
mapping
sorted("schedule")
seq1 = ['foo','bar','zae']
seq2 = ['one','two','three']
for i,(a,b) in enumerate(zip(seq1,seq2)):
    print('{0}:{1},{2}'.format(i,a,b))
words = ['apple','bat','bar','atom','car']
by_letter = {}
for word in words:
    letter = word[0]
    by_letter.setdefault(letter,[]).append(word)
by_letter
strings = ['a','as','bat','car','dove']
a = [x.upper() for x in strings if len(x)>2]
a
unique_lengths = [len(x) for x in strings]
unique_lengths
loc_mapping = {val : index for index, val in enumerate(strings)}
loc_mapping
all_data = [['John', 'Emily', 'Michael', 'Mary', 'Steven'],
            ['Maria', 'Juan', 'Javier', 'Natalia', 'Pilar']]
result = [name for names in all_data for name in names if name.count('e')>=2]
result
def f():
    a = 5
    b = 6
    c = 7
    return a,b,c
a,b,c = f()
a,b,c
states = ['Alabama','   Hen','Garmel!',' ufof hh###']
import re 
def clean_string(strings):
    result = []
    for value in strings:
        value = value.strip()
        value = re.sub('[!#?]','',value)
        result.append(value)
    return result
clean_string(states)
import numpy as np
data = np.random.randn(2,3)
data
data*10
data+data
data = [[1,2.3,3.5,1.2,2],[1,5,4,6,5]]
arr1 = np.array(data)
arr1
arr1.ndim
arr1.shape
import random
import matplotlib.pyplot as plt  # needed for the random-walk plot below
position = 0
walk = [position]
steps = 1000
for i in range(steps):
    step = 1 if random.randint(0,1) else -1
    position += step
    walk.append(position)
    
plt.plot(walk[:100])<jupyter_output><empty_output> | 
no_license | /Mouse_3.ipynb | Mizaoz/test | 1 |
| 
	<jupyter_start><jupyter_text>print(results[0])<jupyter_code>import os
from os import path
import matplotlib.pyplot as plt
from wordcloud import WordCloud,STOPWORDS
text = open('Twitter.txt', "r").read()  # WordCloud.generate() expects a string, not a file handle
wordcloud = WordCloud().generate(text)
wordcloud = WordCloud(font_path='ARegular.ttf',background_color='white',mode='RGB',width=4000,height=2000).generate(text)
plt.title("Tweets related to Donald Trump")
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
wordcloud.to_file("twitter.png")<jupyter_output><empty_output> | 
no_license | /NLP-Twitter.ipynb | jaypatel333/ML | 1 |
| 
	<jupyter_start><jupyter_text># The Workflows of Data-centric AI for Classification with Noisy Labels
In this tutorial, you will learn how to easily incorporate [cleanlab](https://github.com/cleanlab/cleanlab) into your ML development workflows to:
- Automatically find label issues lurking in your classification data.
- Score the label quality of every example in your dataset.
- Train robust models in the presence of label issues.
- Identify overlapping classes that you can merge to make the learning task less ambiguous.
- Generate an overall label health score to track improvements in your labels as you clean your datasets over time.
This tutorial provides an in-depth survey of many possible different ways that cleanlab can be utilized for Data-Centric AI. If you have a different use-case in mind that is not supported, please [tell us about it](https://github.com/cleanlab/cleanlab/issues)!
While this tutorial focuses on standard multi-class (and binary) classification datasets, cleanlab also supports other tasks including: [data labeled by multiple annotators](multiannotator.html), [multi-label classification](../cleanlab/filter.rst#cleanlab.filter.find_label_issues), and [token classification of text](token_classification.html).
**cleanlab is grounded in theory and science**. Learn more:
[Research Publications](https://cleanlab.ai/research)  |  [Label Errors found by cleanlab](https://labelerrors.com/)  |  [Examples using cleanlab](https://github.com/cleanlab/examples)
## Install dependencies and import them
You can use pip to install all packages required for this tutorial as follows:
```
!pip install sklearn matplotlib
!pip install cleanlab
# Make sure to install the version corresponding to this tutorial
# E.g. if viewing master branch documentation:
#     !pip install git+https://github.com/cleanlab/cleanlab.git
```<jupyter_code># Package installation (hidden on docs website).
# Package versions used: matplotlib==3.5.1 
dependencies = ["cleanlab", "sklearn", "matplotlib"]
if "google.colab" in str(get_ipython()):  # Check if it's running in Google Colab
    %pip install cleanlab  # for colab
    cmd = ' '.join([dep for dep in dependencies if dep != "cleanlab"])
    %pip install $cmd
else:
    missing_dependencies = []
    for dependency in dependencies:
        try:
            __import__(dependency)
        except ImportError:
            missing_dependencies.append(dependency)
    if len(missing_dependencies) > 0:
        print("Missing required dependencies:")
        print(*missing_dependencies, sep=", ")
        print("\nPlease install them before running the rest of this notebook.")
%config InlineBackend.print_figure_kwargs={"facecolor": "w"}
import numpy as np
import cleanlab
from cleanlab.classification import CleanLearning
from cleanlab.benchmarking import noise_generation
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from numpy.random import multivariate_normal
from matplotlib import pyplot as plt<jupyter_output><empty_output><jupyter_text>## Create the data (can skip these details)
See the code for data generation **(click to expand)**
```python
# Note: This pulldown content is for docs.cleanlab.ai, if running on local Jupyter or Colab, please ignore it.
SEED = 0
def make_data(
    means=[[3, 2], [7, 7], [0, 8], [0, 10]],
    covs=[
        [[5, -1.5], [-1.5, 1]],
        [[1, 0.5], [0.5, 4]],
        [[5, 1], [1, 5]],
        [[3, 1], [1, 1]],
    ],
    sizes=[100, 50, 50, 50],
    avg_trace=0.8,
    seed=SEED,  # set to None for non-reproducible randomness
):
    np.random.seed(seed=SEED)
    K = len(means)  # number of classes
    data = []
    labels = []
    test_data = []
    test_labels = []
    for idx in range(K):
        data.append(
            np.random.multivariate_normal(
                mean=means[idx], cov=covs[idx], size=sizes[idx]
            )
        )
        test_data.append(
            np.random.multivariate_normal(
                mean=means[idx], cov=covs[idx], size=sizes[idx]
            )
        )
        labels.append(np.array([idx for i in range(sizes[idx])]))
        test_labels.append(np.array([idx for i in range(sizes[idx])]))
    X_train = np.vstack(data)
    y_train = np.hstack(labels)
    X_test = np.vstack(test_data)
    y_test = np.hstack(test_labels)
    # Compute p(y=k) the prior distribution over true labels.
    py_true = np.bincount(y_train) / float(len(y_train))
    noise_matrix_true = noise_generation.generate_noise_matrix_from_trace(
        K,
        trace=avg_trace * K,
        py=py_true,
        valid_noise_matrix=True,
        seed=SEED,
    )
    # Generate our noisy labels using the noise_matrix.
    s = noise_generation.generate_noisy_labels(y_train, noise_matrix_true)
    s_test = noise_generation.generate_noisy_labels(y_test, noise_matrix_true)
    ps = np.bincount(s) / float(len(s))  # Prior distribution over noisy labels
    return {
        "data": X_train,
        "true_labels": y_train,  # You never get to see these perfect labels.
        "labels": s,  # Instead, you have these labels, which have some errors.
        "test_data": X_test,
        "test_labels": y_test,  # Perfect labels used for "true" measure of model's performance during deployment.
        "noisy_test_labels": s_test,  # With IID train/test split, you'd have these labels, which also have some errors.
        "ps": ps,
        "py_true": py_true,
        "noise_matrix_true": noise_matrix_true,
        "class_names": ["purple", "blue", "seafoam green", "yellow"],
    }
data_dict = make_data()
for key, val in data_dict.items():  # Map data_dict to variables in namespace
    exec(key + "=val")
# Display dataset visually using matplotlib
def plot_data(data, circles, title, alpha=1.0):
    plt.figure(figsize=(14, 5))
    plt.scatter(data[:, 0], data[:, 1], c=labels, s=60)
    for i in circles:
        plt.plot(
            data[i][0],
            data[i][1],
            "o",
            markerfacecolor="none",
            markeredgecolor="red",
            markersize=14,
            markeredgewidth=2.5,
            alpha=alpha
        )
    _ = plt.title(title, fontsize=25)
```
<jupyter_code>SEED = 0
def make_data(
    means=[[3, 2], [7, 7], [0, 8], [0, 10]],
    covs=[
        [[5, -1.5], [-1.5, 1]],
        [[1, 0.5], [0.5, 4]],
        [[5, 1], [1, 5]],
        [[3, 1], [1, 1]],
    ],
    sizes=[100, 50, 50, 50],
    avg_trace=0.8,
    seed=SEED,  # set to None for non-reproducible randomness
):
    np.random.seed(seed=SEED)
    K = len(means)  # number of classes
    data = []
    labels = []
    test_data = []
    test_labels = []
    for idx in range(K):
        data.append(
            np.random.multivariate_normal(
                mean=means[idx], cov=covs[idx], size=sizes[idx]
            )
        )
        test_data.append(
            np.random.multivariate_normal(
                mean=means[idx], cov=covs[idx], size=sizes[idx]
            )
        )
        labels.append(np.array([idx for i in range(sizes[idx])]))
        test_labels.append(np.array([idx for i in range(sizes[idx])]))
    X_train = np.vstack(data)
    y_train = np.hstack(labels)
    X_test = np.vstack(test_data)
    y_test = np.hstack(test_labels)
    # Compute p(y=k) the prior distribution over true labels.
    py_true = np.bincount(y_train) / float(len(y_train))
    noise_matrix_true = noise_generation.generate_noise_matrix_from_trace(
        K,
        trace=avg_trace * K,
        py=py_true,
        valid_noise_matrix=True,
        seed=SEED,
    )
    # Generate our noisy labels using the noise_matrix.
    s = noise_generation.generate_noisy_labels(y_train, noise_matrix_true)
    s_test = noise_generation.generate_noisy_labels(y_test, noise_matrix_true)
    ps = np.bincount(s) / float(len(s))  # Prior distribution over noisy labels
    return {
        "data": X_train,
        "true_labels": y_train,  # You never get to see these perfect labels.
        "labels": s,  # Instead, you have these labels, which have some errors.
        "test_data": X_test,
        "test_labels": y_test,  # Perfect labels used for "true" measure of model's performance during deployment.
        "noisy_test_labels": s_test,  # With IID train/test split, you'd have these labels, which also have some errors.
        "ps": ps,
        "py_true": py_true,
        "noise_matrix_true": noise_matrix_true,
        "class_names": ["purple", "blue", "seafoam green", "yellow"],
    }
data_dict = make_data()
for key, val in data_dict.items():  # Map data_dict to variables in namespace
    exec(key + "=val")
# Display dataset visually using matplotlib
def plot_data(data, circles, title, alpha=1.0):
    plt.figure(figsize=(14, 5))
    plt.scatter(data[:, 0], data[:, 1], c=labels, s=60)
    for i in circles:
        plt.plot(
            data[i][0],
            data[i][1],
            "o",
            markerfacecolor="none",
            markeredgecolor="red",
            markersize=14,
            markeredgewidth=2.5,
            alpha=alpha
        )
    _ = plt.title(title, fontsize=25)
true_errors = np.where(true_labels != labels)[0]
plot_data(data, circles=true_errors, title="A realistic, messy dataset with 4 classes", alpha=0.3)<jupyter_output><empty_output><jupyter_text>The figure above represents a toy dataset we'll use to demonstrate various cleanlab functionality. In this data, the features *X* are 2-dimensional and examples are colored according to their *given* label above.
Like [many real-world datasets](https://labelerrors.com/), the given label happens to be incorrect for some of the examples (**circled in red**) in this dataset!
## **Workflow 1:** Use CleanLearning() for everything
<jupyter_code>yourFavoriteModel = LogisticRegression(verbose=0, random_state=SEED)
# CleanLearning: Machine Learning with cleaned data (given messy, real-world data)
cl = cleanlab.classification.CleanLearning(yourFavoriteModel, seed=SEED)
# Fit model to messy, real-world data, automatically training on cleaned data.
_ = cl.fit(data, labels)
# See the label quality for every example, which data has issues, and more.
cl.get_label_issues().head()<jupyter_output><empty_output><jupyter_text>### Clean Learning = Machine Learning with cleaned data
<jupyter_code># For comparison, this is how you would have trained your model normally (without Cleanlab)
yourFavoriteModel = LogisticRegression(verbose=0, random_state=SEED)
yourFavoriteModel.fit(data, labels)
print(f"Accuracy using yourFavoriteModel: {yourFavoriteModel.score(test_data, test_labels):.0%}")
# But CleanLearning can do anything yourFavoriteModel can do, but enhanced.
# For example, CleanLearning gives you predictions (just like yourFavoriteModel)
# but the magic is that CleanLearning was trained as if your data did not have label errors.
print(f"Accuracy using yourFavoriteModel (+ CleanLearning): {cl.score(test_data, test_labels):.0%}")<jupyter_output><empty_output><jupyter_text>Note! *Accuracy* refers to the accuracy with respect to the *true* error-free labels of a test set., i.e. what we actually care about in practice because that's what real-world model performance is based on. If you don't have a clean test set, you can use cleanlab to make one :)## **Workflow 2:** Use CleanLearning to find_label_issues in one line of code
<jupyter_code># One line of code. Literally.
issues = CleanLearning(yourFavoriteModel, seed=SEED).find_label_issues(data, labels)
issues.head()<jupyter_output><empty_output><jupyter_text>### Visualize the twenty examples with lowest label quality to see if Cleanlab works.
<jupyter_code>lowest_quality_labels = issues["label_quality"].argsort()[:20]
plot_data(data, circles=lowest_quality_labels, title="The 20 lowest label quality examples")<jupyter_output><empty_output><jupyter_text>Above, the top 20 label issues circled in red are found automatically using cleanlab (no true labels given).
If you've already computed the label issues using ``CleanLearning``, you can pass them into `fit()` and it will train **much** faster (skips label-issue identification step).<jupyter_code># CleanLearning can train faster if issues are provided at fitting time.
cl.fit(data, labels, label_issues=issues)<jupyter_output><empty_output><jupyter_text>## **Workflow 3:** Use cleanlab to find dataset-level and class-level issues
- Did you notice that the yellow and seafoam green class above are overlapping?
- How can a model ever know (or learn) what's ground truth inside the yellow distribution?
- If these two classes were merged, the model can learn more accurately from 3 classes (versus 4).
cleanlab automatically finds data-set level issues like this, in one line of code. Check this out!
<jupyter_code>cleanlab.dataset.find_overlapping_classes(
    labels=labels,
    confident_joint=cl.confident_joint,  # cleanlab uses the confident_joint internally to quantify label noise (see cleanlab.count.compute_confident_joint)
    class_names=class_names,
)<jupyter_output><empty_output><jupyter_text>Do the results surprise you? Did you expect the purple and seafoam green to also have so much overlap?
There are two things happening here:
1. **Distribution Overlap**: The green distribution has huge variance and overlaps with other distributions.
   - Cleanlab handles this for you: read the theory behind cleanlab for overlapping classes here: https://arxiv.org/abs/1705.01936
2. **Label Issues**: A ton of examples (which actually belong to the purple class) have been mislabeled as "green" in our dataset.
### Now, let's see what happens if we merge classes "seafoam green" and "yellow"
* The top two classes found automatically by ``cleanlab.dataset.find_overlapping_classes()``<jupyter_code>yourFavoriteModel1 = LogisticRegression(verbose=0, random_state=SEED)
yourFavoriteModel1.fit(data, labels)
print(f"[Original classes] Accuracy of yourFavoriteModel: {yourFavoriteModel1.score(test_data, test_labels):.0%}")
merged_labels, merged_test_labels = np.array(labels), np.array(test_labels)
# Merge classes: map all yellow-labeled examples to seafoam green
merged_labels[merged_labels == 3] = 2
merged_test_labels[merged_test_labels == 3] = 2
# Re-run our comparison. Re-run your model on the newly labeled dataset.
yourFavoriteModel2 = LogisticRegression(verbose=0, random_state=SEED)
yourFavoriteModel2.fit(data, merged_labels)
print(f"[Modified classes] Accuracy of yourFavoriteModel: {yourFavoriteModel2.score(test_data, merged_test_labels):.0%}")
# Re-run CleanLearning as well.
yourFavoriteModel3 = LogisticRegression(verbose=0, random_state=SEED)
cl3 = cleanlab.classification.CleanLearning(yourFavoriteModel, seed=SEED)
cl3.fit(data, merged_labels)
print(f"[Modified classes] Accuracy of yourFavoriteModel (+ CleanLearning): {cl3.score(test_data, merged_test_labels):.0%}")<jupyter_output><empty_output><jupyter_text>While on one hand that's a huge improvement, it's important to remember that choosing among three classes is an easier task than choosing among four classes, so it's not fair to directly compare these numbers.
Instead, the big takeaway is...
if you get to choose your classes, combining overlapping classes can make the learning task easier for your model. But if you have lots of classes, how do you know which ones to merge?? That's when you use `cleanlab.dataset.find_overlapping_classes`.
## **Workflow 4:** Clean your test set too if you're doing ML with noisy labels!
If your test and training data were randomly split (IID), then be aware that your test labels are likely noisy too! It is thus important to fix label issues in them before we can trust measures like test accuracy.
* More about what can go wrong if you don't use a clean test set [in this paper](https://arxiv.org/abs/2103.14749).<jupyter_code>from sklearn.metrics import accuracy_score
# Fit your model on noisily labeled train data
yourFavoriteModel = LogisticRegression(verbose=0, random_state=SEED)
yourFavoriteModel.fit(data, labels)
# Get predicted probabilities for test data (these are out-of-sample)
my_test_pred_probs = yourFavoriteModel.predict_proba(test_data)
my_test_preds = my_test_pred_probs.argmax(axis=1)  # predicted labels
# Find label issues in the test data
issues_test = CleanLearning(yourFavoriteModel, seed=SEED).find_label_issues(
    labels=noisy_test_labels, pred_probs=my_test_pred_probs)
# You should inspect issues_test and fix issues to ensure high-quality test data labels.
corrected_test_labels = test_labels  # Here we'll pretend you have done this perfectly :)
# Fit more robust version of model on noisily labeled training data
cl = CleanLearning(yourFavoriteModel, seed=SEED).fit(data, labels)
cl_test_preds = cl.predict(test_data)
print(f" Noisy Test Accuracy (on given test labels) using yourFavoriteModel: {accuracy_score(noisy_test_labels, my_test_preds):.0%}")
print(f" Noisy Test Accuracy (on given test labels) using yourFavoriteModel (+ CleanLearning): {accuracy_score(noisy_test_labels, cl_test_preds):.0%}")
print(f"Actual Test Accuracy (on corrected test labels) using yourFavoriteModel: {accuracy_score(corrected_test_labels, my_test_preds):.0%}")
print(f"Actual Test Accuracy (on corrected test labels) using yourFavoriteModel (+ CleanLearning): {accuracy_score(corrected_test_labels, cl_test_preds):.0%}")<jupyter_output><empty_output><jupyter_text>## **Workflow 5:** One score to rule them all -- use cleanlab's overall dataset health score
This score can be fairly compared across datasets or across versions of a dataset to track overall dataset quality (a.k.a. *dataset health*) over time.
<jupyter_code># One line of code.
health = cleanlab.dataset.overall_label_health_score(
    labels, confident_joint=cl.confident_joint
    # cleanlab uses the confident_joint internally to quantify label noise (see cleanlab.count.compute_confident_joint)
)<jupyter_output><empty_output><jupyter_text>### How accurate is this dataset health score?
Because we know the true labels (we created this toy dataset), we can compare with ground truth.<jupyter_code>label_acc = sum(labels != true_labels) / len(labels)  # despite the name, this is the fraction of given labels that are wrong
print(f"Percentage of label issues guessed by cleanlab {1 - health:.0%}")
print(f"Percentage of (ground truth) label errors): {label_acc:.0%}")
offset = (1 - label_acc) - health
print(
    f"\nQuestion: cleanlab seems to be overestimating."
    f" How do we account for this {offset:.0%} difference?"
)
print(
    "Answer: Data points that fall in between two overlapping distributions are often "
    "impossible to label and are counted as issues."
)<jupyter_output><empty_output><jupyter_text>## **Workflow(s) 6:** Use count, rank, filter modules directly
- Using these modules directly is intended for more experienced cleanlab users. But once you understand how they work, you can create numerous powerful workflows.
- For these workflows, you **always** need two things:
  1.  Out-of-sample predicted probabilities (e.g. computed via cross-validation)
  2.  Labels (can contain label errors and various issues)
#### cleanlab can compute out-of-sample predicted probabilities for you:
<jupyter_code>pred_probs = cleanlab.count.estimate_cv_predicted_probabilities(
    data, labels, clf=yourFavoriteModel, seed=SEED
)
print(f"pred_probs is a {pred_probs.shape} matrix of predicted probabilities")<jupyter_output><empty_output><jupyter_text>### **Workflow 6.1 (count)**: Fully characterize label noise (noise matrix, joint, prior of true labels, ...)
Now that we have `pred_probs` and `labels`, advanced users can compute everything in `cleanlab.count`.
- `py: prob(true_label=k)`
  - For each class k, this is the probability that the true label is k, i.e. the prior over the actual true labels (which cleanlab can estimate for you even though you don't have the true labels).
- `noise_matrix: p(noisy|true)`
  - This describes how errors were introduced into your labels: a conditional probability matrix giving, for each true class, the probability that an example of that class was flipped to each possible given (noisy) label.
- `inverse_noise_matrix: p(true|noisy)`
  - Conversely, for each given (noisy) label, this gives the probability that the true label is actually each class.
- `confident_joint`
  - This is an unnormalized (count-based) estimate of the number of examples in our dataset with each possible (true label, given label) pairing.
- `joint: p(true label, noisy label)`
  - The joint distribution of noisy (given) and true labels is the most useful of all these statistics. From it, you can compute every other statistic listed above. One entry from this matrix can be interpreted as: "The proportion of examples in our dataset whose true label is *i* and given label is *j*".
These five tools fully characterize class-conditional label noise in a dataset.
#### Use cleanlab to estimate and visualize the joint distribution of label noise and noise matrix of label flipping rates:<jupyter_code>(
    py, noise_matrix, inverse_noise_matrix, confident_joint
) = cleanlab.count.estimate_py_and_noise_matrices_from_probabilities(labels, pred_probs)
# Note: you can also combine the above two lines of code into a single line of code like this
(
    py, noise_matrix, inverse_noise_matrix, confident_joint, pred_probs
) = cleanlab.count.estimate_py_noise_matrices_and_cv_pred_proba(
    data, labels, clf=yourFavoriteModel, seed=SEED
)
# Get the joint distribution of noisy and true labels from the confident joint
# This is the most powerful statistic in machine learning with noisy labels.
joint = cleanlab.count.estimate_joint(
    labels, pred_probs, confident_joint=confident_joint
)
# Pretty print the joint distribution and noise matrix
cleanlab.internal.util.print_joint_matrix(joint)
cleanlab.internal.util.print_noise_matrix(noise_matrix)<jupyter_output><empty_output><jupyter_text>In some applications, you may have a priori knowledge regarding some of these quantities. In this case, you can pass them directly into cleanlab which may be able to leverage this information to better identify label issues.
<jupyter_code>cl3 = cleanlab.classification.CleanLearning(yourFavoriteModel, seed=SEED)
_ = cl3.fit(data, labels, noise_matrix=noise_matrix_true)  # CleanLearning with a priori known noise_matrix<jupyter_output><empty_output><jupyter_text>### **Workflow 6.2 (filter):** Find label issues for any dataset and any model in one line of code
Features of ``cleanlab.filter.find_label_issues``:
* Versatility -- Choose from several [state-of-the-art](https://arxiv.org/abs/1911.00068) label-issue detection algorithms using ``filter_by=``.
* Works with predicted probabilities from any model (the trained model itself is not needed).
* One line of code :)
Remember ``CleanLearning.find_label_issues``? It uses this method internally.<jupyter_code># Get out of sample predicted probabilities via cross-validation.
# Here we demonstrate the use of sklearn cross_val_predict as another option to get cross-validated predicted probabilities
pred_probs = cross_val_predict(
    estimator=yourFavoriteModel, X=data, y=labels, cv=3, method="predict_proba"
)
# Find label issues
label_issues_indices = cleanlab.filter.find_label_issues(
    labels=labels,
    pred_probs=pred_probs,
    filter_by="both", # 5 available filter_by options
    return_indices_ranked_by="self_confidence",  # 3 available label quality scoring options for rank ordering
    rank_by_kwargs={
        "adjust_pred_probs": True  # adjust predicted probabilities (see docstring for more details)
    },
)
# Return dataset indices of examples with label issues
label_issues_indices<jupyter_output><empty_output><jupyter_text>
#### Again, we can visualize the twenty examples with lowest label quality to see if Cleanlab works.<jupyter_code>plot_data(data, circles=label_issues_indices[:20], title="Top 20 label issues found by cleanlab.filter.find_label_issues()")<jupyter_output><empty_output><jupyter_text>### Workflow 6.2 supports lots of methods to ``find_label_issues()`` via the ``filter_by`` parameter.
* Here, we evaluate precision/recall/f1/accuracy of detecting true label issues for each method.<jupyter_code>from sklearn.metrics import precision_score, recall_score, f1_score
import pandas as pd
yourFavoriteModel = LogisticRegression(verbose=0, random_state=SEED)
# Get cross-validated predicted probabilities
# Here we demonstrate the use of sklearn cross_val_predict as another option to get cross-validated predicted probabilities
pred_probs = cross_val_predict(
    estimator=yourFavoriteModel, X=data, y=labels, cv=3, method="predict_proba"
)
# Ground truth label issues to use for evaluating different filter_by options
true_label_issues = (true_labels != labels)
# Find label issues with different filter_by options
filter_by_list = [
    "prune_by_noise_rate",
    "prune_by_class",
    "both",
    "confident_learning",
    "predicted_neq_given",
]
results = []
for filter_by in filter_by_list:
    # Find label issues
    label_issues = cleanlab.filter.find_label_issues(
        labels=labels,
        pred_probs=pred_probs,
        filter_by=filter_by
    )
    precision = precision_score(true_label_issues, label_issues)
    recall = recall_score(true_label_issues, label_issues)
    f1 = f1_score(true_label_issues, label_issues)
    acc = accuracy_score(true_label_issues, label_issues)
    result = {
        "filter_by algorithm": filter_by,
        "precision": precision,
        "recall": recall,
        "f1": f1,
        "accuracy": acc
    }
    results.append(result)
# summary of results
pd.DataFrame(results).sort_values(by='f1', ascending=False)<jupyter_output><empty_output><jupyter_text>### **Workflow 6.3 (rank):** Automatically rank every example by a unique label quality score. Find errors using `cleanlab.count.num_label_issues` as a threshold.
cleanlab can analyze every label in a dataset and provide a numerical score gauging its overall quality. Low-quality labels indicate examples that should be more closely inspected, perhaps because their given label is incorrect, or simply because they represent an ambiguous edge-case that's worth a second look.<jupyter_code># Estimate the number of label issues
label_issues_count = cleanlab.count.num_label_issues(
    labels=labels,
    pred_probs=pred_probs
)
# Get label quality scores
label_quality_scores = cleanlab.rank.get_label_quality_scores(
    labels=labels,
    pred_probs=pred_probs,
    method="self_confidence"
)
# Rank-order by label quality scores and get the top estimated number of label issues
label_issues_indices = np.argsort(label_quality_scores)[:label_issues_count]
label_issues_indices<jupyter_output><empty_output><jupyter_text>#### Again, we can visualize the label issues found to see if Cleanlab works.<jupyter_code>plot_data(data, circles=label_issues_indices[:20], title="Top 20 label issues using cleanlab.rank with cleanlab.count.num_label_issues()")<jupyter_output><empty_output><jupyter_text>#### Not sure when to use Workflow 6.2 or 6.3 to find label issues?
* Workflow 6.2 is the easiest to use as it's just one line of code.
* Workflow 6.3 is modular and extensible. As we add more label and data quality scoring functions in ``cleanlab.rank``, Workflow 6.3 will always work.
* Workflow 6.3 is also for users who have a custom way to rank their data by label quality, and they just need to know what the cut-off is, found via ``cleanlab.count.num_label_issues``.## **Workflow 7:** Ensembling label quality scores from multiple predictors<jupyter_code>from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
# 3 models in ensemble
model1 = LogisticRegression(penalty="l2", verbose=0, random_state=SEED)
model2 = RandomForestClassifier(max_depth=5, random_state=SEED)
model3 = GradientBoostingClassifier(
    n_estimators=100, learning_rate=1.0, max_depth=3, random_state=SEED
)
# Get cross-validated predicted probabilities from each model
cv_pred_probs_1 = cross_val_predict(
    estimator=model1, X=data, y=labels, cv=3, method="predict_proba"
)
cv_pred_probs_2 = cross_val_predict(
    estimator=model2, X=data, y=labels, cv=3, method="predict_proba"
)
cv_pred_probs_3 = cross_val_predict(
    estimator=model3, X=data, y=labels, cv=3, method="predict_proba"
)
# List of predicted probabilities from each model
pred_probs_list = [cv_pred_probs_1, cv_pred_probs_2, cv_pred_probs_3]
# Get ensemble label quality scores
label_quality_scores_best = cleanlab.rank.get_label_quality_ensemble_scores(
    labels=labels, pred_probs_list=pred_probs_list, verbose=False
)
# Alternative approach: create single ensemble predictor and get its pred_probs
cv_pred_probs_ensemble = (cv_pred_probs_1 + cv_pred_probs_2 + cv_pred_probs_3)/3  # uniform aggregation of predictions
# Use this single set of pred_probs to find label issues
label_quality_scores_better = cleanlab.rank.get_label_quality_scores(
    labels=labels, pred_probs=cv_pred_probs_ensemble
)<jupyter_output><empty_output> | 
	non_permissive | 
	/docs/source/tutorials/indepth_overview.ipynb | 
	cgnorthcutt/cleanlab | 21 | 
| 
	<jupyter_start><jupyter_text># Setting up environment:<jupyter_code>pip install k-means-constrained
from k_means_constrained import KMeansConstrained
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd<jupyter_output><empty_output><jupyter_text># Sample 1:<jupyter_code>X = np.array([[1, 2],[1.5,1],[3,1],[2,2],[3,3],[2.5,3.5], [2, 4], [1, 0],[4, 2],[1.5,3.5], [3, 4], [4, 0], [3,3],[4,1]])
clf = KMeansConstrained(n_clusters=3, size_min=2, size_max=5,random_state=0)
clf.fit_predict(X)
test_df = pd.DataFrame(X)
test_df = test_df.rename(columns={0:'x',1:'y'})
sns.scatterplot(x=test_df.x, y=test_df.y)
test_df
test_df['cluster'] = clf.fit_predict(X).tolist()
test_df
sns.scatterplot(x=test_df.x, y=test_df.y, hue=test_df.cluster, palette='Accent')
sns.scatterplot(x=clf.cluster_centers_[:,0], y=clf.cluster_centers_[:,1], color='b')
clf.cluster_centers_<jupyter_output><empty_output><jupyter_text># Sample 2:<jupyter_code># opening file:
df_original = pd.read_excel('/content/Base_de_datos_con_direcciones_limpias.xlsx')
df_original.columns
df = df_original.copy()
df = df[df['latitudes'] != 'not found'].reset_index()
real_test_df = df[df['RUTA'] < 5].reset_index()
real_test_df = real_test_df.drop(['level_0','index','Unnamed: 0'], axis = 1)
real_test_df
len(real_test_df)
sns.scatterplot(x=real_test_df.latitudes, y=real_test_df.longitudes, hue=real_test_df.RUTA, palette='jet_r')<jupyter_output><empty_output><jupyter_text>'Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r', 'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r', 'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', 'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', 'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r', 'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', 'PuBuGn_r', 'PuBu_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r', 'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', 'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r', 'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r', 'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Wistia', 'Wistia_r', 'YlGn', 'YlGnBu', 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r', 'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', 'cividis', 'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r', 'crest', 'crest_r', 'cubehelix', 'cubehelix_r', 'flag', 'flag_r', 'flare', 'flare_r', 'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'icefire', 'icefire_r', 'inferno', 'inferno_r', 'jet', 'jet_r'<jupyter_code>locations = np.array(real_test_df[['latitudes','longitudes']])
clf = KMeansConstrained(n_clusters=4, size_min=13, size_max=32,random_state=0)
clf.fit_predict(locations)
real_test_df['cluster'] = clf.fit_predict(locations).tolist()
#real_test_df
sns.scatterplot(x=real_test_df.latitudes, y=real_test_df.longitudes, hue=real_test_df.RUTA, palette='jet_r')
sns.scatterplot(x=real_test_df.latitudes, y=real_test_df.longitudes, hue=real_test_df.cluster, palette='jet_r', legend='brief')
import folium
from folium import plugins
from folium.plugins import HeatMap
from folium.plugins import MarkerCluster
def mapa_de_cluster(cluster):
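  # Build a folium map of every student assigned to the given cluster, centered on the cluster's mean coordinates.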
  temp_df = real_test_df[real_test_df['cluster']== cluster].reset_index()
  temp_locations =  temp_df[['latitudes', 'longitudes']]
  temp_locationlist = temp_locations.values.tolist()
  avg_latitud = temp_df['latitudes'].mean()
  avg_longitud = temp_df['longitudes'].mean()
  avg_location = [avg_latitud, avg_longitud]
  map = folium.Map(location=avg_location, zoom_start=16)
  for point in range(0, len(temp_locationlist)):
    folium.Marker(temp_locationlist[point], popup=temp_df['ESTUDIANTE'][point]).add_to(map)
  return map
mapa_de_cluster(3)
def mapa_de_ruta_actual(ruta):
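  # Same idea as mapa_de_cluster, but filters by the original RUTA (current route) instead of the new cluster label.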
  temp_df = real_test_df[real_test_df['RUTA']== ruta].reset_index()
  temp_locations =  temp_df[['latitudes', 'longitudes']]
  temp_locationlist = temp_locations.values.tolist()
  avg_latitud = temp_df['latitudes'].mean()
  avg_longitud = temp_df['longitudes'].mean()
  avg_location = [avg_latitud, avg_longitud]
  map = folium.Map(location=avg_location, zoom_start=16)
  for point in range(0, len(temp_locationlist)):
    folium.Marker(temp_locationlist[point], popup=temp_df['ESTUDIANTE'][point]).add_to(map)
  return map
mapa_de_ruta_actual(4)
<jupyter_output><empty_output> | 
	no_license | 
	/STS_0_4.ipynb | 
	econdavidzh/Optimizing_school_routes_with_Machine_Learning | 4 | 
| 
	<jupyter_start><jupyter_text># Initialization
Welcome to the first assignment of "Improving Deep Neural Networks". 
Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.  
If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results. 
A well chosen initialization can:
- Speed up the convergence of gradient descent
- Increase the odds of gradient descent converging to a lower training (and generalization) error 
To get started, run the following cell to load the packages and the planar dataset you will try to classify.<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()<jupyter_output><empty_output><jupyter_text>You would like a classifier to separate the blue dots from the red dots.## 1 - Neural Network model You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:  
- *Zeros initialization* --  setting `initialization = "zeros"` in the input argument.
- *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values.  
- *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015. 
**Instructions**: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this `model()` calls.<jupyter_code>def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
    
    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent 
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")
    
    Returns:
    parameters -- parameters learnt by the model
    """
        
    grads = {}
    costs = [] # to keep track of the loss
    m = X.shape[1] # number of examples
    layers_dims = [X.shape[0], 10, 5, 1]
    
    # Initialize parameters dictionary.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)
        
        # Loss
        cost = compute_loss(a3, Y)
        # Backward propagation.
        grads = backward_propagation(X, Y, cache)
        
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
        
        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)
            
    # plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    
    return parameters<jupyter_output><empty_output><jupyter_text>## 2 - Zero initialization
There are two types of parameters to initialize in a neural network:
- the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
- the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
**Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but let's try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.<jupyter_code># GRADED FUNCTION: initialize_parameters_zeros 
def initialize_parameters_zeros(layers_dims):
    """
    Arguments:
    layer_dims -- python array (list) containing the size of each layer.
    
    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
                    b1 -- bias vector of shape (layers_dims[1], 1)
                    ...
                    WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
                    bL -- bias vector of shape (layers_dims[L], 1)
    """
    
    parameters = {}
    L = len(layers_dims)            # number of layers in the network
    
    for l in range(1, L):
        ### START CODE HERE ### (≈ 2 lines of code)
        parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l-1]))
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
        ### END CODE HERE ###
    return parameters
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))<jupyter_output>W1 = [[ 0.  0.  0.]
 [ 0.  0.  0.]]
b1 = [[ 0.]
 [ 0.]]
W2 = [[ 0.  0.]]
b2 = [[ 0.]]
<jupyter_text>**Expected Output**:

| Parameter | Value |
| --- | --- |
| **W1** | [[ 0.  0.  0.] [ 0.  0.  0.]] |
| **b1** | [[ 0.] [ 0.]] |
| **W2** | [[ 0.  0.]] |
| **b2** | [[ 0.]] |
 Run the following code to train your model on 15,000 iterations using zeros initialization.<jupyter_code>parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)<jupyter_output>Cost after iteration 0: 0.6931471805599453
Cost after iteration 1000: 0.6931471805599453
Cost after iteration 2000: 0.6931471805599453
Cost after iteration 3000: 0.6931471805599453
Cost after iteration 4000: 0.6931471805599453
Cost after iteration 5000: 0.6931471805599453
Cost after iteration 6000: 0.6931471805599453
Cost after iteration 7000: 0.6931471805599453
Cost after iteration 8000: 0.6931471805599453
Cost after iteration 9000: 0.6931471805599453
Cost after iteration 10000: 0.6931471805599455
Cost after iteration 11000: 0.6931471805599453
Cost after iteration 12000: 0.6931471805599453
Cost after iteration 13000: 0.6931471805599453
Cost after iteration 14000: 0.6931471805599453
<jupyter_text>The performance is really bad: the cost does not decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary:<jupyter_code>print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)<jupyter_output><empty_output><jupyter_text>The model is predicting 0 for every example. 
In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression. 
**What you should remember**:
- The weights $W^{[l]}$ should be initialized randomly to break symmetry. 
- It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly. 
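A quick numerical illustration of the symmetry problem (a sketch added here for clarity, not part of the graded assignment; it only uses numpy, which is already imported):<jupyter_code># With zero weights, every hidden unit computes exactly the same value for any input,
# so every unit also receives exactly the same gradient and they can never differentiate.
W1 = np.zeros((10, 2))
x = np.random.randn(2, 5)             # 5 example inputs
z1 = np.dot(W1, x)                    # shape (10, 5): all 10 rows are identical (all zeros here)
print(np.all(z1 == z1[0]))            # True -> all hidden units are indistinguishable<jupyter_output><empty_output><jupyter_text>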
## 3 - Random initialization
To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values. 
**Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters. <jupyter_code># GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
    """
    Arguments:
    layer_dims -- python array (list) containing the size of each layer.
    
    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
                    b1 -- bias vector of shape (layers_dims[1], 1)
                    ...
                    WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
                    bL -- bias vector of shape (layers_dims[L], 1)
    """
    
    np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
    parameters = {}
    L = len(layers_dims)            # integer representing the number of layers
    
    for l in range(1, L):
        ### START CODE HERE ### (≈ 2 lines of code)
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * 10
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
        ### END CODE HERE ###
    return parameters
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))<jupyter_output>W1 = [[ 17.88628473   4.36509851   0.96497468]
 [-18.63492703  -2.77388203  -3.54758979]]
b1 = [[ 0.]
 [ 0.]]
W2 = [[-0.82741481 -6.27000677]]
b2 = [[ 0.]]
<jupyter_text>**Expected Output**:

| Parameter | Value |
| --- | --- |
| **W1** | [[ 17.88628473   4.36509851   0.96497468] [-18.63492703  -2.77388203  -3.54758979]] |
| **b1** | [[ 0.] [ 0.]] |
| **W2** | [[-0.82741481 -6.27000677]] |
| **b2** | [[ 0.]] |
 Run the following code to train your model on 15,000 iterations using random initialization.<jupyter_code>parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)<jupyter_output>/home/jovyan/work/week5/Initialization/init_utils.py:145: RuntimeWarning: divide by zero encountered in log
  logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
/home/jovyan/work/week5/Initialization/init_utils.py:145: RuntimeWarning: invalid value encountered in multiply
  logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
<jupyter_text>If you see "inf" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes. 
Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s. <jupyter_code>print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)<jupyter_output><empty_output><jupyter_text>**Observations**:
- The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
- Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm. 
- If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
**In summary**:
- Initializing weights to very large random values does not work well. 
- Hopefully, initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part! ## 4 - He initialization
Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
**Exercise**: Implement the following function to initialize your parameters with He initialization.
**Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation. <jupyter_code># GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """
    Arguments:
    layer_dims -- python array (list) containing the size of each layer.
    
    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
                    b1 -- bias vector of shape (layers_dims[1], 1)
                    ...
                    WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
                    bL -- bias vector of shape (layers_dims[L], 1)
    """
    
    np.random.seed(3)
    parameters = {}
    L = len(layers_dims) - 1 # integer representing the number of layers
     
    for l in range(1, L + 1):
        ### START CODE HERE ### (≈ 2 lines of code)
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * np.sqrt(2./layers_dims[l-1])
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
        ### END CODE HERE ###
        
    return parameters
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))<jupyter_output>W1 = [[ 1.78862847  0.43650985]
 [ 0.09649747 -1.8634927 ]
 [-0.2773882  -0.35475898]
 [-0.08274148 -0.62700068]]
b1 = [[ 0.]
 [ 0.]
 [ 0.]
 [ 0.]]
W2 = [[-0.03098412 -0.33744411 -0.92904268  0.62552248]]
b2 = [[ 0.]]
<jupyter_text>**Expected Output**:

| Parameter | Value |
| --- | --- |
| **W1** | [[ 1.78862847  0.43650985] [ 0.09649747 -1.8634927 ] [-0.2773882  -0.35475898] [-0.08274148 -0.62700068]] |
| **b1** | [[ 0.] [ 0.] [ 0.] [ 0.]] |
| **W2** | [[-0.03098412 -0.33744411 -0.92904268  0.62552248]] |
| **b2** | [[ 0.]] |
 Run the following code to train your model on 15,000 iterations using He initialization.<jupyter_code>parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)<jupyter_output><empty_output> | 
	no_license | 
	/Neural-Networks-and-Deep-Learning-Assignments/Course 2/Initialization.ipynb | 
	ravi1-7/sally-recruitment | 10 | 
| 
	<jupyter_start><jupyter_text># Multiclass Support Vector Machine exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
In this exercise you will:
    
- implement a fully-vectorized **loss function** for the SVM
- implement the fully-vectorized expression for its **analytic gradient**
- **check your implementation** using numerical gradient
- use a validation set to **tune the learning rate and regularization** strength
- **optimize** the loss function with **SGD**
- **visualize** the final learned weights
<jupyter_code># Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2<jupyter_output><empty_output><jupyter_text>## CIFAR-10 Data Loading and Preprocessing<jupyter_code># Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
    idxs = np.flatnonzero(y_train == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(X_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print 'Training data shape: ', X_train.shape
print 'Validation data shape: ', X_val.shape
print 'Test data shape: ', X_test.shape
print 'dev data shape: ', X_dev.shape
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print mean_image[:10] # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print X_train.shape, X_val.shape, X_test.shape, X_dev.shape<jupyter_output>(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
<jupyter_text>## SVM Classifier
Your code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. 
As you can see, we have prefilled the function `svm_loss_naive` which uses for loops to evaluate the multiclass SVM loss function. <jupyter_code># Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001 
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.00001)
print 'loss: %f' % (loss, )<jupyter_output>loss: 9.407723
<jupyter_text>The `grad` returned from the function above is right now all zero. Derive the gradient of the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.
To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:<jupyter_code># Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
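# Illustrative sketch (not part of the assignment): a sparse gradient check like the one
# below conceptually compares the analytic gradient to a centered finite difference,
#   (f(W + h*e_i) - f(W - h*e_i)) / (2 * h),
# at a handful of randomly chosen coordinates i.
def numerical_grad_at(f, W, i, h=1e-5):
    Wp, Wm = W.copy(), W.copy()
    Wp.flat[i] += h
    Wm.flat[i] -= h
    return (f(Wp) - f(Wm)) / (2.0 * h)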
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 1e2)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad)<jupyter_output>numerical: -4.900581 analytic: -4.900581, relative error: 7.469148e-12
numerical: 0.138303 analytic: 0.138303, relative error: 2.012612e-09
numerical: -5.299654 analytic: -5.299654, relative error: 2.102212e-11
numerical: -8.731251 analytic: -8.731251, relative error: 3.824316e-11
numerical: 17.621102 analytic: 17.621102, relative error: 5.531362e-12
numerical: -9.417006 analytic: -9.417006, relative error: 4.797311e-12
numerical: 27.628090 analytic: 27.628090, relative error: 1.028127e-11
numerical: -21.772254 analytic: -21.772254, relative error: 1.600253e-11
numerical: 6.678035 analytic: 6.678035, relative error: 4.611242e-12
numerical: 15.433862 analytic: 15.433862, relative error: 3.683090e-12
numerical: -38.089272 analytic: -38.089272, relative error: 6.274777e-12
numerical: -32.384322 analytic: -32.384322, relative error: 6.864555e-12
numerical: 15.988066 analytic: 15.988066, relative error: 1.184609e-11
numerical: -6.283471 analytic: -6.283471, relative error: 3.396473e-11
nume[...]<jupyter_text>### Inline Question 1:
It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable*
**Your Answer:** 
It's because the SVM loss function contains the max(0, ...) operation, which is not differentiable at 0 (a kink point).
Example: suppose a weight is W = -1e-10, so the corresponding SVM margin term is max(0, negative) = 0 and contributes 0 to the loss.
If h is about 1e-6, then perturbing the weight to W + h flips its sign, the margin becomes positive, and that term's contribution is no longer 0 (e.g. it becomes 1).
Thus the relative error would be abs(1 - 0) / max(1, 0) = 1, which does not pass our check.
We should keep track of whether the winning value of a max() changes during the check, as in this example. If the gradcheck fails while crossing a kink, our gradient is probably still fine; if it fails without crossing a kink, we have a bad gradient.<jupyter_code># Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# The losses should match but your vectorized implementation should be much faster.
print 'difference: %f' % (loss_naive - loss_vectorized)
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss and gradient: computed in %fs' % (toc - tic)
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss and gradient: computed in %fs' % (toc - tic)
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'difference: %f' % difference<jupyter_output>Naive loss and gradient: computed in 0.059269s
Vectorized loss and gradient: computed in 0.003269s
difference: 0.000000
<jupyter_text>### Stochastic Gradient Descent
We now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.<jupyter_code># In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=5e4,
                      num_iters=1500, verbose=True)
toc = time.time()
print 'That took %fs' % (toc - tic)
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print 'training accuracy: %f' % (np.mean(y_train == y_train_pred), )
y_val_pred = svm.predict(X_val)
print 'validation accuracy: %f' % (np.mean(y_val == y_val_pred), )
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1   # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO:                                                                        #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the      #
# training set, compute its accuracy on the training and validation sets, and  #
# store these numbers in the results dictionary. In addition, store the best   #
# validation accuracy in best_val and the LinearSVM object that achieves this  #
# accuracy in best_svm.                                                        #
#                                                                              #
# Hint: You should use a small value for num_iters as you develop your         #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation   #
# code with a larger value for num_iters.                                      #
################################################################################
for lr in learning_rates:
    for reg in regularization_strengths:
        clf = LinearSVM()
        clf.train(X_train, y_train, learning_rate=lr, reg=reg, 
                  num_iters=1500, verbose=False)
        y_train_pred = clf.predict(X_train)
        y_train_acc = np.mean(y_train == y_train_pred)
        y_val_pred = clf.predict(X_val)
        y_val_acc = np.mean(y_val == y_val_pred)
        if y_val_acc > best_val:
            best_val = y_val_acc
            best_svm = clf
        
        results[(lr, reg)] = (y_train_acc, y_val_acc)
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
    
# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
                lr, reg, train_accuracy, val_accuracy)
    
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
    plt.subplot(2, 5, i + 1)
    # Rescale the weights to be between 0 and 255
    wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
    plt.imshow(wimg.astype('uint8'))
    plt.axis('off')
    plt.title(classes[i])<jupyter_output><empty_output> | 
	permissive | 
	/assignment1/svm.ipynb | 
	budmitr/cs231n | 6 | 
| 
	<jupyter_start><jupyter_text># Data Science Career Guide Probability and Statistics
#### Notebook I am using to follow along with Jose's course and practice LaTex<jupyter_code>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats as scs
import seaborn as sns<jupyter_output><empty_output><jupyter_text># Probability Section## 1. You are given a fair coin. On average, how many flips would you need to get two of the same flip in a row (either 2 heads in a row or 2 tails in a row)?
### $$ P(X = n) = \frac {2} {2^{n-1}} \cdot \frac{1}{2}$$
###  $$ 1/2 \times 1/2 \times 1/2 \ldots $$
### $$ E[X] = x_1p_1 + x_2p_2 + \cdots + x_kp_k $$
### $$ \sum_{n=2}^{\infty}nP_n = \sum_{n=2}^{\infty} \frac{n}{2^{n-1}} = 3 $$
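A quick Monte Carlo check of the E[X] = 3 result (a small sketch added for illustration, reusing the numpy import from the setup cell):<jupyter_code># Simulate flipping a fair coin until two consecutive flips match, and average the flip counts
def flips_until_repeat(rng):
    prev = rng.integers(2)
    n = 1
    while True:
        cur = rng.integers(2)
        n += 1
        if cur == prev:
            return n
        prev = cur

rng = np.random.default_rng(0)
print(np.mean([flips_until_repeat(rng) for _ in range(100000)]))  # approximately 3.0<jupyter_output><empty_output>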
<jupyter_code>fig, ax = plt.subplots(1, 1)
p = 0.5
mean, var, skew, kurt = scs.geom.stats(p, moments='mvsk')
x = np.arange(scs.geom.ppf(0.01, p), scs.geom.ppf(0.99, p))
ax.plot(x, scs.geom.pmf(x, p), 'bo', ms=8, label='geom pmf')
ax.vlines(x, 0, scs.geom.pmf(x, p), colors='b', lw=5, alpha=0.5)
geo = scs.geom(.5)
xs = np.arange(0,50)
ys = geo.pmf(xs)
plt.plot(xs, ys)
geo.expect()<jupyter_output><empty_output><jupyter_text>## 2. What is the probability of rolling a total sum of 4 with 2 dice?
### 36 total ways the dice can be thrown
### [(1,3),(2,2),(3,1)] = 3/36## 3. What is the probability of rolling at least one 4 with 2 dice?
 * ### there are 11 / 36## 4. You have two jars, 50 red marbles, 50 blue marbles. You need to place all the marbles into the jars such that when you blindly pick one marble out of one jar, you maximize the chances that it will be red.
* ### Place 1 red in 1st jar and the rest of the marbles in the 2nd jar.
### $ (1 \times \frac{1}{2}) + (\frac{49}{99} \times \frac{1}{2}) = .7475 $## 5. If the probability of seeing a car on the highway in 30 minutes is 0.95, what is the probability of seeing a car on the highway in 10 minutes? (Assume a constant default probability)
* ### Let probability of seeing no car in 10 minutes be P
* ### No car for 30 mins = $ P \times P \times P $
* ### Seeing at least one car in 30 mins $ = 1 - P^3 = 0.95 $
* ### $ 1-P = 0.63 $
## 6. You are given a fair coin. On average, how many flips would you need to get two heads in a row?
* ### $ x = (1/2)(x+1) + (1/4)(x+2) + (1/4)2 $
* ### $ x = 6 $## 7. You are given 10 coins. 9 are fair and 1 is biased. You are told the biased coin has P>0.5 to be heads. You randomly grab a coin and flip it three times and get HHT. What is the probability you flipped the biased coin?
* ### Bayes
* ### $$ P(A \mid B) = \frac {P(A) \times P(B \mid A)} {P(B)} $$
* ### $$ P(A \mid B) = \frac {P(A) \times P(B \mid A)} {P(B \mid A) \times P(A) + P(B \mid \bar A) \times P(\bar A)} $$## 8. Given a biased coin with P>0.5 for heads, how could you simulate a fair coin. In more general words: simulate a fair coin given only access to a biased coin. Note, this is tricky!
* ### Von Neumann gave a simple solution: flip the coin twice. If it comes up heads followed by tails, then call the outcome HEAD. If it comes up tails followed by heads, then call the outcome TAIL.<jupyter_code>def fairCoin(biasedCoin):
    coin1, coin2 = 0, 0
    while coin1 == coin2:
        coin1, coin2 = biasedCoin(), biasedCoin()
    return coin1
from random import random
def biasedCoin():
    return int(random() < 0.2)
flips = 10000
n_heads_biased = sum(biasedCoin() for i in range(flips))
print('{}%'.format(round(n_heads_biased / flips * 100, 1)))
n_heads_fair = sum(fairCoin(biasedCoin) for i in range(flips))
print('{}%'.format(round(n_heads_fair / flips * 100, 1)))<jupyter_output>50.9%
<jupyter_text>## 9. Alice has 2 kids and one of them is a girl. What is the probability that the other child is also a girl? (You can assume that there are an equal number of males and females in the world.)
* ### The standard answer is 1/3
* ### The four equally likely birth orders are BB, BG, GB, GG; knowing at least one child is a girl leaves BG, GB, GG, and only one of those three has two girls# Statistics Section## 1. You're about to get on a plane to Seattle. You want to know if you should bring an umbrella. You call 3 random friends of yours who live there and ask each independently if it's raining.
* ### Each of your friends has a 2/3 chance of telling you the truth and a 1/3 chance of messing with you by lying.
* ### All 3 friends tell you that 'Yes' it is raining.
* ### You also know that there is a 25% chance it's raining on any given day in Seattle
* ### What is the probability that it's actually raining in Seattle?
* ### $$ P(A \mid B) = \frac {P(A) \times P(B \mid A)} {P(B \mid A) \times P(A) + P(B \mid \bar A) \times P(\bar A)} $$
* ### $$ P(raining \mid Yes,Yes,Yes) = \frac {P(raining) \times P(Yes,Yes,Yes \mid raining)} {P(Yes, Yes, Yes)} $$
* ### $$ P(Yes,Yes,Yes) = P(raining) \times P(Yes,Yes,Yes \mid raining) + P(\overline {raining}) \times P(Yes,Yes,Yes \mid \overline {raining}) $$
* ### $$ P(Yes,Yes,Yes) = 0.25\times(2/3)^3 + 0.75\times(1/3)^3 = 0.25\times(8/27) + 0.75\times(1/27) $$
* ### $$ P(raining \mid Yes,Yes,Yes) = 0.25\times(8/27) / ( 0.25\times8/27 + 0.75\times1/27 ) $$<jupyter_code>((1/4)*(8/27)) / ((1/4)*(8/27)+(3/4)*(1/27))
8/11<jupyter_output><empty_output><jupyter_text>## 2. A new quantum message system has a probability of 0.8 of success in any attempt to send a message through. Calculate the probability of having 7 successes in 10 attempts.
* ### Binomial PDF $$ {n \choose k} p^k (1-p)^{ n-k} $$
* ### Combinations $$ {n \choose k} = \frac {n!} {(n-k)!\,k!} $$<jupyter_code>from math import factorial as fct
round((fct(10) / (fct(3)*fct(7))) * (.8**7) * (1-.8)**(3), 4)
#BINOMIAL
font_size = 11
font_name = 'sans-serif'
fig = plt.figure(figsize=(10, 6), dpi=300)
splot = 0
# looxp through parameterizations of the beta
for n, p in [(10, 0.8), (11, 0.8), (12, 0.8)]:
    splot += 1
    ax = fig.add_subplot(1, 3, splot)
    x = np.arange(scs.binom.ppf(0.01, n, p), scs.binom.ppf(.99, n, p)+1)
    ax.plot(x, scs.binom.pmf(x, n, p), 'bo', ms=8, label='pmf')
    ax.vlines(x, 0, scs.binom.pmf(x, n, p), colors='b', lw=5, alpha=0.5)
    rv = scs.binom(n, p)
    ax.set_ylim((0, 1.0))
    ax.set_xlim((-0.5, 10.5))
    ax.set_title("n=%s,p=%s" % (n, p))
    ax.set_aspect(1./ax.get_data_ratio())
    ax.set_xlabel('k')
    for t in ax.get_xticklabels():
        t.set_fontsize(font_size-1)
        t.set_fontname(font_name)
    for t in ax.get_yticklabels():
        t.set_fontsize(font_size-1)
        t.set_fontname(font_name)<jupyter_output><empty_output><jupyter_text>## 3. What is the difference between Type I vs Type II error?
|  | Predict 0 | Predict 1 |
| --- | --- | --- |
| **Actual 0** | TN | FP (Type I) |
| **Actual 1** | FN (Type II) | TP |
* ### Accuracy = (TP + TN) / Total
* ### Misclassification Rate = (FP + FN) / Total
* ### Recall aka True Positive Rate aka Sensitivity = TP / Actual Yes
* ### Precision = TP / Predicted Yes
* ### False Positive Rate = FP / Actual No
* ### Specificity = TN / Actual No
* ### Prevalence = Actual Yes / Total## 4. Below
* ### A new medical test for a virus has been created.
* ### 1% of the population has the virus.
* ### 99% of sick people with the virus test positive
* ### 99% of healthy individuals test negative for the virus.
* ### $$ P(A \mid B) = \frac {P(A) \times P(B \mid A)} {P(B \mid A) \times P(A) + P(B \mid \bar A) \times P(\bar A)} $$<jupyter_code>p_sick_pos = .01 * .99     # P(sick) * P(positive | sick)
p_healthy_pos = .99 * .01  # P(healthy) * P(positive | healthy), the false-positive term
p_sick_pos / (p_sick_pos + p_healthy_pos)  # P(sick | positive) = 0.5<jupyter_output><empty_output><jupyter_text>## 5. Below
* ### The average life of a certain type of motor is 10 years with a standard deviation of 2 years.
* ### If the manufacturer is willing to replace only 3% because of failures, how long a guarantee should she offer?
* ### Assume Normal Distribution
* ### z-table
* ### $$ Z = \frac {X - \mu} {\sigma} $$
* ### $$ z = -1.88\:\:\:for\:p = .0301 $$
* ### $$ -1.88 = \frac {x-10} {2} $$
* ### $$ x = 6.24 $$
<jupyter_code>engine_norm = scs.norm(10, 2)
years = round(engine_norm.ppf(.03),2)
print('years {}'.format(years))<jupyter_output>years 6.24
 | 
	no_license | 
	/.ipynb_checkpoints/Data-Science-Career-Guide-Prob-Stats-Exper-ED-checkpoint.ipynb | 
	edeane/ds-interview-prep | 7 | 
| 
<jupyter_start><jupyter_text># Sequence Topic (1): Variable Assignment and Input/Output## I. Free Practice**1. Variable assignment**
Variables store data: the data is placed somewhere in the computer's memory, and that location is given a good name.**2. Variables: three, two, one**
One special symbol (the equals sign: name = data), two ways to operate (expressions and functions), three data types (text, numbers, and booleans). (1) A name alone is not enough; you need the equals sign and the data. The name goes on the left and the data on the right; an expression on the right also works.<jupyter_code># Practice
Sum = 0
A = 1
B = 2 # The left side of "=" must be a name; a constant cannot go there.
A + B = Sum # The left side of "=" cannot be an expression (this line raises a SyntaxError).
A, B = 1, 2 # This works and is the Pythonic way to write it.
Sum = 0
A = 1
B = 2 # The left side of "=" must be a name; a constant cannot go there.
Sum = A + B # Corrected: the expression now sits on the right side of "=".
A, B = 1, 2 # This works and is the Pythonic way to write it.<jupyter_output><empty_output><jupyter_text>(2) Variable naming: choose a good name.**3. Three data types: numbers, text, and booleans**
Integers and floats, strings, and True/False. Use type() to check a value's data type, and int(), float(), str(), bool() to set or convert the data type.<jupyter_code># Practice
print(87)
print(8.7)
hello_py1 = 'Hello Python!'
hello_py2 = "Hello Python!"
print(hello_py1)
print(hello_py2)
es1 = "I'm a student."
es2 = "I\'m a student."
es3 = 'I\'m a student.'
print(es1, es2, es3)
print(True)
print(False)<jupyter_output>87
8.7
Hello Python!
Hello Python!
I'm a student. I'm a student. I'm a student.
True
False
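<jupyter_text>A short extra demonstration of the type() check and the int(), float(), str(), bool() conversions described above; the values are arbitrary examples.<jupyter_code># Practice: checking and converting data types
print(type(87), type(8.7), type('Hello Python!'), type(True))
print(int('87') + 1)      # string -> integer
print(float('8.7') * 2)   # string -> float
print(str(87) + '!')      # integer -> string
print(bool(0), bool(1))   # integer -> boolean<jupyter_output><empty_output>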
<jupyter_text>(3) Use type() to check a data type. (4) Use int(), float(), str(), bool() to create and convert data types. **4. Input**
Use input() to read data. It needs parentheses, a prompt message, and assignment with the equals sign. The stored value is a text string, so you must convert its data type before doing math with it.<jupyter_code># Practice
a = input("Input number a = ")
b = input("Input number b = ")
print(a, b)
print(a + b)
print(a + 1)
# Practice
a = input("Input number a = ")
b = input("Input number b = ")
print(a, b)
print(a + b)
a = eval(a)
print(a + 1)<jupyter_output><empty_output><jupyter_text>**5. Output**
print() prints to the screen. Put the objects inside the parentheses; separate multiple objects with commas.
Note the escape sequences for separators, line breaks, and so on (\n, \t, \", \').
You can also print with formatted strings, i.e. print(f"...{variable}...")<jupyter_code># Practice
print("Hello, Francis")
name = "Francis"
print("Hello, %s" %name)
print("Hello, {}".format(name))
print(f"Hello, {name}")<jupyter_output><empty_output><jupyter_text>## II. 隨堂練習**隨堂練習1: 自我介紹**
We are going to do a lot of work with the computer and ask it to do many cool things for us, so let's introduce ourselves and say hello properly!
(1) Use the print() function to introduce yourself to the computer. Your introduction should appear in the console window; either Chinese or English is fine.
(2) Change the existing print() call to use an f-string.
Create two variables, name and age; assign your name as a string to name, and assign a mathematical expression that computes your age to age. Finally, use an f-string with the name and age variables to print your introduction!
Expected output examples:
"Hello, my name is Francis. I was born in 2000."
"Hi, my name is Francis and I am 21 years old!"<jupyter_code>print("Hello,my name is Rita. I was born in 2002.")
name= "Rita"
age= 2021 - 2002
print(f"Hi, My name is {name} and I am {age} years old!")<jupyter_output>Hello,my name is Rita. I was born in 2002.
Hi, My name is Rita and I am 19 years old!
<jupyter_text>**Exercise 2: Quote a quotation**
Search the web for a famous quote, or use one of your own. It can be words that motivate you, a funny line from a movie, or even something a family member says.<jupyter_code>print('\" Coding is a super power You can do so many cool things with your imagination and code.\"- I will dream for you')<jupyter_output>" Coding is a super power You can do so many cool things with your imagination and code."- I will dream for you
 | 
	no_license | 
	/謝欣汝_Week5_練習作業.ipynb | 
	a109010169/a109010169 | 6 | 
| 
	<jupyter_start><jupyter_text># Question 1
* Define Python functions for the two functions $e^{x}$ and $\cos(\cos(x))$ which return a vector (or scalar) value.  
* Plot the functions over the interval [−2$\pi$,$4\pi$).
* Discuss periodicity of both functions 
* Plot the expected functions from the Fourier series<jupyter_code># Imports assumed by this notebook (pylab-style names such as exp, cos, linspace, and figure are used throughout)
from pylab import *
import numpy as np
from scipy.integrate import quad
from scipy.linalg import lstsq

# Functions for $e^{x}$ and $\cos(\cos(x))$ are defined
def fexp(x):        
    return exp(x)
def fcoscos(x):
    return cos(cos(x))
x = linspace(-2*pi, 4*pi,400)  
#Period of function created using fourier coefficients will be 2pi
period = 2*pi 
exp_fn = fexp(x)               #finding exp(x) for all values in x vector
cos_fn = fcoscos(x)            #finding cos(cos(x)) for all values in x vector
#Plotting original function vs expected function for exp(x)
fig1 = figure(figsize=(9,7))
ax1 = fig1.add_subplot(111)
ax1.semilogy(x,exp_fn,'k',label="Original Function")
#plotting expected function by dividing the x by period and giving remainder as
#input to the function, so that x values repeat after given period.
ax1.semilogy(x,fexp(x%period),'--',label="Expected Function from fourier series")
ax1.legend(numpoints=3,loc='best')
title("Figure 1 : Plot of $e^{x}$")
xlabel(r"$x \to$")
ylabel("$e^{x}$")
grid()
savefig("Figure1.jpg")
#Plotting original function vs expected function for cos(cos((x)))
fig2 = figure()
ax2 = fig2.add_subplot(111)
ax2.plot(x,cos_fn,'b',lw=4,label="Original Function")
#plotting expected function by dividing the x by period and giving remainder as
#input to the function, so that x values repeat after given period.
ax2.semilogy(x,fcoscos(x%period),'y--',label="Expected Function from fourier series")
ax2.legend(loc='upper right')
title("Figure 2 : Plot of $\cos(\cos(x))$")
xlabel("$x$")
ylabel("$\cos(\cos(x))$")
grid()
savefig("Figure2.jpg")
show()<jupyter_output><empty_output><jupyter_text># Results and Discussion :
* We observe that $e^{x}$ is not periodic,  whereas $\cos(\cos(x))$ is periodic as the expected and original function matched for the latter but not for $e^{x}$.
* The period of $\cos(\cos(x))$ is $2\pi$ as we observe from the graph, while $e^{x}$ is monotonically increasing and hence not periodic.
* We get expected function by:
    * plotting expected function by dividing the x by period and giving remainder as input to the function, so that x values repeat after given period.
    * That is f(x%period) is now the expected periodic function from fourier series.# Question 2
* Obtain the first 51 coefficients i.e $a_{0}, a_{1}, b_{1},....$ for $e^{x}$ and $\cos(\cos(x))$ using scipy quad function
* And to calculate the function using those coefficients and compare it with the original functions graphically.<jupyter_code># Integrands f(x)cos(kx) and f(x)sin(kx) used to compute the Fourier coefficients
def fourier_an(x,k,f):
    return f(x)*cos(k*x)
def fourier_bn(x,k,f):
    return f(x)*sin(k*x)
#function to find the fourier coefficients taking function 'f' as argument.
def find_coeff(f):
    
    coeff = []
    coeff.append((quad(f,0,2*pi)[0])/(2*pi))
    for i in range(1,26):
        coeff.append((quad(fourier_an,0,2*pi,args=(i,f))[0])/pi)
        coeff.append((quad(fourier_bn,0,2*pi,args=(i,f))[0])/pi)
        
    return coeff
#function to create 'A' matrix for calculating function back from coefficients
# with no_of rows, columns and vector x as arguments
def createAmatrix(nrow,ncol,x):
    A = zeros((nrow,ncol)) # allocate space for A
    A[:,0]=1 # col 1 is all ones
    for k in range(1,int((ncol+1)/2)):
        A[:,2*k-1]=cos(k*x) # cos(kx) column
        A[:,2*k]=sin(k*x) # sin(kx) column
    #endfor
    return A
#Function to compute function from coefficients with argument as coefficient vector 'c'
def computeFunctionfromCoeff(c):
    A = createAmatrix(400,51,x)
    f_fourier = A.dot(c)
    return f_fourier
# Initialising empty lists to store coefficients for both functions
exp_coeff = []               
coscos_coeff = []
exp_coeff1 = []
coscos_coeff1 = []
exp_coeff1 = find_coeff(fexp)
coscos_coeff1 = find_coeff(fcoscos)
# to store absolute value of coefficients
exp_coeff = np.abs(exp_coeff1)
coscos_coeff = np.abs(coscos_coeff1)
# Computing function using fourier coeff
fexp_fourier = computeFunctionfromCoeff(exp_coeff1)
fcoscos_fourier = computeFunctionfromCoeff(coscos_coeff1)
# Plotting the Function computed using Fourier Coefficients
ax1.semilogy(x,fexp_fourier,'ro',label = "Function using Fourier Coefficients")
ax1.set_ylim([pow(10,-1),pow(10,4)])
ax1.legend()
fig1
ax2.plot(x,fcoscos_fourier,'ro',label = "Function using Fourier Coefficients")
ax2.legend(loc='upper right')
fig2<jupyter_output><empty_output><jupyter_text># Question 3
* Two different plots for each function using “semilogy” and “loglog” and plot the magnitude of the coefficients vs n
* And to analyse them and to discuss the observations.
## Plots: 
* For each function magnitude of $a_{n}$ and $b_{n}$ coefficients which are computed using integration are plotted in same figure in semilog as well as loglog plot for simpler comparisons.<jupyter_code># Plotting
fig3 = figure()
ax3 = fig3.add_subplot(111)
# By using array indexing methods we separate all odd indexes starting from 1 -> an
# and all even indexes starting from 2 -> bn
ax3.semilogy((exp_coeff[1::2]),'ro',label = "$a_{n}$ using Integration")
ax3.semilogy((exp_coeff[2::2]),'ko',label = "$b_{n}$ using Integration")
ax3.legend()
title("Figure 3 : Fourier coefficients of $e^{x}$ (semi-log)")
xlabel("n")
ylabel("Magnitude of coeffients")
show()
fig4 = figure()
ax4 = fig4.add_subplot(111)
# By using array indexing methods we separate all odd indexes starting from 1 -> an
# and all even indexes starting from 2 -> bn
ax4.loglog((exp_coeff[1::2]),'ro',label = "$a_{n}$ using Integration")
ax4.loglog((exp_coeff[2::2]),'ko',label = "$b_{n}$ using Integration")
ax4.legend(loc='upper right')
title("Figure 4 : Fourier coefficients of $e^{x}$ (Log-Log)")
xlabel("n")
ylabel("Magnitude of coeffients")
show()
fig5 = figure()
ax5 = fig5.add_subplot(111)
# By using array indexing methods we separate all odd indexes starting from 1 -> an
# and all even indexes starting from 2 -> bn
ax5.semilogy((coscos_coeff[1::2]),'ro',label = "$a_{n}$ using Integration")
ax5.semilogy((coscos_coeff[2::2]),'ko',label = "$b_{n}$ using Integration")
ax5.legend(loc='upper right')
title("Figure 5 : Fourier coefficients of $\cos(\cos(x))$ (semi-log)")
xlabel("n")
ylabel("Magnitude of coeffients")
show()
fig6 = figure()
ax6 = fig6.add_subplot(111)
# By using array indexing methods we separate all odd indexes starting from 1 -> an
# and all even indexes starting from 2 -> bn
ax6.loglog((coscos_coeff[1::2]),'ro',label = "$a_{n}$ using Integration")
ax6.loglog((coscos_coeff[2::2]),'ko',label = "$b_{n}$ using Integration")
ax6.legend(loc='upper right')
title("Figure 6 : Fourier coefficients of $\cos(\cos(x))$  (Log-Log)")
xlabel("n")
ylabel("Magnitude of coeffients")
show()<jupyter_output><empty_output><jupyter_text># Results and Observations : 
* The $b_{n}$ coefficients in the second case should be nearly zero. Why does this happen?
   * Because $\cos(\cos(x))$ is an even function. For $b_{n}$ the integral can be taken over any interval of length $2\pi$, so for convenience we choose $[-\pi,\pi)$; the integrand is then odd because of the $\sin(nx)$ factor, so the integral is zero analytically. Here, however, we compute it with the quad function, which uses numerical methods, so $b_{n}$ is very small but not exactly zero.
* In the first case, the coefficients do not decay as quickly as the coefficients for the second case.
Why not?
  * The rate of decay of the Fourier coefficients is determined by how smooth the function is: if a function is infinitely differentiable, its Fourier coefficients decay very quickly, whereas if the $k^{th}$ derivative of the function is discontinuous, the coefficients fall off only as $\frac{1}{n^{k+1}}$ in order to converge. In the first case the periodic extension of $e^{x}$ is discontinuous at $2n\pi$, so the coefficients fall off as $\frac{1}{n}$ and more of them are needed for a given accuracy; they do not decay as quickly as those of $\cos(\cos(x))$, which is infinitely differentiable and smooth, so fewer coefficients are needed to reconstruct it and its coefficients decay faster.
* Why does the log-log plot in Figure 4 look linear, whereas the semilog plot in Figure 5 looks linear?
  * Because the coefficients of $e^{x}$ vary as a power of $n$, whereas the coefficients of $\cos(\cos(x))$ vary exponentially with $n$ (like $\alpha^{n}$); that is why the log-log plot looks linear in the first case and the semilog plot in the second.# Question 4 & 5
* Uses least squares method approach to find the fourier coefficients of $e^{x}$ and $\cos(\cos(x))$ 
* Evaluate both the functions at each x values and call it b. Now this is approximated by
  $a_{0} + \sum\limits_{n=1}^{\infty} {{a_{n}\cos(nx)+b_{n}\sin(nx)}}$ 
* such that \begin{equation}
    a_{0} + \sum\limits_{n=1}^{\infty} {{a_{n}\cos(nx_{i})+b_{n}\sin(nx_{i})}} \approx f(x_{i}) 
    \end{equation}
* To implement this we use matrices and find the coefficients with the least squares method via the built-in Python function 'lstsq' (a small sanity check of the idea follows, before the full implementation).
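* A tiny self-contained sanity check of this least-squares idea (the `_chk` names below are new to this check, not part of the assignment): a signal whose Fourier coefficients are known exactly should be recovered by lstsq.<jupyter_code># Least-squares recovery of known Fourier coefficients: f(x) = 1 + 2cos(x) + 3sin(2x)
import numpy as np
from scipy.linalg import lstsq as _lstsq

x_chk = np.linspace(0, 2 * np.pi, 200, endpoint=False)
y_chk = 1 + 2 * np.cos(x_chk) + 3 * np.sin(2 * x_chk)

# Columns: [1, cos(x), sin(x), cos(2x), sin(2x)], the same layout as the A matrix built below
A_chk = np.column_stack([np.ones_like(x_chk),
                         np.cos(x_chk), np.sin(x_chk),
                         np.cos(2 * x_chk), np.sin(2 * x_chk)])
c_chk = _lstsq(A_chk, y_chk)[0]
print(np.round(c_chk, 6))   # expected: [1. 2. 0. 0. 3.]<jupyter_output><empty_output>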
<jupyter_code>#Function to calculate coefficients using lstsq and by calling 
# function 'createAmatrix' which was defined earlier in the code 
# to create 'A' matrix with arguments as function 'f' and lower
# and upper limits of input x and no_of points needed
def getCoeffByLeastSq(f,low_lim,upp_lim,no_points):
    x1 = linspace(low_lim,upp_lim,no_points)
    # drop last term to have a proper periodic integral
    x1 = x1[:-1]
    b = []
    b = f(x1)
    A = createAmatrix(no_points-1,51,x1)
    c = []
    c=lstsq(A,b)[0] # the ’[0]’ is to pull out the
    # best fit vector. lstsq returns a list.
    return c
# Calling function and storing them in respective vectors.
coeff_exp = getCoeffByLeastSq(fexp,0,2*pi,401)
coeff_coscos = getCoeffByLeastSq(fcoscos,0,2*pi,401)
# To plot magnitude of coefficients this is used
c1 = np.abs(coeff_exp)
c2 = np.abs(coeff_coscos)
# Plotting in coefficients got using Lstsq in corresponding figures
# 3,4,5,6 using axes.
ax3.semilogy((c1[1::2]),'go',label = "$a_{n}$ using Least Squares")
ax3.semilogy((c1[2::2]),'bo',label = "$b_{n}$ using Least Squares")
ax3.legend(loc='upper right')
fig3
ax4.loglog((c1[1::2]),'go',label = "$a_{n}$ using Least Squares ")
ax4.loglog((c1[2::2]),'bo',label = "$b_{n}$ using Least Squares")
ax4.legend(loc='lower left')
fig4
ax5.semilogy((c2[1::2]),'go',label = "$a_{n}$ using Least Squares")
ax5.semilogy((c2[2::2]),'bo',label = "$b_{n}$ using Least Squares")
ax5.legend(loc='upper right')
fig5
ax6.loglog((c2[1::2]),'go',label = "$a_{n}$ using Least Squares ")
ax6.loglog((c2[2::2]),'bo',label = "$b_{n}$ using Least Squares")
ax6.legend(loc=0)
fig6<jupyter_output><empty_output><jupyter_text># Question 6
* To compare the answers got by least squares and by the direct integration.
* And finding deviation between them and find the largest deviation using Vectors<jupyter_code># Function to compare the coefficients got by integration and 
# least squares and find largest deviation using Vectorized Technique
# Argument: integer f, which is either 1 or 2.
# 1 -> exp(x)    2 -> cos(cos(x))
def compareCoeff(f):
    deviations = []
    max_dev = 0
    if(f==1):
        deviations = np.abs(exp_coeff1 - coeff_exp)
    elif(f==2):
        deviations = np.abs(coscos_coeff1 - coeff_coscos)
        
    max_dev = np.amax(deviations)
    return deviations,max_dev
dev1,maxdev1 = compareCoeff(1)
dev2,maxdev2 = compareCoeff(2)
print("The largest deviation for exp(x) : %g" %(maxdev1))
print("The largest deviation for cos(cos(x)) : %g" %(maxdev2))
# Plotting the deviation vs n 
plot(dev1,'g')
title("Figure 7 : Deviation between Coefficients for $e^{x}$")
xlabel("n")
ylabel("Magnitude of Deviations")
show()
# Plotting the deviation vs n 
plot(dev2,'g')
title("Figure 8 : Deviation between coefficients for $\cos(\cos(x))$")
xlabel("n")
ylabel("Magnitude of Deviations")
show()<jupyter_output><empty_output><jupyter_text># Results and Discussion :
* We observe a significant deviation for $e^{x}$: its periodic extension has discontinuities at $2n\pi$ (visible in Figure 1), and hence the **Gibbs phenomenon** appears, i.e. oscillations around the discontinuity points.
* Also, importantly, in the **least squares method** we use relatively few points over the x range (a large step size), whereas when finding the coefficients by integration the quad function chooses its own much **smaller step size**, so it effectively has many more points to fit. Indeed, when we increase the number of points from 400 to $10^{5}$, the deviation becomes much smaller than before.
* This happens because $e^{x}$ is an aperiodic function, so more Fourier coefficients are needed to reconstruct the signal, whereas $\cos(\cos(x))$ is periodic and fits well with fewer points.
* Due to this, the Fourier coefficients obtained by least squares do not fit the curve exactly.
* For $\cos(\cos(x))$, on the other hand, the largest deviation is of order $10^{-15}$, because the function is periodic and continuous over the entire x range, so the deviation is negligible.
* As we know, a Fourier series represents periodic signals in the frequency domain, and $e^{x}$ is an aperiodic signal; an aperiodic signal cannot be fully described on an interval of finite length (trying to do so loses information about the signal), so the Fourier transform must be used for such a signal instead.
* That is why significant deviations are found for $e^{x}$.# Question 7
* Computing Ac, i.e. multiplying matrix A by the coefficient vector C estimated by the least squares method.
* To Plot them (with green circles) in Figures 1 and 2 respectively for the two functions.<jupyter_code># Define vector x1 from 0 to 2pi
x1 = linspace(0,2*pi,400)
# Function to reconstruct the signalfrom coefficients
# computed using Least Squares.
# Takes coefficient vector : 'c' as argument
# returns vector values of function at each x
def computeFunctionbyLeastSq(c):
    f_lstsq = []
    A = createAmatrix(400,51,x1)
    f_lstsq = A.dot(c)
    return f_lstsq
fexp_lstsq = computeFunctionbyLeastSq(coeff_exp)
fcoscos_lstsq = computeFunctionbyLeastSq(coeff_coscos)
# Plotting in Figure1 to compare the original function 
# and Reconstructed one using Least Squares method
ax1.semilogy(x1,fexp_lstsq,'go',
             label = "Inverse Fourier Transform From Least Squares")
ax1.legend()
ax1.set_ylim([pow(10,-2),pow(10,5)])
ax1.set_xlim([0,2*pi])
fig1
ax2.plot(x1,fcoscos_lstsq,'go',markersize=4,
         label = "Inverse Fourier Transform From Least Squares")
ax2.set_ylim([0.5,1.3])
ax2.set_xlim([0,2*pi])
ax2.legend()
fig2<jupyter_output><empty_output> | 
	no_license | 
	/Assign3/.ipynb_checkpoints/ass3q1-checkpoint.ipynb | 
	Rohithram/EE2703_Applied_Programming_in_python | 6 | 
| 
	<jupyter_start><jupyter_text># Exploratory Data Analysis on stock data from 5 companies.
IBM
GE
Procter & Gamble
Coca Cola
Boeing<jupyter_code>%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
ibm = pd.read_csv('D:/DS/analytics_edge/IBMStock.csv')
ge = pd.read_csv('D:/DS/analytics_edge/GEStock.csv')
png = pd.read_csv('D:/DS/analytics_edge/ProcterGambleStock.csv')
cc = pd.read_csv('D:/DS/analytics_edge/CocaColaStock.csv')
boeing = pd.read_csv('D:/DS/analytics_edge/BoeingStock.csv')
ibm.head()
ibm['Date'] = pd.to_datetime(ibm['Date'])
ge['Date'] = pd.to_datetime(ge['Date'])
png['Date'] = pd.to_datetime(png['Date'])
cc['Date'] = pd.to_datetime(cc['Date'])
boeing['Date'] = pd.to_datetime(boeing['Date'])
ibm.shape<jupyter_output><empty_output><jupyter_text>Total number of observations in each of the 5 datasets is 480 with 2 columns each.<jupyter_code>ibm['Date'].describe()<jupyter_output><empty_output><jupyter_text>first     1970-01-01 00:00:00
So, the earliest date is January 1, 1970 for any dataset
last      2009-12-01 00:00:00
and, latest date is December 1, 2009 for any dataset.<jupyter_code>ibm['StockPrice'].mean()
ge['StockPrice'].min()
cc['StockPrice'].max()
boeing['StockPrice'].median()
png['StockPrice'].std()<jupyter_output><empty_output><jupyter_text>### Visualizing stock dynamics<jupyter_code>plt.plot(cc['Date'], cc['StockPrice'])
plt.plot(png['Date'], png['StockPrice'])
plt.legend()
a = cc.iloc[(cc.index > 301) & (cc.index < 420)]
b = png.iloc[(png.index > 301) & (png.index < 420)]
c = ge.iloc[(ge.index > 301) & (ge.index < 420)]
d = boeing.iloc[(boeing.index > 301) & (boeing.index < 420)]
e = ibm.iloc[(ibm.index > 301) & (ibm.index < 420)]
plt.plot(a['Date'], a['StockPrice'])
plt.plot(b['Date'], b['StockPrice'])
plt.plot(c['Date'], c['StockPrice'])
plt.plot(d['Date'], d['StockPrice'])
plt.plot(e['Date'], e['StockPrice'])
labels = ['Coca Cola', 'P&G', 'GE', 'Boeing', 'IBM']
plt.legend(labels)<jupyter_output><empty_output><jupyter_text>### Monthly Trends<jupyter_code>ibm.head()
ibm['Month'] = pd.DatetimeIndex(ibm['Date']).month
ibm.head()
a = ibm.groupby(pd.Grouper('Month')).mean()
a
ibm['StockPrice'].mean()<jupyter_output><empty_output><jupyter_text>IBM had a higher stock price than its overall average during the first 5 months of the year.Creating a function to compute the average of each month across the entire dataset.<jupyter_code>def monthly_trends(df):
    df['Month'] = pd.DatetimeIndex(df['Date']).month
    a = df.groupby(pd.Grouper('Month')).mean()
    return a
monthly_trends(ibm)
monthly_trends(cc)
monthly_trends(png)
monthly_trends(boeing)
monthly_trends(ge)<jupyter_output><empty_output> | 
	permissive | 
	/1. EDA/3. Stock Dynamics.ipynb | 
	atheesh1998/analytics-edge | 6 | 
| 
	<jupyter_start><jupyter_text># HyperParameter Tuning### `keras.wrappers.scikit_learn`
Example adapted from: [https://github.com/fchollet/keras/blob/master/examples/mnist_sklearn_wrapper.py]()## Problem: 
Builds simple CNN models on MNIST and uses sklearn's GridSearchCV to find best model<jupyter_code>import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.wrappers.scikit_learn import KerasClassifier
from keras import backend as K
from sklearn.model_selection import GridSearchCV<jupyter_output><empty_output><jupyter_text># Data Preparation<jupyter_code>nb_classes = 10
# input image dimensions
img_rows, img_cols = 28, 28
# load training data and do basic data normalization
(X_train, y_train), (X_test, y_test) = mnist.load_data()
if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)<jupyter_output><empty_output><jupyter_text>## Build Model<jupyter_code>def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates model comprised of 2 convolutional layers followed by dense layers
    dense_layer_sizes: List of layer sizes. This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''
    model = Sequential()
    model.add(Conv2D(filters, (kernel_size, kernel_size),
                     padding='valid', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, (kernel_size, kernel_size)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
dense_size_candidates = [[32], [64], [32, 32], [64, 64]]
my_classifier = KerasClassifier(make_model, batch_size=32)<jupyter_output><empty_output><jupyter_text>## GridSearch HyperParameters<jupyter_code>validator = GridSearchCV(my_classifier,
                         param_grid={'dense_layer_sizes': dense_size_candidates,
                                     # 'epochs' is available for tuning even when not
                                     # an argument to the model-building function
                                     'epochs': [3, 6],
                                     'filters': [8],
                                     'kernel_size': [3],
                                     'pool_size': [2]},
                         scoring='neg_log_loss',
                         n_jobs=1)
validator.fit(X_train, y_train)
print('The parameters of the best model are: ')
print(validator.best_params_)
# validator.best_estimator_ returns sklearn-wrapped version of best model.
# validator.best_estimator_.model returns the (unwrapped) keras model
best_model = validator.best_estimator_.model
metric_names = best_model.metrics_names
metric_values = best_model.evaluate(X_test, y_test)
for metric, value in zip(metric_names, metric_values):
    print(metric, ': ', value)<jupyter_output>The parameters of the best model are: 
{'filters': 8, 'pool_size': 2, 'epochs': 6, 'dense_layer_sizes': [64, 64], 'kernel_size': 3}
 9920/10000 [============================>.] - ETA: 0sloss :  0.0577878101223
acc :  0.9822
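<jupyter_text>An optional way to inspect every candidate tried by the grid search (this assumes the fitted `validator` from above and that pandas is installed):<jupyter_code># Summarize all grid-search candidates, sorted by their cross-validated rank
import pandas as pd

results = pd.DataFrame(validator.cv_results_)
cols = ['params', 'mean_test_score', 'std_test_score', 'rank_test_score']
print(results[cols].sort_values('rank_test_score').to_string(index=False))<jupyter_output><empty_output>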
 | 
	no_license | 
	/05_keras/keras_lab_2/04 - HyperParameter Tuning.ipynb | 
	ginevracoal/statisticalMachineLearning | 4 | 
| 
	<jupyter_start><jupyter_text># K-Means Clustering## Importing the libraries<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
input_path = "/Users/sayarsamanta/Documents/GitHub/Data-Science-Projects/Clustering/KNN/Data/"<jupyter_output><empty_output><jupyter_text>## Importing the dataset<jupyter_code>dataset = pd.read_csv(input_path+'Mall_Customers.csv')
X = dataset.iloc[:, [2, 4]].values<jupyter_output><empty_output><jupyter_text>## Using the elbow method to find the optimal number of clusters<jupyter_code>from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()<jupyter_output><empty_output><jupyter_text>## Training the K-Means model on the dataset<jupyter_code>kmeans = KMeans(n_clusters = 4, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(X)<jupyter_output><empty_output><jupyter_text>## Visualising the clusters<jupyter_code>plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
#plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
#plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')
plt.title('Clusters of customers')
plt.xlabel('Age')  # columns [2, 4] selected above correspond to Age and Spending Score
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()<jupyter_output><empty_output> | 
	no_license | 
	/Clustering/KNN/.ipynb_checkpoints/k_means_clustering-checkpoint.ipynb | 
	sayarsamanta/Data-Science-Projects | 5 | 
| 
	<jupyter_start><jupyter_text># Data Munging## Relational Data
The simplest type of data we might see consists of a single table with some columns and some rows. This sort of data is easy to analyze, and we generally want to reduce our data to a single table before we start running machine learning algorithms. Yet real-world data doesn't necessarily fit into this paradigm.  Most real-world data is messy and complicated, doesn't fit well into a tabular format, and requires some work to reduce its complexity.  Additionally, in many cases we can reduce our memory cost by not keeping data in a single table, but instead in a set of data structures with defined relations between them. 
Here we will explore a bit of data and see how combining different sets of data can help us generate useful features.
First we need some data.  We will make use of some data from Wikipedia and we will use the pandas `read_html` function to scrape the data from a particular webpage.  We will study the top 10 companies in the Fortune Global 500 which conveniently have [their own Wikipedia page](https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446).
We will download the data in tabular form, but work with it as a list of dictionaries, this will allow us to get used to working with unstructured data.<jupyter_code>import pandas as pd
import json
df = pd.read_html('https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446', header=0)[0]
fortune_500 = json.loads(df.to_json(orient="records"))
df<jupyter_output><empty_output><jupyter_text>Lets look at the data.<jupyter_code>fortune_500<jupyter_output><empty_output><jupyter_text>This is a great start to our analysis, however, there really isn't that much information here, we will need to bring in additional data sources to get any further understanding of these companies.
The first question we might want to ask is how many employees does it take to get that revenue, in other words, what is the revenue per employee?  Luckily, we can use Wikipedia to get that data as well, we have scraped this data manually (all from Wikipedia) and created the following dictionary.<jupyter_code>other_data = [
    {"name": "Walmart",
     "employees": 2300000,
     "year founded": 1962
    },
    {"name": "State Grid Corporation of China",
     "employees": 927839,
     "year founded": 2002},
    {"name": "China Petrochemical Corporation",
     "employees":358571,
     "year founded": 1998
     },
    {"name": "China National Petroleum Corporation",
     "employees": 1636532,
     "year founded": 1988},
    {"name": "Toyota Motor Corporation",
     "employees": 364445,
     "year founded": 1937},
    {"name": "Volkswagen AG",
     "employees": 642292,
     "year founded": 1937},
    {"name": "Royal Dutch Shell",
     "employees": 92000,
     "year founded": 1907},
    {"name": "Berkshire Hathaway Inc.",
     "employees":377000,
     "year founded": 1839},
    {"name": "Apple Inc.",
     "employees": 123000,
     "year founded": 1976},
    {"name": "Exxon Mobile Corporation",
     "employees": 69600,
     "year founded": 1999},
    {"name": "BP plc",
     "employees": 74000,
     "year founded": 1908}
]<jupyter_output><empty_output><jupyter_text>Some data have a slightly different name than in our original set, so we will keep a dictionary of mappings between the two.  Notice, we only include the mapping in the dictionary if there is a difference.<jupyter_code>mapping = {
    'Apple': 'Apple Inc.',
    'BP': 'BP plc',
    'Berkshire Hathaway': 'Berkshire Hathaway Inc.',
    'China National Petroleum': 'China National Petroleum Corporation',
    'Exxon Mobil': 'Exxon Mobile Corporation',
    'Sinopec Group': 'China Petrochemical Corporation',
    'State Grid': 'State Grid Corporation of China',
    'Toyota Motor': 'Toyota Motor Corporation',
    'Volkswagen': 'Volkswagen AG'
}<jupyter_output><empty_output><jupyter_text>This data is one to one, meaning the data contained in one source only aligns with a single element in the other source, thus we should be able to put these together.  However, we know that the data isn't in a great form to be joined at the moment.  This is for two reasons
1. All the names will not align (we need to use our mapping)
2. The `list` structure is not optimized for looking through elements. 
While for 10 elements the second reason won't really matter, for larger data sets such performance considerations are extremely important.  We can turn this list of dictionaries into a dictionary of dictionaries, so we can quickly access each element of the data.<jupyter_code>dict_data = {k["name"] : k for k in other_data}
dict_data<jupyter_output><empty_output><jupyter_text>**Question:** If we had many entries in `other_data`, we could display a small piece by printing `other_data[:5]`. With dataframes we might use `df.head()`. Can you think of a way to print out a small piece of a dictionary?Now we can easily compute the revenue per employee. We need to map the "Company" value in our original data to the "name" column of this other data, but we also need to use the mapping to ensure the columns will line up.  We generally don't want to mutate our original data, so let's make a new list of dictionaries with this new feature (revenue per employee).  In the course of doing this, we will need to handle converting numbers like `$500 Billion` to a numeric value.  Let's create a function to do this.<jupyter_code>def convert_revenue(x):
    return float(x.lstrip('$').rstrip('billion')) * 1e9
assert convert_revenue('$500 billion') == 500e9<jupyter_output><empty_output><jupyter_text>Now we should be able to create a few functions to compute this revenue per employee and create a data list.<jupyter_code>def rev_per_emp(company):
    name = company[u'Company']
    n_employees = dict_data[mapping.get(name, name)].get('employees')
    company['rev per emp'] = convert_revenue(company[u'Revenue in USD'])/n_employees
    return company
def compute_copy(d, func):
    return func({k:v for k,v in d.items()})
data = list(map(lambda x : compute_copy(x, rev_per_emp), fortune_500))<jupyter_output><empty_output><jupyter_text>Lets take a look at our new data and also the old data to ensure we didn't mutate anything.<jupyter_code>data[:2]
fortune_500[:2]<jupyter_output><empty_output><jupyter_text>Now we can sort these values.  We can first select out the elements we care about and then sort that list.<jupyter_code>rev_per_emp = sorted([(i[u'Company'], i['rev per emp']) for i in data], 
                   key=lambda x : x[1],
                   reverse=True)
rev_per_emp<jupyter_output><empty_output><jupyter_text>This results in a much different order.  What does this tell us about the companies?
Now lets pull in some other data (this is data science, more data is always better!).  We can see that these companies are in a few different industries, let find out which ones.<jupyter_code>from collections import Counter
Counter(i[u'Industry'] for i in data)<jupyter_output><empty_output><jupyter_text>One thing we might want to know is what sort of market share they have of the specific industry to which they belong.  Let's look at the two industries that categorize the 6 of the top 10, `Automobiles` and `Petroleum`.  We can select only those elements of our data to work with.<jupyter_code>sub_data = [i for i in data if i[u'Industry'] in [u'Automobiles', u'Petroleum']]
sub_data<jupyter_output><empty_output><jupyter_text>It might be the case that the each particular category has a different relevant metric for market share.  For example, we could look at total revenue for a car company or we could look at cars produced.  
So for the automobile industry we will look at the percent total of cars produced.  We can get this data again from Wikipedia.<jupyter_code>df_list = pd.read_html("https://en.wikipedia.org/w/index.php?title=Automotive_industry&oldid=875776152", header=0)
car_totals = json.loads(df_list[0].to_json(orient="records"))
car_by_man = json.loads(df_list[2].to_json(orient='records'))
car_totals[:2]
car_by_man[:2]<jupyter_output><empty_output><jupyter_text>Now lets get only the groups we care about and divide by the total production which we will take as the latest year.<jupyter_code>total_prod = sorted((i[u"Year"], i[u'Production']) for i in car_totals)[-1][1]
total_prod<jupyter_output><empty_output><jupyter_text>Now we can find the market share for each of the car companies. We will keep track of a market share dictionary.  We will again need to keep track of some slight name differences.<jupyter_code>car_by_man_dict = {i[u'Group']:i[u'Vehicles'] for i in car_by_man}
market_share = {}
for name, orig_name in zip(['Toyota', 'Volkswagen Group'], ['Toyota', 'Volkswagen']):
    market_share[orig_name] = car_by_man_dict[name]/ float(total_prod)
    
market_share<jupyter_output><empty_output><jupyter_text>Now we can do the same for the Petroleum industry, but in this case, let's compute the market share by revenue.  On Wikipedia, we can find a list of oil companies by revenue.  Although it's not a complete list, it has enough companies that we don't expect the companies left off the list to contribute greatly to our analysis.<jupyter_code>rev = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_largest_oil_and_gas_companies_by_revenue&oldid=871711850", header=1)[0]
rev = rev.iloc[:, 1:3]
rev.columns = ['Company', 'Revenue']
rev = rev[~(rev['Company'] == 'Company name')]
oil_data = json.loads(rev.to_json(orient="records"))
oil_data[:2]<jupyter_output><empty_output><jupyter_text>Now we can compute the totals and market share.  Since the data here might be slightly different (perhaps older) than our original data, we will compute the market share of each company within this data set, then pull out the numbers we care about.<jupyter_code>total = sum([float(i[u'Revenue'].rstrip('*')) for i in oil_data])
shares = {i[u'Company']:float(i[u'Revenue'].rstrip('*'))/total for i in oil_data}
print(total)<jupyter_output><empty_output><jupyter_text>Now we can pull out the companies we care about in the petroleum industry.<jupyter_code>petro_companies = [i[u'Company'] for i in data if i['Industry'] == u'Petroleum']
petro_companies<jupyter_output><empty_output><jupyter_text>Let's check if these are all in our shares dictionary.<jupyter_code>[(i, i in shares) for i in petro_companies]<jupyter_output><empty_output><jupyter_text>Some of these companies are directly there, and looking through our dictionary, we can see the others are there under slightly different names.<jupyter_code>shares.keys()<jupyter_output><empty_output><jupyter_text>So let's make a fuzzy match: a pretty simple one that tries to match words in a name and takes the maximum number of matches.<jupyter_code>def fuzzy_match(word, s):
    words = set(word.split(' '))
    overlaps = [(k, len(v.intersection(words))) for k, v in s.items()]
    return max(overlaps, key=lambda x : x[1])[0]
split_names = {i: set(i.split(' ')) for i in shares.keys()}
for i in petro_companies:
    match = fuzzy_match(i, split_names)
    print("matched {} to {}".format(i, match))
    market_share[i] = shares[match]
market_share<jupyter_output><empty_output><jupyter_text>## By industry
We have some nice examples of data munging; now let's see an example of keeping data in a relational fashion.  Let's say we want to add another feature: the growth of each industry.  If we stored this value on every company row, we would be saving a bunch of redundant information; we would be much better off extracting it and keeping it in a single industry-level table so we are not replicating it by industry.
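As a sketch of what that could look like (the industry names and growth figures below are made-up placeholders, not real data), we keep one small industry-level table and merge it in only when the feature is needed:<jupyter_code># Hypothetical industry-level table; the growth numbers are placeholders for illustration only
industry_growth = pd.DataFrame({
    'Industry': ['Automobiles', 'Petroleum', 'Retail', 'Financials', 'Technology', 'Electricity'],
    'annual_growth': [0.02, 0.01, 0.03, 0.04, 0.06, 0.02]
})

# Each company row picks up its industry's growth through the relation, without duplicating it in storage
df_with_growth = df.merge(industry_growth, on='Industry', how='left')
df_with_growth[['Company', 'Industry', 'annual_growth']]<jupyter_output><empty_output><jupyter_text>## With Pandas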
Now we can also perform these same computations with Pandas; let's see how this compares.<jupyter_code>df = pd.read_html('https://en.wikipedia.org/w/index.php?title=Fortune_Global_500&oldid=855890446', header=0)[0]
df
df['rev'] = df['Revenue in USD'].apply(convert_revenue)
df['employees'] = df['Company'].apply(lambda x : dict_data[mapping.get(x, x)].get('employees'))
df['rev_per_employee'] = df['rev'] / df['employees'].astype(float)
df.sort_values(by='rev_per_employee', ascending=False)
df_list = pd.read_html("https://en.wikipedia.org/w/index.php?title=Automotive_industry&oldid=875776152", header=0)
df_totals = df_list[0]
df_by_man = df_list[2]
total_prod = df_totals.sort_values(by='Year').iloc[-1]['Production']
total_prod
df_by_man['share'] = df_by_man['Vehicles'].astype(float) / total_prod
market_share = df_by_man.set_index('Group')['share'][['Toyota', 'Volkswagen Group']]
market_share
rev = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_largest_oil_and_gas_companies_by_revenue&oldid=871711850", header=1)[0]
rev = rev.iloc[:, 1:3]
rev.columns = ['Company', 'Revenue']
rev = rev[~(rev['Company'] == 'Company name')]
rev
rev['rev_clean'] = rev['Revenue'].apply(lambda x : float(x.rstrip('*')))
total = rev['rev_clean'].sum()
total
rev['share'] = rev['rev_clean'] / total
rev
rev = rev[rev['Company'].isin(['Exxon Mobil', 'Sinopec', 'China National Petroleum Corporation', 'Royal Dutch Shell'])].copy()
rev
# do fuzzy search
split_names = {i: set(i.split(' ')) for i in df['Company']}
def fuzzy(word):
    return fuzzy_match(word, split_names)
rev['name'] = rev['Company'].apply(fuzzy)
rev
ms2 = df.merge(rev[['share', 'name']], left_on='Company', right_on='name')<jupyter_output><empty_output><jupyter_text>Now we want to put these together and get only the company and the market share.<jupyter_code>ms = market_share.reset_index()[['Group','share']]
ms.columns = ['Company', 'share']
pd.concat([ms, ms2[['Company', 'share']]])<jupyter_output><empty_output> | 
	no_license | 
	/home/datacourse/data-wrangling/DS_Data_Munging.ipynb | 
	Tobiadefami/WQU-Data-Science-Module-1 | 22 | 
| 
	<jupyter_start><jupyter_text># Handling Missing Values - AssignmentIn this exercise, you'll apply what you learned in the **Handling missing values** tutorial.
# Setup
The questions below will give you feedback on your work. Run the following cell to set up the feedback system.<jupyter_code>from learntools.core import binder
binder.bind(globals())
from learntools.data_cleaning.ex1 import *
print("Setup Complete")<jupyter_output>/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:3: DtypeWarning: Columns (22,32) have mixed types.Specify dtype option on import or set low_memory=False.
  This is separate from the ipykernel package so we can avoid doing imports until
<jupyter_text># 1) Take a first look at the data
Run the next code cell to load in the libraries and dataset you'll use to complete the exercise.<jupyter_code># modules we'll use
import pandas as pd
import numpy as np
# read in all our data
sf_permits = pd.read_csv("../input/building-permit-applications-data/Building_Permits.csv")
# set seed for reproducibility
np.random.seed(0) <jupyter_output>/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3441: DtypeWarning: Columns (22,32) have mixed types.Specify dtype option on import or set low_memory=False.
  exec(code_obj, self.user_global_ns, self.user_ns)
<jupyter_text>Use the code cell below to print the first five rows of the `sf_permits` DataFrame.<jupyter_code># TODO: Your code here!
sf_permits.head(5)
sf_permits.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 198900 entries, 0 to 198899
Data columns (total 43 columns):
 #   Column                                  Non-Null Count   Dtype  
---  ------                                  --------------   -----  
 0   Permit Number                           198900 non-null  object 
 1   Permit Type                             198900 non-null  int64  
 2   Permit Type Definition                  198900 non-null  object 
 3   Permit Creation Date                    198900 non-null  object 
 4   Block                                   198900 non-null  object 
 5   Lot                                     198900 non-null  object 
 6   Street Number                           198900 non-null  int64  
 7   Street Number Suffix                    2216 non-null    object 
 8   Street Name                             198900 non-null  object 
 9   Street Suffix                           196132 non-null  object 
 10  Unit                                    2947[...]<jupyter_text>Does the dataset have any missing values?  Once you have an answer, run the code cell below to get credit for your work.<jupyter_code># Check your answer (Run this code cell to receive credit!)
q1.check()
# Line below will give you a hint
#q1.hint()<jupyter_output><empty_output><jupyter_text># 2) How many missing data points do we have?
What percentage of the values in the dataset are missing?  Your answer should be a number between 0 and 100.  (If 1/4 of the values in the dataset are missing, the answer is 25.)<jupyter_code>sf_permits.isna().sum()
shape_df = sf_permits.shape
value = sf_permits.isna().sum()
# how many total missing values do we have?
total_cells = np.product(shape_df)
total_missing = value.sum()
# percent of data that is missing
percent_missing = (total_missing/total_cells) * 100
# TODO: Your code here!
percent_missing = percent_missing
# Check your answer
q2.check()
# Lines below will give you a hint or solution code
#q2.hint()
#q2.solution()<jupyter_output><empty_output><jupyter_text># 3) Figure out why the data is missing
Look at the columns **"Street Number Suffix"** and **"Zipcode"** from the [San Francisco Building Permits dataset](https://www.kaggle.com/aparnashastry/building-permit-applications-data). Both of these contain missing values. 
- Which, if either, are missing because they don't exist? 
- Which, if either, are missing because they weren't recorded?  
Once you have an answer, run the code cell below.<jupyter_code># Checking the missing value existing in the "Street Number Suffix" and "Zipcode" columns
check_df = sf_permits[['Street Number Suffix', 'Zipcode']]
check_df.sample(10)
# Check your answer (Run this code cell to receive credit!)
q3.check()
# Line below will give you a hint
q3.hint()<jupyter_output><empty_output><jupyter_text># 4) Drop missing values: rows
If you removed all of the rows of `sf_permits` with missing values, how many rows are left?
**Note**: Do not change the value of `sf_permits` when checking this.  <jupyter_code># TODO: Your code here!
sf_permits.dropna().shape  # every row has at least one missing value, so no rows remain<jupyter_output><empty_output><jupyter_text>Once you have an answer, run the code cell below.
q4.check()
# Line below will give you a hint
#q4.hint()<jupyter_output><empty_output><jupyter_text># 5) Drop missing values: columns
Now try removing all the columns with empty values.  
- Create a new DataFrame called `sf_permits_with_na_dropped` that has all of the columns with empty values removed.  
- How many columns were removed from the original `sf_permits` DataFrame? Use this number to set the value of the `dropped_columns` variable below.<jupyter_code># TODO: Your code here
sf_permits_with_na_dropped = sf_permits.dropna(axis=1)
df_col = sf_permits.shape[1]
dropped_columns = df_col - sf_permits_with_na_dropped.shape[1]
# Check your answer
q5.check()
# Lines below will give you a hint or solution code
q5.hint()
#q5.solution()<jupyter_output><empty_output><jupyter_text># 6) Fill in missing values automatically
Try replacing all the NaN's in the `sf_permits` data with the one that comes directly after it and then replacing any remaining NaN's with 0.  Set the result to a new DataFrame `sf_permits_with_na_imputed`.<jupyter_code># TODO: Your code here
sf_permits_with_na_imputed = sf_permits.fillna(method='bfill', axis=0).fillna(0)  # backfill, then fill any remaining NaNs with 0
# Check your answer
q6.check()
# Lines below will give you a hint or solution code
#q6.hint()
#q6.solution()<jupyter_output><empty_output> | 
	no_license | 
	/Handling Missing Values.ipynb | 
	amdonatusprince/Handling-Missing-Values---Assignment | 10 | 
| 
	<jupyter_start><jupyter_text># Import modules<jupyter_code>%matplotlib 
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import sqrtm, block_diag
from math import pi
from qutip import *
import sympy
import functools, operator
from collections import OrderedDict
import itertools<jupyter_output>Using matplotlib backend: TkAgg
<jupyter_text># Two transmon + bus 
Following Magesan's paper## Function definitions<jupyter_code>def hamiltonian_eff_H0(w1,w2,J,alpha1,alpha2,N):
    """ 
    Two-transmon effective Hamiltonian with zero excitation in bus. 
    Eq. 2.12, Magesan
    w1, w2:  angular freq
    """
    b = destroy(N)
        
    I = qeye(N)
    # Eq. 2.21, Magesan
    H1 = w1*tensor(b.dag()*b, I) + alpha1/2.0*tensor(b.dag()*b.dag()*b*b, I)
    H2 = w2*tensor(I, b.dag()*b) + alpha2/2.0*tensor(I, b.dag()*b.dag()*b*b)
    H12 = J*(tensor(b.dag(), b) + tensor(b, b.dag())) 
    
    H_eff = H1 + H2 + H12
    
    return H_eff
def hamiltonian_eff_Hd(wd, Omega_X, Omega_Y, t, N):
    """ 
    CR drive Hamiltonian. Eq.2.14.
    All arguments are single number, not an array
    wd: angular freq
    """
    b = destroy(N)  # qubit 2 is target qubit
#     Hd = (Omega_X * np.cos(wd*t) + Omega_Y * np.sin(wd*t)) * tensor(qeye(N),b + b.dag())
    Hd = (Omega_X * np.cos(wd*t) + Omega_Y * np.sin(wd*t)) * tensor(b + b.dag(),qeye(N))
    
    return Hd
def ZZ_computed(J, alpha1, alpha2, Delta):
    """ Eq.4.5 in Magesan"""
    return -J**2*(1/(Delta + alpha1) + 1/(-Delta + alpha2)) * 2
def sorted_diag_op(eigen):
    """
    Diagonalizing matrix by the order of eigenvector
    
    Args:
        eigen : output of eigenstates() method in qutip
       
    Return: a new diagonalizing operator Qobj
    """
       
    index_list = [np.argmax(np.absolute(vec)) for vec in eigen[1]]
    print(index_list)
    X_array = np.column_stack([eigen[1][index_list.index(i)] for i in range(eigen[1].size)])
    X = Qobj(X_array)
    
    return X
# parameters for IBM qubit
w1, w2 = 2*pi*5, 2*pi*5.2 # in GHz
J = 0.005*2*pi
alpha1, alpha2 = -0.35*2*pi, -0.35*2*pi # in GHz
N=5
H_eff = hamiltonian_eff_H0(w1,w2,J,alpha1,alpha2,N)
eigen = H_eff.eigenstates()
ZZ = eigen[0][5] - eigen[0][1] - eigen[0][2]  # in GHz
sympy.SparseMatrix(np.round((H_eff)[:, 0:13],2))
print(f'ZZ = {ZZ*1e3/(2*pi):.3f} (MHz)')
print(f'ZZ_by_formula = {ZZ_computed(J,alpha1,alpha2,w1-w2)*1e3/(2*pi):.3f} (MHz)')
eigen[0]<jupyter_output><empty_output><jupyter_text>## CR Driven: two-level qubit case<jupyter_code>w1, w2 = 2*pi*5, 2*pi*5.2 # in GHz
J = 0.01*2*pi
alpha1, alpha2 = -0.35*2*pi, -0.35*2*pi
N =2
Omega_X, Omega_Y = 0.02*2*pi,0*2*pi
b1 = destroy(N)
b2 = destroy(N)
HA = w2*(tensor(b1.dag()*b1, qeye(N)) + tensor(qeye(N), b2.dag()*b2))
R = lambda t: (-1j*HA*t).expm()
H0 = hamiltonian_eff_H0(w1,w2,J,alpha1,alpha2,N)
Hd = lambda t:hamiltonian_eff_Hd(w2, Omega_X, Omega_Y, t, N)
HR = lambda t: R(t).dag() * (H0 + Hd(t)) * R(t) - HA
t_list = np.linspace(0, 2, 401)  # in ns
HR_list = [HR(t) for t in t_list]
H_R = functools.reduce(operator.add, HR_list) / len(HR_list)
H_R.conj().trans()
H_R
avg_list = []
for N in range(2,401):
    temp = (functools.reduce(operator.add,[HR_list[i] for i in range(N)]) / N)
    temp = np.real(temp[2,0])
    avg_list.append(temp)
    
plt.plot(avg_list)
# avg_list
# Least action
# find X
X_array = np.column_stack( (eigen[1][0],eigen[1][1],eigen[1][2],eigen[1][3]))
X = Qobj(X_array)
# find X_BD and XP
A, B = X[0:2,0:2], X[2:4, 2:4]
X_BD_array = block_diag(A,B)
X_BD = Qobj(X_BD_array)
XP = X_BD * X_BD.conj().trans()
# find T
T = X * X_BD.conj().trans() * XP.sqrtm().inv()
# find H_R_BD
T.dims = H_R.dims
H_R_BD =  T.conj().trans() * H_R * T
H_R_BD<jupyter_output><empty_output><jupyter_text>## CR Driven: three-level case
1. Get the block-diagonal full H_RWA Hamiltonian, then take the computational subspace
2. Find the Pauli coefficients: ZX, ZZ, ZI, IX, and IZ
3. Main question: how to compute H_RWA. How can we ignore terms other than e^{-iwt} or e^{iwt}?<jupyter_code>def get_Pauli(Omega):
    w1, w2 = 2*pi*5.164, 2*pi*5.292 # in GHz
    J = 0.075*2*pi
    alpha1, alpha2 = 0.6*2*pi, -0.33*2*pi
    # Omega = 0.001*2*pi
    N =3
    w1_b = [0, w1, 2*w1+alpha1]
    w2_b = [0, w2, 2*w2+alpha2]
    b = destroy(N)
    # H_eff = hamiltonian_eff_H0(w1,w2,J,alpha1,alpha2,N)
    def get_possible_states(n):
        """
        Get a list of tuples of possible states, given n
        Ex) for n=2, output is [(0,0),(0,1),(1,0),(0,2),(1,1),(2,0)]
        Args: 
            n: integer such that n1+n2<=n for |n1,n2> state
        Return:
            List of tuples that satisfy n1+n2<=n where n1 and n2 are 0,1,2,3,....
        """
        def get_possible_sum(n):
            """
             [(0,1)]
            """
            result = []
            for i in range(n+1):
                result.append((i,n-i))
            return result
        possible_list = []
        for i in range(n + 1):
            possible_list += get_possible_sum(i)
        return possible_list
    def kronecker_delta(i,j):
        return (1 if i==j else 0)
    # Note
    # b1[i,j] = <i|b1|j>, where |i> = {|00>, |01>, |10>, |11>, ... |30>}
    # b1|n1_i,n2_i> = sqrt(n1_i)|n1_i-1,n2_i> where |i> = |n1_i, n2_i> and |j> = |n1_j, n2_j>
    # b2|n1_i,n2_i> = sqrt(n2_i)|n1_i,n2_i-1> where |i> = |n1_i, n2_i> and |j> = |n1_j, n2_j>
    # V = sqrt((n1+1)*(n2+1))*J_n1_n2*(|n1+1,n2><n1,n2+1|+|n1,n2+1><n1+1,n2|)
    states = [ prod for prod in itertools.product(range(3), range(3))]
    unsorted_state_energy_dict = { s: w1_b[s[0]] + w2_b[s[1]] for s in states}
    sorted_state_energy_dict = OrderedDict(sorted(unsorted_state_energy_dict.items(), key=lambda x: x[1]))
    sorted_energy = list(sorted_state_energy_dict.values())
    sorted_states = list(sorted_state_energy_dict.keys())
    # [(0,0),(0,1),(1,0),(1,1),(0,2),(2,0),(0,3),(1,2),(2,1),(3,0)]
    sorted_states= [(0, 0), (0, 1), (1, 0),  (1, 1), (0, 2), (2, 0),(1, 2), (2, 1), (2, 2)]
    # sorted_states= [(0, 0), (1, 0), (0, 1), (0, 2), (1, 1),  (2, 0),(1, 2), (2, 1), (2, 2)]
    sorted_energy = [ w1_b[state[0]]+w2_b[state[1]] for state in sorted_states]
    n = len(sorted_state_energy_dict)
    b1_array, b2_array, V0_array = np.zeros((n,n)), np.zeros((n,n)), np.zeros((n,n))
    # redefind b1, b2 with basis of sorted states
    for i, j in itertools.product(range(n), range(n)): # i, j = basis index
        n1_i, n2_i  = sorted_states[i][0], sorted_states[i][1]
        n1_j, n2_j = sorted_states[j][0], sorted_states[j][1]    
        # b1_array[i,j] = <n1_i, n2_i|b1|n1_j, n2_j>
        b1_array[i, j] = np.sqrt(n1_j)* kronecker_delta(n1_i, n1_j-1) * kronecker_delta(n2_i, n2_j)
        b2_array[i, j] = np.sqrt(n2_j)* kronecker_delta(n2_i, n2_j-1) * kronecker_delta(n1_i, n1_j) 
    b1 = Qobj(b1_array)
    b2 = Qobj(b2_array)
    # H0 = Qobj(np.diag(sorted_energy))    
    H0 = w1*b1.dag()*b1 + alpha1/2.0*b1.dag()*b1.dag()*b1*b1 + w2*b2.dag()*b2 \
        + alpha2/2.0*b2.dag()*b2.dag()*b2*b2    
    V0 = J*(b1.dag()*b2 + b1*b2.dag()) 
    H_eff = H0 + V0
    b1t = b1 + b1.dag()
    b2t = b2 + b2.dag()
    ntot = b1.dag()*b1 + b2.dag()*b2
    # digonalize H0: remove J
    eigen = H_eff.eigenstates()
    # find U
    # U_array = np.column_stack( [eigen[1][i] for i in range(eigen[1].size)])
    U_array = sorted_diag_op(eigen)
    U = Qobj(U_array)
    U.dims = H0.dims
    # digonalize H0
    H_eff_D = U.dag() * H_eff * U
    # get new Hd and HR
    wd = (H_eff_D[3,3] - H_eff_D[1,1] + H_eff_D[2,2])/2
    Hd = lambda t:Omega * np.cos(wd*t)*b1t
    Hd_D = lambda t: U.dag() * Hd(t) * U
    # To rotating frame
    HA = wd*(b1.dag()*b1 + b2.dag()*b2)
    R = lambda t: (-1j*HA*t).expm()
    HR = lambda t: R(t).dag() * (H_eff_D + Hd_D(t)) * R(t) - HA
    # average over 10 period
    t_list = np.linspace(0, 2, 401)  # in ns
    HR_list = [HR(t) for t in t_list]
    H_R = functools.reduce(operator.add,HR_list) / len(HR_list)
    H_R.tidyup(1e-3)
    # least action
    eigen = H_R.eigenstates()
    # find X
    # X_array = np.column_stack( [eigen[1][i] for i in range(eigen[1].size)])
    X_array = sorted_diag_op(eigen)
    X = Qobj(X_array)
    # find X_BD and XP
    A, B, C = X[0:2,0:2], X[2:4, 2:4], X[4:10, 4:10]
    X_BD_array = block_diag(A,B,C)
    X_BD = Qobj(X_BD_array)
    # X_BD = X_BD.tidyup(atol=1e-3)
    XP = X_BD * X_BD.dag()
    # find T
    T = X * X_BD.dag() * XP.sqrtm().inv()
    # find H_R_BD
    T.dims = H_R.dims
    H_R_BD =  T.dag() * H_R * T
    H_R_BD
    def from16to8(M):
        return np.array([M[0,0],M[1,0],M[0,1],M[1,1],M[2,2],M[3,2],M[2,3],M[3,3]]).reshape(8,1)
    # Get Pauli coeff.
    II = tensor(qeye(2), qeye(2))
    IX = tensor(qeye(2), sigmax())
    IY = tensor(qeye(2), sigmay())
    IZ = tensor(qeye(2), sigmaz())
    ZI = tensor(sigmaz(), qeye(2))
    ZY = tensor(sigmaz(), sigmay())
    ZX = tensor(sigmaz(), sigmax())
    ZZ = tensor(sigmaz(), sigmaz())
    a = np.column_stack([from16to8(II),from16to8(IX),from16to8(IY),from16to8(IZ)
                        ,from16to8(ZI),from16to8(ZX),from16to8(ZY),from16to8(ZZ)])
    b = from16to8(H_R_BD[0:4, 0:4])
    c = np.linalg.solve(a, b)
#     sympy.Matrix(np.round(c,6))/2/pi*1000
    # -63.9, -0.02, -0.00017,1.2e-5,127.9,0.073, 0.00017, -0.039
    return c/2/pi*1e3
# plot Pauli coeff. vs CR amplitude
Omega_list = np.linspace(1, 140, 11)*1e-3*2*pi
Pauli = get_Pauli(Omega_list[0])
for omega in Omega_list[1:]:
    Pauli = np.column_stack((Pauli, get_Pauli(omega)))
# plot
Pauli_label = ['II','IX','IY','IZ','ZI','ZX','ZY','ZZ']
fig, ax = plt.subplots(1,1, figsize=(10,8))
for i in range(Pauli.shape[0]):
    ax.plot(Omega_list, Pauli[i], label=Pauli_label[i], linewidth=3)
ax.set_xlabel('CR amplitude, Omega (MHz)', fontsize=18)
ax.set_ylabel('$Pauli Coefficient$ (MHz)', fontsize=18)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.grid('on')
ax.legend(fontsize=18)
H_eff/2/pi
# H0
# V0
# states
# X
sorted_states
sorted_energy
H_R_BD
H0_D
U
H_eff_D
X_BD
X
avg_list = []
for N in range(2,401):
    temp = (functools.reduce(operator.add,[HR_list[i] for i in range(N)]) / N)
    temp = np.real(temp[0,1])
    avg_list.append(temp)
    
# plt.plot(avg_list)
f=np.fft.fft(avg_list)
plt.plot(np.absolute(f))
plt.yscale('log')
X.tidyup(1e-4)
test=Qobj(IX.data.reshape(16,1))
IX.data.reshape(16,1).todense()<jupyter_output><empty_output><jupyter_text># Transmon with a Duffing model<jupyter_code>%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from qutip import *
import sympy
import itertools, functools, operator
from collections import OrderedDict
from numpy import pi
from scipy.linalg import block_diag
# parameters for IBM qubit
w = 5 # in GHz
alpha = -0.35 # in GHz
N = 3
b = destroy(N)
H = w*b.dag()*b + alpha/2.0*b.dag()*b*(b.dag()*b-qeye(N))
H1 = w*b.dag()*b + alpha/2.0*b.dag()*b.dag()*b*b
Hd = b.dag() + b
H
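# Quick illustrative check: for bosonic operators bdag*bdag*b*b equals n*(n-1),
# so the two Duffing forms H and H1 above should coincide; the difference should
# be at the level of numerical precision.
print((H - H1).norm())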
# Hd<jupyter_output><empty_output><jupyter_text># Find Pauli-coefficients of CSFQ/transmon with CR
Ref: Xuexin's Mathematica notebook
1. Build the effective two-qubit Hamiltonian.
2. Diagonalize the two-qubit Hamiltonian to get rid of the coupling, by either SW or numerical diagonalization (dressed states).
3. Build driving Hamiltonian in the dressed state.
4. Move into rotating frame.
5. Block-diagonalize the total Hamiltonian by SW or least action.
6. Find Pauli-coefficients.## Helper<jupyter_code>def list_energies(f1,alpha1, beta1, f2, alpha2, beta2, sort=False):
    """
    Calculate energy levels. E00 = 0
    """
    numStates = 4 # 0,1,2,3 states
    state1 = [0, f1, f1+f1+alpha1,f1+f1+f1+alpha1+beta1]
    state2 = [0, f2, f2+f2+alpha2,f2+f2+f2+alpha2+beta2]
    
    result_dict = {}
    for prod in itertools.product(range(numStates), range(numStates)):
        temp = {prod:state1[prod[0]] + state2[prod[1]]}
        result_dict.update(temp)
    
    if sort:
        sorted_dict = OrderedDict(sorted(result_dict.items(), key=lambda x: x[1]))
        return sorted_dict
    else:
        return result_dict
    
list_energies(5.05, 0.6, 0.5, 5.25,-0.33, -0.37,sort=True)    
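# Illustrative check: because list_energies just sums single-qubit levels (no coupling),
# the bare ZZ-like combination E(1,1) - E(1,0) - E(0,1) + E(0,0) should vanish.
_lv = list_energies(5.05, 0.6, 0.5, 5.25, -0.33, -0.37)
print(_lv[(1, 1)] - _lv[(1, 0)] - _lv[(0, 1)] + _lv[(0, 0)])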
# list_energies(5.25,-0.33, -0.37, 5.05, -0.3, -0.33, sort=True)    
def sorted_diag_op(eigen):
    """
    Diagonalizing matrix by the order of eigenvector
    
    Args:
        eigen : output of eigenstates() method in qutip
       
    Return: a new diagonalizing operator Qobj
    """
       
    index_list = [np.argmax(np.absolute(vec)) for vec in eigen[1]]
    print(index_list)
    X_array = np.column_stack([eigen[1][index_list.index(i)] for i in range(eigen[1].size)])
    X = Qobj(X_array)
    
    return X<jupyter_output><empty_output><jupyter_text>## Define qubit frequencies and J<jupyter_code>wm = 5164       # CSFQ w01
del_m0 = 600   # first anharmonicity
del_m1 = 430  # second anharmon.
gdm = 80     # CSFQ-bus coupling
wr = 6292 # bus cavity
wt = 5292   # transmon
del_t0 = -329.1
del_t1 = -369.7
gdt = -80    # transmon-bus coupling
gmt = 0  # csfq-transmon direct g
# CSFQ, j=0,1,2
w1_d = lambda j: wm*j + del_m0/2*j*(j-1) if j<3 else wm*j + del_m0/2*j*(j-1) + del_m1-del_m0 # dressed freq
gamma1 = lambda j: j/(wr + w1_d(j-1) - w1_d(j))
w1_b = lambda j: w1_d(j) - gamma1(j) * gdm**2  # bare freq. j=1,2,3
# transmon, j=0,1,2
w2_d = lambda j: wt*j + del_t0/2*j*(j-1) if j<3 else wt*j + del_t0/2*j*(j-1) + del_t1-del_t0 # dressed freq
gamma2 = lambda j: j/(wr + w2_d(j-1) - w2_d(j))
w2_b = lambda j: w2_d(j) - gamma2(j) * gdt**2  # bare freq.
# J, {j1,j2}=0,1,2
J = lambda j1,j2 : gmt - gdm*gdt/2*(1/(wr+w1_d(j1)-w1_d(j1+1)) + 1/(wr+w2_d(j2)-w2_d(j2+1)) 
                                    + 1/(wr-w1_d(j1)+w1_d(j1+1)) + 1/(wr-w2_d(j2)+w2_d(j2+1))) 
J(0,1)
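# Convenience printout: the exchange coupling J(j1, j2) is level-dependent;
# tabulate it (in MHz) for the low-lying levels used below.
for _j1 in range(3):
    for _j2 in range(3):
        print(_j1, _j2, round(J(_j1, _j2), 3))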
w1_d(1)-w1_d(0)
# w2_d(1)-w2_d(0)
j1=0
j2=1
- gdm*gdt/2*(1/(wr+w1_d(j1)-w1_d(j1+1)) + 1/(wr+w2_d(j2)-w2_d(j2+1)) 
                                    + 1/(wr-w1_d(j1)+w1_d(j1+1)) + 1/(wr-w2_d(j2)+w2_d(j2+1))) 
J(0,0)<jupyter_output><empty_output><jupyter_text>## Build effective two-qubit Hamiltonian<jupyter_code>def get_possible_states(n):
    """
    Get a list of tuples of possible states, given n
    Ex) for n=2, output is [(0,0),(0,1),(1,0),(0,2),(1,1),(2,0)]
    Args: 
        n: integer such that n1+n2<=n for |n1,n2> state
    Return:
        List of tuples that satisfy n1+n2<=n where n1 and n2 are 0,1,2,3,....
    """
    def get_possible_sum(n):
        """
        Return [(0, n), (1, n-1), ..., (n, 0)].
        """
        result = []
        for i in range(n+1):
            result.append((i,n-i))
        return result
    
    possible_list = []
    for i in range(n + 1):
        possible_list += get_possible_sum(i)
    
    return possible_list
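# Example of the enumeration above:
print(get_possible_states(2))  # expected: [(0, 0), (0, 1), (1, 0), (0, 2), (1, 1), (2, 0)]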
def kronecker_delta(i,j):
    return (1 if i==j else 0)
# Note
# b1[i,j] = <i|b1|j>, where |i> = {|00>, |01>, |10>, |11>, ... |30>}
# b1|n1_i,n2_i> = sqrt(n1_i)|n1_i-1,n2_i> where |i> = |n1_i, n2_i> and |j> = |n1_j, n2_j>
# b2|n1_i,n2_i> = sqrt(n2_i)|n1_i,n2_i-1> where |i> = |n1_i, n2_i> and |j> = |n1_j, n2_j>
# V = sqrt((n1+1)*(n2+1))*J_n1_n2*(|n1+1,n2><n1,n2+1|+|n1,n2+1><n1+1,n2|)
# states = get_possible_states(3) # up to |3> state
# unsorted_state_energy_dict = { s: w1_b(s[0]) + w2_b(s[1]) for s in states}
# sorted_state_energy_dict = OrderedDict(sorted(unsorted_state_energy_dict.items(), key=lambda x: x[1]))
# sorted_energy = np.array(list(sorted_state_energy_dict.values()))
# sorted_states = list(sorted_state_energy_dict.keys())
sorted_states= [(0,0),(0,1),(1,0),(1,1),(0,2),(2,0),(0,3),(1,2),(2,1),(3,0)] # correct
# sorted_states= [(0,0),(1,0),(0,1),(0,2),(1,1),(2,0),(0,3),(1,2),(2,1),(3,0)]
sorted_energy = [ w1_b(state[0])+w2_b(state[1]) for state in sorted_states]
n = len(sorted_states)
b1_array, b2_array, V0_array = np.zeros((n,n)), np.zeros((n,n)), np.zeros((n,n))
for i, j in itertools.product(range(n), range(n)):
    n1_i, n1_j = sorted_states[i][0], sorted_states[j][0]
    n2_i, n2_j = sorted_states[i][1], sorted_states[j][1]
    
    # b1_array[i,j] = <n1_i, n2_i|b1|n1_j, n2_j>
    b1_array[i, j] = np.sqrt(n1_j)* kronecker_delta(n1_i, n1_j-1) * kronecker_delta(n2_i, n2_j)
    b2_array[i, j] = np.sqrt(n2_j)* kronecker_delta(n2_i, n2_j-1) * kronecker_delta(n1_i, n1_j)
   
    # 1) n1=n1_i-1, n2 = n2_i,  2) n1=n1_i, n2=n2_i-1
    V0_array[i,j] = (np.sqrt(n1_i*(n2_i+1))*J(n1_i-1, n2_i)*kronecker_delta(n1_i-1, n1_j)*kronecker_delta(n2_i+1, n2_j)
                   + np.sqrt((n1_i+1)*n2_i)*J(n1_i, n2_i-1)*kronecker_delta(n1_i+1, n1_j)*kronecker_delta(n2_i-1, n2_j))
H0 = Qobj(np.diag(sorted_energy))    
V0 = Qobj(V0_array)
b1 = Qobj(b1_array)
b2 = Qobj(b2_array)
b1t = b1 + b1.dag()
b2t = b2 + b2.dag()
ntot = b1.dag()*b1 + b2.dag()*b2
sorted_state_energy_dict
sorted_energy
sorted_states
H0
# V0
# b2.dag()
# states to consider
# 00, 01,10,11,02,20,03,12,21,30
diag = np.array([0, w2_b(1), w1_b(1), w1_b(1)+w2_b(1), w2_b(2), w1_b(2), w2_b(3)
                 ,w1_b(1)+w2_b(2), w1_b(2)+w2_b(1), w1_b(3)])
H0 = Qobj(np.diag(diag))
b1dag = np.zeros((10,10))
b1dag[2,0] = 1
b1dag[3,1] = 1
b1dag[5,2] = np.sqrt(2)
b1dag[8,3] = np.sqrt(2)
b1dag[7,4] = 1
b1dag[9,5] = np.sqrt(3)
b1dag = Qobj(b1dag)
b2dag = np.zeros((10,10))
b2dag[1,0] = 1
b2dag[3,2] = 1
b2dag[4,1] = np.sqrt(2)
b2dag[7,3] = np.sqrt(2)
b2dag[6,4] = np.sqrt(3)
b2dag[8,5] = 1
b2dag = Qobj(b2dag)
V0  = np.zeros((10,10))
V0[2,1] = J(0,0)
V0[1,2] = J(0,0)
V0[4,3] = np.sqrt(2)*J(0,1)
V0[5,3] = np.sqrt(2)*J(1,0)
V0[3,4] = np.sqrt(2)*J(0,1)
V0[3,5] = np.sqrt(2)*J(1,0)
V0[7,6] = np.sqrt(3)*J(0,2)
V0[6,7] = np.sqrt(3)*J(0,2)
V0[8,7] = np.sqrt(4)*J(1,1)
V0[7,8] = np.sqrt(4)*J(1,1)
V0[9,8] = np.sqrt(3)*J(2,0)
V0[8,9] = np.sqrt(3)*J(2,0)
V0 = Qobj(V0)
b1 = b1dag.dag()
b2 = b2dag.dag()
b1t = b1 + b1dag
b2t = b2 + b2dag
ntot = b1dag*b1 + b2dag*b2
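# Optional cross-check, assuming the hand-filled operators above are meant to reproduce
# the loop-built arrays from the previous cell (same state ordering):
print(np.allclose(b1dag.full(), b1_array.T),
      np.allclose(b2dag.full(), b2_array.T),
      np.allclose(V0.full(), V0_array))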
V0
H0
b2dag<jupyter_output><empty_output><jupyter_text>## Diagonalize two-qubit Hamiltonian<jupyter_code># By solving eigenvalue
H1 = H0 + V0
eigen = H1.eigenstates()
# find U
U = sorted_diag_op(eigen)
U_unsort_array = np.column_stack([eigen[1][i] for i in range(eigen[1].size)])
U_unsort = Qobj(U_unsort_array) 
U.dims = H1.dims
# U = U_unsort
# diagonalize H1 = H0 + V0
H2 = U.dag() * H1 * U
H2
# U
# U_unsort<jupyter_output><empty_output><jupyter_text>## Driving Hamiltonian in dressed state<jupyter_code># get new Hd and HR
Omega = 50 # MHz
wd = (H2[3,3]-H2[2,2]+H2[1,1])/2  # 10443 - 5157 + 5285 in MHz(|11>, |10>, |01>)
# wd = (H2[4,4]-H2[1,1]+H2[2,2])/2  # 10443 - 5157 + 5285 in MHz(|11>, |10>, |01>)
Hd = lambda t: Omega * np.cos(2*pi*wd*t)* b1t
Hd_D = lambda t: U.dag() * Hd(t) * U
# To rotating frame
R = lambda t: (-1j*2*pi*wd*ntot*t).expm()
HR = lambda t: R(t).dag() * (H2 + Hd_D(t)) * R(t) - ntot *wd
# average over 10 periods
t_list = np.linspace(0, 2, 401)*1e-3  # in us
HR_list = [HR(t) for t in t_list]
H_R = functools.reduce(operator.add, HR_list) / len(HR_list)
H_R.tidyup(1e-4)
wd
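# The time average above plays the role of a rotating-wave approximation: terms
# oscillating at ~2*wd average out over the 2 ns window. The averaged Hamiltonian
# should remain Hermitian up to numerical noise:
print((H_R - H_R.dag()).norm())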
avg_list = []
for N in range(2,401):
    temp = (functools.reduce(operator.add,[HR_list[i] for i in range(N)]) / N)
    temp = np.real(temp[0,1])
    avg_list.append(temp)
    
plt.plot(avg_list)
    
# f=np.fft.fft(avg_list)
# plt.plot(np.absolute(f))
# plt.yscale('log')<jupyter_output><empty_output><jupyter_text>## Least action<jupyter_code># least action
eigen = H_R.eigenstates()
# find X
X = sorted_diag_op(eigen)
X_unsort_array = np.column_stack([eigen[1][i] for i in range(eigen[1].size)])
X_unsort = Qobj(X_unsort_array) 
# X = X_unsort
# find X_BD and XP
A, B, C = X[0:2,0:2], X[2:4, 2:4], X[4:10, 4:10]
X_BD_array = block_diag(A,B,C)
X_BD = Qobj(X_BD_array)  # X_BD keeps only the diagonal blocks (2x2, 2x2, 6x6) of X; T below turns it into the least-action block-diagonalizing unitary.
# X_BD = X_BD.tidyup(atol=1e-3)
XP = X_BD * X_BD.dag()
# find T = X * X_BD.dag() * (X_BD*X_BD.dag())^(-1/2), the least-action transformation
T = X * X_BD.dag() * XP.sqrtm().inv()
# find H_R_BD
T.dims = H_R.dims
H_R_BD =  T.dag() * H_R * T
H_R_BD.tidyup(1e-4)
X.tidyup(1e-3)
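# Sanity check: the least-action transformation T should be unitary, so the
# deviation of T.dag()*T from the identity should be at the numerical-noise level.
print((T.dag()*T - qeye(10)).norm())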
# X_unsort.tidyup(1e-4)<jupyter_output><empty_output><jupyter_text>## Pauli coefficient<jupyter_code>def from16to8(M):
    """
    Convert 4x4 block-diagonal matrix to 8x1 column vector
    Args:
        M: 4x4 block-diagonal matrix
    Return:
        8x1 matrix
    """
    return np.array([M[0,0],M[1,0],M[0,1],M[1,1],M[2,2],M[3,2],M[2,3],M[3,3]]).reshape(8,1)
# Get Pauli coeff.
# All these are block-diagonal.
II = tensor(qeye(2), qeye(2))
IX = tensor(qeye(2), sigmax())
IY = tensor(qeye(2), sigmay())
IZ = tensor(qeye(2), sigmaz())
ZI = tensor(sigmaz(), qeye(2))
ZY = tensor(sigmaz(), sigmay())
ZX = tensor(sigmaz(), sigmax())
ZZ = tensor(sigmaz(), sigmaz())
a = np.column_stack([from16to8(II),from16to8(IX)/2,from16to8(IY)/2,from16to8(IZ)/2
                    ,from16to8(ZI)/2,from16to8(ZX)/2,from16to8(ZY)/2,from16to8(ZZ)/2])
b = from16to8(H_R_BD[0:4, 0:4])
c = np.linalg.solve(a, b)
sympy.Matrix(np.round(c,6))
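# For readability, pair each solved coefficient with its Pauli label
# (same ordering as the columns of 'a' above):
_labels = ['II', 'IX', 'IY', 'IZ', 'ZI', 'ZX', 'ZY', 'ZZ']
for _lab, _val in zip(_labels, np.array(c).flatten()):
    print(_lab, np.round(_val, 6))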
# -63.9, -0.02, -0.00017,1.2e-5,127.9,0.073, 0.00017, -0.039<jupyter_output><empty_output><jupyter_text>## Pauli vs CR amplitude<jupyter_code>def get_Pauli_coeff(Omega):
    # get new Hd and HR
#     Omega = 1 # MHz
    wd = (H2[3,3]-H2[2,2]+H2[1,1])/2  # 10443 - 5157 + 5285 in MHz
    Hd = lambda t: Omega * np.cos(2*pi*wd*t)* b1t
    Hd_D = lambda t: U.dag() * Hd(t) * U
    # To rotating frame
    R = lambda t: (-1j*2*pi*wd*ntot*t).expm()
    HR = lambda t: R(t).dag() * (H2 + Hd_D(t)) * R(t) - ntot *wd
    # average over 10 periods
    t_list = np.linspace(0, 2, 401)*1e-3  # in us
    HR_list = [HR(t) for t in t_list]
    H_R = functools.reduce(operator.add, HR_list) / len(HR_list)
    
    # least action
    eigen = H_R.eigenstates()
    # find X
    X = sorted_diag_op(eigen)
    # find X_BD and XP
    A, B, C = X[0:2,0:2], X[2:4, 2:4], X[4:10, 4:10]
    X_BD_array = block_diag(A,B,C)
    X_BD = Qobj(X_BD_array)
    # X_BD = X_BD.tidyup(atol=1e-3)
    XP = X_BD * X_BD.conj().trans()
    # find T
    T = X * X_BD.dag() * XP.sqrtm().inv()
    # find H_R_BD
    T.dims = H_R.dims
    H_R_BD =  T.dag() * H_R * T
   
    def from16to8(M):
        return np.array([M[0,0],M[1,0],M[0,1],M[1,1],M[2,2],M[3,2],M[2,3],M[3,3]]).reshape(8,1)
    # Get Pauli coeff.
    II = tensor(qeye(2), qeye(2))
    IX = tensor(qeye(2), sigmax())
    IY = tensor(qeye(2), sigmay())
    IZ = tensor(qeye(2), sigmaz())
    ZI = tensor(sigmaz(), qeye(2))
    ZY = tensor(sigmaz(), sigmay())
    ZX = tensor(sigmaz(), sigmax())
    ZZ = tensor(sigmaz(), sigmaz())
    a = np.column_stack([from16to8(II),from16to8(IX)/2,from16to8(IY)/2,from16to8(IZ)/2
                        ,from16to8(ZI)/2,from16to8(ZX)/2,from16to8(ZY)/2,from16to8(ZZ)/2])
    b = from16to8(H_R_BD[0:4, 0:4])
    c = np.linalg.solve(a, b)
    print(Omega)
    print(X_BD[0,0], X_BD[1,1], X_BD[2,2], X_BD[3,3])
    return c
# plot Pauli coeff. vs CR amplitude
Omega_list = np.linspace(1, 140, 11)
Pauli = get_Pauli_coeff(Omega_list[0])
for omega in Omega_list[1:]:
    Pauli = np.column_stack((Pauli, get_Pauli_coeff(omega)))
# plot
Pauli_label = ['II','IX','IY','IZ','ZI','ZX','ZY','ZZ']
fig, ax = plt.subplots(1,1, figsize=(10,8))
for i in range(Pauli.shape[0]):
    ax.plot(Omega_list, Pauli[i], label=Pauli_label[i], linewidth=3)
ax.set_xlabel('CR amplitude, Omega (MHz)', fontsize=18)
ax.set_ylabel('$Pauli Coefficient$ (MHz)', fontsize=18)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.grid('on')
ax.legend(fontsize=18)
# plot ZZ only
fig, ax = plt.subplots(1,1, figsize=(10,8))
i = 7
ax.plot(Omega_list, Pauli[i], label=Pauli_label[i], linewidth=3)
ax.set_xlabel('CR amplitude, Omega (MHz)', fontsize=18)
ax.set_ylabel(r'$\zeta$ (MHz)', fontsize=18)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.grid('on')
ax.legend(fontsize=18)<jupyter_output><empty_output><jupyter_text>## Using Duffing model<jupyter_code>wm = 5164       # CSFQ w01
del_m0 = 600   # first anharmonicity
del_m1 = 430  # second anharmon.
gdm = 80     # CSFQ-bus coupling
wr = 6292 # bus cavity
wt = 5292   # transmon
del_t0 = -329.1
del_t1 = -369.7
gdt = -80    # transmon-bus coupling
gmt = 0  # csfq-transmon direct g
# CSFQ, j=0,1,2
w1_d = lambda j: wm*j + del_m0/2*j*(j-1) if j<3 else wm*j + del_m0/2*j*(j-1) + del_m1-del_m0 # dressed freq
gamma1 = lambda j: j/(wr + w1_d(j-1) - w1_d(j))
w1_b = lambda j: w1_d(j) - gamma1(j) * gdm**2  # bare freq. j=1,2,3
# transmon, j=0,1,2
w2_d = lambda j: wt*j + del_t0/2*j*(j-1) if j<3 else wt*j + del_t0/2*j*(j-1) + del_t1-del_t0 # dressed freq
gamma2 = lambda j: j/(wr + w2_d(j-1) - w2_d(j))
w2_b = lambda j: w2_d(j) - gamma2(j) * gdt**2  # bare freq.
# J, {j1,j2}=0,1,2
J = lambda j1,j2 : gmt - gdm*gdt/2*(1/(wr+w1_d(j1)-w1_d(j1+1)) + 1/(wr+w2_d(j2)-w2_d(j2+1)) 
                                    + 1/(wr-w1_d(j1)+w1_d(j1+1)) + 1/(wr-w2_d(j2)+w2_d(j2+1))) 
##############
def get_possible_states(n):
    """
    Get a list of tuples of possible states, given n
    Ex) for n=2, output is [(0,0),(0,1),(1,0),(0,2),(1,1),(2,0)]
    Args: 
        n: integer such that n1+n2<=n for |n1,n2> state
    Return:
        List of tuples that satisfy n1+n2<=n where n1 and n2 are 0,1,2,3,....
    """
    def get_possible_sum(n):
        """
         [(0,1)]
        """
        result = []
        for i in range(n+1):
            result.append((i,n-i))
        return result
    
    possible_list = []
    for i in range(n + 1):
        possible_list += get_possible_sum(i)
    
    return possible_list
def kronecker_delta(i,j):
    return (1 if i==j else 0)
# Note
# b1[i,j] = <i|b1|j>, where |i> = {|00>, |01>, |10>, |11>, ... |30>}
# b1|n1_i,n2_i> = sqrt(n1_i)|n1_i-1,n2_i> where |i> = |n1_i, n2_i> and |j> = |n1_j, n2_j>
# b2|n1_i,n2_i> = sqrt(n2_i)|n1_i,n2_i-1> where |i> = |n1_i, n2_i> and |j> = |n1_j, n2_j>
# V = sqrt((n1+1)*(n2+1))*J_n1_n2*(|n1+1,n2><n1,n2+1|+|n1,n2+1><n1+1,n2|)
states = get_possible_states(3) # up to |3> state
unsorted_state_energy_dict = { s: w1_b(s[0]) + w2_b(s[1]) for s in states}
sorted_state_energy_dict = OrderedDict(sorted(unsorted_state_energy_dict.items(), key=lambda x: x[1]))
sorted_energy = np.array(list(sorted_state_energy_dict.values()))
sorted_states = list(sorted_state_energy_dict.keys())
# sorted_states= [(0,0),(0,1),(1,0),(1,1),(0,2),(2,0),(0,3),(1,2),(2,1),(3,0)]
# sorted_energy = np.array([0, w2_b(1), w1_b(1), w1_b(1)+w2_b(1), w2_b(2), w1_b(2), w2_b(3)
#                  ,w1_b(1)+w2_b(2), w1_b(2)+w2_b(1), w1_b(3)])
n = len(sorted_state_energy_dict)
b1_array, b2_array, V0_array = np.zeros((n,n)), np.zeros((n,n)), np.zeros((n,n))
for i, j in itertools.product(range(n), range(n)):
    n1_i, n1_j = sorted_states[i][0], sorted_states[j][0]
    n2_i, n2_j = sorted_states[i][1], sorted_states[j][1]
    
    # b1_array[i,j] = <n1_i, n2_i|b1|n1_j, n2_j>
    b1_array[i, j] = np.sqrt(n1_j)* kronecker_delta(n1_i, n1_j-1) * kronecker_delta(n2_i, n2_j)
    b2_array[i, j] = np.sqrt(n2_j)* kronecker_delta(n2_i, n2_j-1) * kronecker_delta(n1_i, n1_j)
   
    # 1) n1=n1_i-1, n2 = n2_i,  2) n1=n1_i, n2=n2_i-1
    V0_array[i,j] = (np.sqrt(n1_i*(n2_i+1))*J(n1_i-1, n2_i)*kronecker_delta(n1_i-1, n1_j)*kronecker_delta(n2_i+1, n2_j)
                   + np.sqrt((n1_i+1)*n2_i)*J(n1_i, n2_i-1)*kronecker_delta(n1_i+1, n1_j)*kronecker_delta(n2_i-1, n2_j))
def hamiltonian_eff_H0(w1,w2,J,alpha1,alpha2,N):
    """ 
    Two-transmon effective Hamiltonian with zero excitation in bus. 
    Eq. 2.12, Magesan
    w1, w2:  angular freq
    """
    b1 = destroy(N)
    b2 = destroy(N)
    
    I = qeye(N)
    # Eq. 2.21, Magesan
    H1 = w1*tensor(b1.dag()*b1, I) + alpha1/2.0*tensor(b1.dag()*b1.dag()*b1*b1, I)
    H2 = w2*tensor(I, b2.dag()*b2) + alpha2/2.0*tensor(I, b2.dag()*b2.dag()*b2*b2)
    H12 = J*(tensor(b1.dag(), b2) + tensor(b1, b2.dag())) 
    
    H_eff = H1 + H2 + H12
    
    return H_eff
H0 = Qobj(np.diag(sorted_energy))    
V0 = Qobj(V0_array)
b1 = Qobj(b1_array)
b2 = Qobj(b2_array)
b1t = b1 + b1.dag()
b2t = b2 + b2.dag()
ntot = b1.dag()*b1 + b2.dag()*b2
###############
# By solving eigenvalue
H1 = H0 + V0
eigen = H1.eigenstates()
# find U
U = sorted_diag_op(eigen)
U_unsort_array = np.column_stack([eigen[1][i] for i in range(eigen[1].size)])
U_unsort = Qobj(U_unsort_array) 
U.dims = H1.dims
# diagonalize H1 = H0 + V0
H2 = U.dag() * H1 * U
#########
# get new Hd and HR
Omega = 1 # MHz
wd = (H2[3,3]-H2[2,2]+H2[1,1])/2  # 10443 - 5157 + 5285 in MHz
Hd = lambda t: Omega * np.cos(2*pi*wd*t)* b1t
Hd_D = lambda t: U.dag() * Hd(t) * U
# To rotating frame
R = lambda t: (-1j*2*pi*wd*ntot*t).expm()
HR = lambda t: R(t).dag() * (H2 + Hd_D(t)) * R(t) - ntot *wd
# average over 10 periods
t_list = np.linspace(0, 2, 401)*1e-3  # in us
HR_list = [HR(t) for t in t_list]
H_R = functools.reduce(operator.add, HR_list) / len(HR_list)
H_R
##############
# least action
eigen = H_R.eigenstates()
# find X
X = sorted_diag_op(eigen)
# find X_BD and XP
A, B, C = X[0:2,0:2], X[2:4, 2:4], X[4:10, 4:10]
X_BD_array = block_diag(A,B,C)
X_BD = Qobj(X_BD_array)
# X_BD = X_BD.tidyup(atol=1e-3)
XP = X_BD * X_BD.conj().trans()
# find T
T = X * X_BD.dag() * XP.sqrtm().inv()
# find H_R_BD
T.dims = H_R.dims
H_R_BD =  T.dag() * H_R * T
H_R_BD.tidyup(1e-4)
##################
def from16to8(M):
    """
    Convert 4x4 block-diagonal matrix to 8x1 column vector
    Args:
        M: 4x4 block-diagonal matrix
    Return:
        8x1 matrix
    """
    return np.array([M[0,0],M[1,0],M[0,1],M[1,1],M[2,2],M[3,2],M[2,3],M[3,3]]).reshape(8,1)
# Get Pauli coeff.
# All these are block-diagonal.
II = tensor(qeye(2), qeye(2))
IX = tensor(qeye(2), sigmax())
IY = tensor(qeye(2), sigmay())
IZ = tensor(qeye(2), sigmaz())
ZI = tensor(sigmaz(), qeye(2))
ZY = tensor(sigmaz(), sigmay())
ZX = tensor(sigmaz(), sigmax())
ZZ = tensor(sigmaz(), sigmaz())
a = np.column_stack([from16to8(II),from16to8(IX)/2,from16to8(IY)/2,from16to8(IZ)/2
                    ,from16to8(ZI)/2,from16to8(ZX)/2,from16to8(ZY)/2,from16to8(ZZ)/2])
b = from16to8(H_R_BD[0:4, 0:4])
c = np.linalg.solve(a, b)
sympy.Matrix(np.round(c,6))
# -63.9, -0.02, -0.00017,1.2e-5,127.9,0.073, 0.00017, -0.039
<jupyter_output><empty_output><jupyter_text># Function : get_Pauli_coefficient<jupyter_code>def sorted_diag_op(eigen):
    """
    Diagonalizing matrix by the order of eigenvector
    
    Args:
        eigen : output of eigenstates() method in qutip
       
    Return: a new diagonalizing operator Qobj
    """
       
    index_list = [np.argmax(np.absolute(vec)) for vec in eigen[1]]
    
    X_array = np.column_stack([eigen[1][index_list.index(i)] for i in range(eigen[1].size)])
    X = Qobj(X_array)
    
    return X
def get_Pauli_coefficient(wm,del_m0,del_m1,gdm
                         ,wt,del_t0,del_t1,gdt
                         ,wr,gmt,Omega):
    # CSFQ, j=0,1,2
    w1_d = lambda j: wm*j + del_m0/2*j*(j-1) if j<3 else wm*j + del_m0/2*j*(j-1) + del_m1-del_m0 # dressed freq
    gamma1 = lambda j: j/(wr + w1_d(j-1) - w1_d(j))
    w1_b = lambda j: w1_d(j) - gamma1(j) * gdm**2  # bare freq. j=1,2,3
    # transmon, j=0,1,2
    w2_d = lambda j: wt*j + del_t0/2*j*(j-1) if j<3 else wt*j + del_t0/2*j*(j-1) + del_t1-del_t0 # dressed freq
    gamma2 = lambda j: j/(wr + w2_d(j-1) - w2_d(j))
    w2_b = lambda j: w2_d(j) - gamma2(j) * gdt**2  # bare freq.
    # J, {j1,j2}=0,1,2
    J = lambda j1,j2 : gmt - gdm*gdt/2*(1/(wr+w1_d(j1)-w1_d(j1+1)) + 1/(wr+w2_d(j2)-w2_d(j2+1)) 
                                        + 1/(wr-w1_d(j1)+w1_d(j1+1)) + 1/(wr-w2_d(j2)+w2_d(j2+1))) 
    
    def get_possible_states(n):
        """
        Get a list of tuples of possible states, given n
        Ex) for n=2, output is [(0,0),(0,1),(1,0),(0,2),(1,1),(2,0)]
        Args: 
            n: integer such that n1+n2<=n for |n1,n2> state
        Return:
            List of tuples that satisfy n1+n2<=n where n1 and n2 are 0,1,2,3,....
        """
        def get_possible_sum(n):
                """
                Return [(0, n), (1, n-1), ..., (n, 0)].
                """
                result = []
                for i in range(n+1):
                    result.append((i,n-i))
                return result
        possible_list = []
        for i in range(n + 1):
            possible_list += get_possible_sum(i)
        return possible_list
    def kronecker_delta(i,j):
        return (1 if i==j else 0)
    # Get H0, V0, b1 and b2
    states = get_possible_states(3) # up to |3> state
    unsorted_state_energy_dict = { s: w1_b(s[0]) + w2_b(s[1]) for s in states}
   
    sorted_state_energy_dict = OrderedDict(sorted(unsorted_state_energy_dict.items(), key=lambda x: x[1]))
    sorted_energy = np.array(list(sorted_state_energy_dict.values()))
    sorted_states = list(sorted_state_energy_dict.keys())
#     Xuexin
    sorted_states= [(0,0),(0,1),(1,0),(1,1),(0,2),(2,0),(0,3),(1,2),(2,1),(3,0)]
    sorted_energy = np.array([0, w2_b(1), w1_b(1), w1_b(1)+w2_b(1), w2_b(2), w1_b(2), w2_b(3)
                 ,w1_b(1)+w2_b(2), w1_b(2)+w2_b(1), w1_b(3)])
    n = len(sorted_state_energy_dict)
    b1_array, b2_array, V0_array = np.zeros((n,n)), np.zeros((n,n)), np.zeros((n,n))
    for i, j in itertools.product(range(n), range(n)):
        n1_i, n1_j = sorted_states[i][0], sorted_states[j][0]
        n2_i, n2_j = sorted_states[i][1], sorted_states[j][1]
        # b1_array[i,j] = <n1_i, n2_i|b1|n1_j, n2_j>
        b1_array[i, j] = np.sqrt(n1_j)* kronecker_delta(n1_i, n1_j-1) * kronecker_delta(n2_i, n2_j)
        b2_array[i, j] = np.sqrt(n2_j)* kronecker_delta(n2_i, n2_j-1) * kronecker_delta(n1_i, n1_j)
        # 1) n1=n1_i-1, n2 = n2_i,  2) n1=n1_i, n2=n2_i-1
        V0_array[i,j] = (np.sqrt(n1_i*(n2_i+1))*J(n1_i-1, n2_i)*kronecker_delta(n1_i-1, n1_j)*kronecker_delta(n2_i+1, n2_j)
                       + np.sqrt((n1_i+1)*n2_i)*J(n1_i, n2_i-1)*kronecker_delta(n1_i+1, n1_j)*kronecker_delta(n2_i-1, n2_j))
    H0 = Qobj(np.diag(sorted_energy))    
    V0 = Qobj(V0_array)
    b1 = Qobj(b1_array)
    b2 = Qobj(b2_array)
    
    b1t = b1 + b1.dag()
    b2t = b2 + b2.dag()
    ntot = b1.dag()*b1 + b2.dag()*b2
    # By solving eigenvalue
    H1 = H0 + V0
    eigen = H1.eigenstates()
    # find U
    U = sorted_diag_op(eigen)
    U.dims = H1.dims
    # diagonalize H1 = H0 + V0
    H2 = U.dag() * H1 * U
    
    # get new Hd and HR
#     Omega = 1 # MHz
    wd = (H2[3,3]-H2[2,2]+H2[1,1])/2  # 10443 - 5157 + 5285 in MHz
    Hd = lambda t: Omega * np.cos(2*pi*wd*t)* b1t
    Hd_D = lambda t: U.dag() * Hd(t) * U
    # To rotating frame
    R = lambda t: (-1j*2*pi*wd*ntot*t).expm()
    HR = lambda t: R(t).dag() * (H2 + Hd_D(t)) * R(t) - ntot *wd
    # average over 10 periods
    t_list = np.linspace(0, 2, 401)*1e-3  # in us
    HR_list = [HR(t) for t in t_list]
    H_R = functools.reduce(operator.add, HR_list) / len(HR_list)
        
    # least action
    eigen = H_R.eigenstates()
    # find X
    X = sorted_diag_op(eigen)
    # find X_BD and XP
    A, B, C = X[0:2,0:2], X[2:4, 2:4], X[4:10, 4:10]
    X_BD_array = block_diag(A,B,C)
    X_BD = Qobj(X_BD_array)
    # X_BD = X_BD.tidyup(atol=1e-3)
    XP = X_BD * X_BD.dag()
    # find T
    T = X * X_BD.dag() * XP.sqrtm().inv()
    # find H_R_BD
    T.dims = H_R.dims
    H_R_BD =  T.dag() * H_R * T
       
    def from16to8(M):
        return np.array([M[0,0],M[1,0],M[0,1],M[1,1],M[2,2],M[3,2],M[2,3],M[3,3]]).reshape(8,1)
    # Get Pauli coeff.
    II = tensor(qeye(2), qeye(2))
    IX = tensor(qeye(2), sigmax())
    IY = tensor(qeye(2), sigmay())
    IZ = tensor(qeye(2), sigmaz())
    ZI = tensor(sigmaz(), qeye(2))
    ZY = tensor(sigmaz(), sigmay())
    ZX = tensor(sigmaz(), sigmax())
    ZZ = tensor(sigmaz(), sigmaz())
    a = np.column_stack([from16to8(II),from16to8(IX)/2,from16to8(IY)/2,from16to8(IZ)/2
                        ,from16to8(ZI)/2,from16to8(ZX)/2,from16to8(ZY)/2,from16to8(ZZ)/2])
    b = from16to8(H_R_BD[0:4, 0:4])
    c = np.linalg.solve(a, b)
    
    return c
# parameters CSFQ-transmon
wm = 5164       # CSFQ w01
del_m0 = 600   # first anharmonicity
del_m1 = 430  # second anharmon.
gdm = 80     # CSFQ-bus coupling
wr = 6292 # bus cavity
wt = 5292   # transmon
del_t0 = -329.1
del_t1 = -369.7
gdt = 80    # transmon-bus coupling
gmt = 0  # csfq-transmon direct g
# parameters, transmon-transmon
wm = 5292 #5164       # CSFQ w01
del_m0 = -330 #600   # first anharmonicity
del_m1 = -370 #430  # second anharmon.
gdm = 80     # CSFQ-bus coupling
wr = 6292 # bus cavity
wt = 5164 #5292   # transmon
del_t0 = -329.1
del_t1 = -369.7
gdt = 80    # transmon-bus coupling
gmt = 0  # csfq-transmon direct g
# parameters, transmon-transmon (usual target-control swapped)
wm = 5164       # CSFQ w01
del_m0 = -330 #600   # first anharmonicity
del_m1 = -370 #430  # second anharmon.
gdm = 80    # CSFQ-bus coupling
wr = 6292 # bus cavity
wt = 5292   # transmon
del_t0 = -329.1
del_t1 = -369.7
gdt = -80   # transmon-bus coupling
gmt = 2.7  # csfq-transmon direct g
# plot Pauli coeff. vs CR amplitude
Omega_list = np.linspace(0, 200, 20)
Pauli = get_Pauli_coefficient(wm,del_m0,del_m1,gdm
                         ,wt,del_t0,del_t1,gdt
                         ,wr,gmt,Omega_list[0])
for omega in Omega_list[1:]:
    Pauli = np.column_stack((Pauli, get_Pauli_coefficient(wm,del_m0,del_m1,gdm
                         ,wt,del_t0,del_t1,gdt,wr,gmt,omega)))
    
# plot
Pauli_label = ['II','IX','IY','IZ','ZI','ZX','ZY','ZZ']
fig, ax = plt.subplots(1,1, figsize=(10,8))
for i in range(Pauli.shape[0]):
    ax.plot(Omega_list, Pauli[i], label=Pauli_label[i], linewidth=3)
ax.set_xlabel('CR amplitude, Omega (MHz)', fontsize=18)
ax.set_ylabel('$Pauli Coefficient$ (MHz)', fontsize=18)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.grid('on')
ax.legend(fontsize=18)<jupyter_output>/usr/lib/python3/dist-packages/numpy/core/_asarray.py:85: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)
<jupyter_text># Function : get ZZ<jupyter_code>def get_static_ZZ(wm,del_m0,del_m1,gdm
                  ,wt,del_t0,del_t1,gdt
                         ,wr,gmt):
    """
    By diagonalizing two-qubit Hamiltonian, calculate ZZ(=E11-E01-E10+E00).
    """
    
    # CSFQ, j=0,1,2
    w1_d = lambda j: wm*j + del_m0/2*j*(j-1) if j<3 else wm*j + del_m0/2*j*(j-1) + del_m1-del_m0 # dressed freq
    gamma1 = lambda j: j/(wr + w1_d(j-1) - w1_d(j))
    w1_b = lambda j: w1_d(j) - gamma1(j) * gdm**2  # bare freq. j=1,2,3
    # transmon, j=0,1,2
    w2_d = lambda j: wt*j + del_t0/2*j*(j-1) if j<3 else wt*j + del_t0/2*j*(j-1) + del_t1-del_t0 # dressed freq
    gamma2 = lambda j: j/(wr + w2_d(j-1) - w2_d(j))
    w2_b = lambda j: w2_d(j) - gamma2(j) * gdt**2  # bare freq.
    # J, {j1,j2}=0,1,2
    J = lambda j1,j2 : gmt - gdm*gdt/2*(1/(wr+w1_d(j1)-w1_d(j1+1)) + 1/(wr+w2_d(j2)-w2_d(j2+1)) 
                                        + 1/(wr-w1_d(j1)+w1_d(j1+1)) + 1/(wr-w2_d(j2)+w2_d(j2+1))) 
    
    def get_possible_states(n):
        """
        Get a list of tuples of possible states, given n
        Ex) for n=2, output is [(0,0),(0,1),(1,0),(0,2),(1,1),(2,0)]
        Args: 
            n: integer such that n1+n2<=n for |n1,n2> state
        Return:
            List of tuples that satisfy n1+n2<=n where n1 and n2 are 0,1,2,3,....
        """
        def get_possible_sum(n):
                """
                Return [(0, n), (1, n-1), ..., (n, 0)].
                """
                result = []
                for i in range(n+1):
                    result.append((i,n-i))
                return result
        possible_list = []
        for i in range(n + 1):
            possible_list += get_possible_sum(i)
        return possible_list
    def kronecker_delta(i,j):
        return (1 if i==j else 0)   
    def sorted_diag_op(eigen):
        """
        Diagonalizing matrix by the order of eigenvector
        Args:
            eigen : output of eigenstates() method in qutip
        Return: a new diagonalizing operator Qobj
        """
        index_list = [np.argmax(np.absolute(vec)) for vec in eigen[1]]
        X_array = np.column_stack([eigen[1][index_list.index(i)] for i in range(eigen[1].size)])
        X = Qobj(X_array)
        return X
    
    # Get H0, V0, b1 and b2
    states = get_possible_states(3) # up to |3> state
    unsorted_state_energy_dict = { s: w1_b(s[0]) + w2_b(s[1]) for s in states}
   
    sorted_state_energy_dict = OrderedDict(sorted(unsorted_state_energy_dict.items(), key=lambda x: x[1]))
    sorted_energy = np.array(list(sorted_state_energy_dict.values()))
    sorted_states = list(sorted_state_energy_dict.keys())
#     Xuexin
    sorted_states= [(0,0),(0,1),(1,0),(1,1),(0,2),(2,0),(0,3),(1,2),(2,1),(3,0)]
    sorted_energy = np.array([0, w2_b(1), w1_b(1), w1_b(1)+w2_b(1), w2_b(2), w1_b(2), w2_b(3)
                 ,w1_b(1)+w2_b(2), w1_b(2)+w2_b(1), w1_b(3)])
    n = len(sorted_state_energy_dict)
    b1_array, b2_array, V0_array = np.zeros((n,n)), np.zeros((n,n)), np.zeros((n,n))
    for i, j in itertools.product(range(n), range(n)):
        n1_i, n1_j = sorted_states[i][0], sorted_states[j][0]
        n2_i, n2_j = sorted_states[i][1], sorted_states[j][1]
        # b1_array[i,j] = <n1_i, n2_i|b1|n1_j, n2_j>
        b1_array[i, j] = np.sqrt(n1_j)* kronecker_delta(n1_i, n1_j-1) * kronecker_delta(n2_i, n2_j)
        b2_array[i, j] = np.sqrt(n2_j)* kronecker_delta(n2_i, n2_j-1) * kronecker_delta(n1_i, n1_j)
        # 1) n1=n1_i-1, n2 = n2_i,  2) n1=n1_i, n2=n2_i-1
        V0_array[i,j] = (np.sqrt(n1_i*(n2_i+1))*J(n1_i-1, n2_i)*kronecker_delta(n1_i-1, n1_j)*kronecker_delta(n2_i+1, n2_j)
                       + np.sqrt((n1_i+1)*n2_i)*J(n1_i, n2_i-1)*kronecker_delta(n1_i+1, n1_j)*kronecker_delta(n2_i-1, n2_j))
    H0 = Qobj(np.diag(sorted_energy))    
    V0 = Qobj(V0_array)
    b1 = Qobj(b1_array)
    b2 = Qobj(b2_array)
    
    b1t = b1 + b1.dag()
    b2t = b2 + b2.dag()
    ntot = b1.dag()*b1 + b2.dag()*b2
    # By solving eigenvalue
    H1 = H0 + V0
    eigen = H1.eigenstates()
    # find U
    U = sorted_diag_op(eigen)
    U.dims = H1.dims
    # diagonalize H1 = H0 + V0
    H2 = U.dag() * H1 * U
    
    return (H2[3,3]-H2[2,2]-H2[1,1], J(0,0))
# parameters, transmon-transmon
wm = 5292 #5164       # transmon
del_m0 = -330 #600   # first anharmonicity
del_m1 = -370 #430  # second anharmon.
gdm = 120     # transmon-bus coupling
wr = 6292 # bus cavity
wt = 5164 #5292   # transmon
del_t0 = -329.1
del_t1 = -369.7
gdt = 120    # transmon-bus coupling
gmt = 3  # csfq-transmon direct g
get_static_ZZ(wm,del_m0,del_m1,gdm
              ,wt,del_t0,del_t1,gdt
              ,wr,gmt)[1]
# sweep the bus frequency wr and compute the static ZZ and J
wr_list = np.linspace(5500, 8000, 101)
ZZ = [get_static_ZZ(wm,del_m0,del_m1,gdm
                   ,wt,del_t0,del_t1,gdt
                   ,wr,gmt)[0] for wr in wr_list]
J = [get_static_ZZ(wm,del_m0,del_m1,gdm
                   ,wt,del_t0,del_t1,gdt
                   ,wr,gmt)[1] for wr in wr_list]  
# plot
fig, ax = plt.subplots(1,1, figsize=(10,8))
ax.plot(wr_list, ZZ, linewidth=3, label='ZZ')
ax.plot(wr_list, J, linewidth=3, label='J')
ax.set_xlabel('wr (MHz)', fontsize=18)
ax.set_ylabel('$ZZ$ (MHz)', fontsize=18)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.grid('on')
ax.legend(fontsize=18)
plt.plot(wr_list, J)<jupyter_output><empty_output><jupyter_text># Test<jupyter_code>from sympy import I
Omega = 10
wd = sympy.Symbol('wd')
t = sympy.Symbol('t')
Hd = Omega * (sympy.exp(-I*wd*t) + sympy.exp(I*wd*t))/2 * sympy.Matrix(b1t.data.toarray())
Hdt = (U.dag()).data.toarray() * Hd * U.data.toarray()
Htot = H2.data.toarray() + Hdt
Ur = sympy.exp(-I*wd *sympy.Matrix(ntot.data.toarray())*t)
Urdag = sympy.exp(I*wd *sympy.Matrix(ntot.data.toarray())*t)
H3t = Urdag * Htot * Ur -  sympy.Matrix(ntot.data.toarray())
H3t_RWA = sympy.simplify(H3t).subs(sympy.exp(-2*I*wd*t),0)
sympy.simplify(H3t).subs([(sympy.exp(2*I*wd*t),0), (sympy.oo,0)])
<jupyter_output><empty_output> | 
	no_license | 
	/Two-qubit with CR.ipynb | 
	jaseung/python-code-SYR | 17 | 
| 
	<jupyter_start><jupyter_text>## 1. Google Play Store apps and reviews
Mobile apps are everywhere. They are easy to create and can be lucrative. Because of these two factors, more and more apps are being developed. In this notebook, we will do a comprehensive analysis of the Android app market by comparing over ten thousand apps in Google Play across different categories. We'll look for insights in the data to devise strategies to drive growth and retention.
Let's take a look at the data, which consists of two files:
apps.csv: contains all the details of the applications on Google Play. There are 13 features that describe a given app.
user_reviews.csv: contains 100 reviews for each app, most helpful first. The text in each review has been pre-processed and attributed with three new features: Sentiment (Positive, Negative or Neutral), Sentiment Polarity and Sentiment Subjectivity.
<jupyter_code># Read in dataset
import pandas as pd
apps_with_duplicates = pd.read_csv("datasets/apps.csv")
# Drop duplicates
apps = apps_with_duplicates.drop_duplicates()
# Print the total number of apps
print('Total number of apps in the dataset = ', len(apps))
# Print a concise summary of apps dataframe
print(apps.info())
# Have a look at a random sample of n rows
n = 5
apps.sample(n)<jupyter_output>Total number of apps in the dataset =        Unnamed: 0                                                App  \
0              0     Photo Editor & Candy Camera & Grid & ScrapBook   
1              1                                Coloring book moana   
2              2  U Launcher Lite – FREE Live Cool Themes, Hide ...   
3              3                              Sketch - Draw & Paint   
4              4              Pixel Draw - Number Art Coloring Book   
5              5                         Paper flowers instructions   
6              6            Smoke Effect Photo Maker - Smoke Editor   
7              7                                   Infinite Painter   
8              8                               Garden Coloring Book   
9              9                      Kids Paint Free - Drawing Fun   
10            10                            Text on Photo - Fonteee   
11            11            Name Art Photo Editor - Focus n Filters   
12            12                     T[...]<jupyter_text>## 2. Data cleaning
The four features that we will be working with most frequently henceforth are Installs, Size, Rating and Price. The info() function (from the previous task) told us that the Installs and Price columns are of type object and not int64 or float64 as we would expect. This is because these columns contain characters other than the digits [0-9]. Ideally, we would want these columns to be numeric, as their names suggest.
Hence, we now proceed to data cleaning and prepare our data to be consumed in our analysis later. Specifically, the presence of special characters (, $ +) in the Installs and Price columns makes their conversion to a numerical data type difficult.<jupyter_code># List of characters to remove
chars_to_remove = ['+', ',', '$']
# List of column names to clean
cols_to_clean = ["Installs", "Price"]
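# Quick sanity check: before cleaning, these columns are stored as text (dtype 'object').
print(apps[cols_to_clean].dtypes)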
# Loop for each column
for col in cols_to_clean:
    # Replace each character with an empty string
    for char in chars_to_remove:
        apps[col] = apps[col].astype(str).str.replace(char, '')
    # Convert col to numeric
    apps[col] = pd.to_numeric(apps[col]) <jupyter_output><empty_output><jupyter_text>## 3. Exploring app categories
With more than 1 billion active users in 190 countries around the world, Google Play continues to be an important distribution platform to build a global audience. For businesses to get their apps in front of users, it's important to make them more quickly and easily discoverable on Google Play. To improve the overall search experience, Google has introduced the concept of grouping apps into categories.
This brings us to the following questions:
Which category has the highest share of (active) apps in the market? 
Is any specific category dominating the market?
Which categories have the fewest number of apps?
We will see that there are 33 unique app categories present in our dataset. Family and Game apps have the highest market prevalence. Interestingly, Tools, Business and Medical apps are also at the top.<jupyter_code>import plotly
plotly.offline.init_notebook_mode(connected=True)
import plotly.graph_objs as go
# Print the total number of unique categories
num_categories = len(apps['Category'].unique())
print('Number of categories = ', num_categories)
# Count the number of apps in each 'Category' and sort them in descending order
num_apps_in_category = apps['Category'].value_counts().sort_values(ascending = False)
data = [go.Bar(
        x = num_apps_in_category.index, # index = category name
        y = num_apps_in_category.values, # value = count
)]
plotly.offline.iplot(data)<jupyter_output><empty_output><jupyter_text>## 4. Distribution of app ratings
After having witnessed the market share for each category of apps, let's see how all these apps perform on an average. App ratings (on a scale of 1 to 5) impact the discoverability, conversion of apps as well as the company's overall brand image. Ratings are a key performance indicator of an app.
From our analysis, we find that the average rating across all app categories is 4.17. The histogram is skewed to the left, indicating that the majority of apps are highly rated, with only a few exceptions among low-rated apps.<jupyter_code># Average rating of apps
avg_app_rating = apps['Rating'].mean()
print('Average app rating = ', avg_app_rating)
# Distribution of apps according to their ratings
data = [go.Histogram(
        x = apps['Rating']
)]
# Vertical dashed line to indicate the average app rating
layout = {'shapes': [{
              'type' :'line',
              'x0': avg_app_rating,
              'y0': 0,
              'x1': avg_app_rating,
              'y1': 1000,
              'line': { 'dash': 'dashdot'}
          }]
          }
plotly.offline.iplot({'data': data, 'layout': layout})<jupyter_output>Average app rating =  4.173243045387994
<jupyter_text>## 5. Size and price of an app
Let's now examine app size and app price. For size, if the mobile app is too large, it may be difficult and/or expensive for users to download. Lengthy download times could turn users off before they even experience your mobile app. Plus, each user's device has a finite amount of disk space. For price, some users expect their apps to be free or inexpensive. These problems compound if the developing world is part of your target market; especially due to internet speeds, earning power and exchange rates.
How can we effectively come up with strategies to size and price our app?
Does the size of an app affect its rating? 
Do users really care about system-heavy apps or do they prefer light-weighted apps? 
Does the price of an app affect its rating? 
Do users always prefer free apps over paid apps?
We find that the majority of top rated apps (rating over 4) range from 2 MB to 20 MB. We also find that the vast majority of apps price themselves under \$10.<jupyter_code>%matplotlib inline
import seaborn as sns
sns.set_style("darkgrid")
import warnings
warnings.filterwarnings("ignore")
# Filter rows where both Rating and Size values are not null
apps_with_size_and_rating_present = apps[(~apps['Rating'].isnull()) & (~apps['Size'].isnull())]
# Subset for categories with at least 250 apps
large_categories = apps_with_size_and_rating_present.groupby(['Category']).filter(lambda x: len(x) >= 250).reset_index()
# Plot size vs. rating
plt1 = sns.jointplot(x = large_categories['Size'], y = large_categories['Rating'], kind = 'hex')
# Subset apps whose 'Type' is 'Paid'
paid_apps = apps_with_size_and_rating_present[apps_with_size_and_rating_present['Type'] == 'Paid']
# Plot price vs. rating
plt2 = sns.jointplot(x = paid_apps['Price'], y = paid_apps['Rating'])<jupyter_output><empty_output><jupyter_text>## 6. Relation between app category and app price
So now comes the hard part. How are companies and developers supposed to make ends meet? What monetization strategies can companies use to maximize profit? The costs of apps are largely based on features, complexity, and platform.
There are many factors to consider when selecting the right pricing strategy for your mobile app. It is important to consider the willingness of your customer to pay for your app. A wrong price could break the deal before the download even happens. Potential customers could be turned off by what they perceive to be a shocking cost, or they might delete an app they’ve downloaded after receiving too many ads or simply not getting their money's worth.
Different categories demand different price ranges. Some apps that are simple and used daily, like the calculator app, should probably be kept free. However, it would make sense to charge for a highly-specialized medical app that diagnoses diabetic patients. Below, we see that Medical and Family apps are the most expensive. Some medical apps extend even up to \$80! All game apps are reasonably priced below \$20.<jupyter_code>import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
# Select a few popular app categories
popular_app_cats = apps[apps.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY',
                                            'MEDICAL', 'TOOLS', 'FINANCE',
                                            'LIFESTYLE','BUSINESS'])]
# Examine the price trend by plotting Price vs Category
ax = sns.stripplot(x = popular_app_cats['Price'], y = popular_app_cats['Category'], jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories')
# Apps whose Price is greater than 200
apps_above_200 = popular_app_cats[['Category', 'App', 'Price']][popular_app_cats['Price'] > 200]
apps_above_200<jupyter_output><empty_output><jupyter_text>## 7. Filter out "junk" apps
It looks like a bunch of the really expensive apps are "junk" apps. That is, apps that don't really have a purpose. Some app developer may create an app called I Am Rich Premium or most expensive app (H) just for a joke or to test their app development skills. Some developers even do this with malicious intent and try to make money by hoping people accidentally click purchase on their app in the store.
Let's filter out these junk apps and re-do our visualization.<jupyter_code># Select apps priced below $100
apps_under_100 = popular_app_cats[popular_app_cats['Price'] < 100]
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
# Examine price vs category with the authentic apps (apps_under_100)
ax = sns.stripplot(x='Price', y='Category', data=apps_under_100,
                   jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories after filtering for junk apps')<jupyter_output><empty_output><jupyter_text>## 8. Popularity of paid apps vs free apps
For apps in the Play Store today, there are five types of pricing strategies: free, freemium, paid, paymium, and subscription. Let's focus on free and paid apps only. Some characteristics of free apps are:
Free to download.
Main source of income often comes from advertisements.
Often created by companies that have other products and the app serves as an extension of those products.
Can serve as a tool for customer retention, communication, and customer service.
Some characteristics of paid apps are:
Users are asked to pay once for the app to download and use it.
The user can't really get a feel for the app before buying it.
Are paid apps installed as much as free apps? It turns out that paid apps have a relatively lower number of installs than free apps, though the difference is not as stark as I would have expected!<jupyter_code>trace0 = go.Box(
    # Data for paid apps
    y=apps[apps['Type'] == 'Paid']['Installs'],
    name = 'Paid'
)
trace1 = go.Box(
    # Data for free apps
    y=apps[apps['Type'] == 'Free']['Installs'],
    name = 'Free'
)
layout = go.Layout(
    title = "Number of downloads of paid apps vs. free apps",
    yaxis = dict(
        type = 'log',
        autorange = True
    )
)
# Add trace0 and trace1 to a list for plotting
data = [trace0, trace1]
plotly.offline.iplot({'data': data, 'layout': layout})<jupyter_output><empty_output><jupyter_text>## 9. Sentiment analysis of user reviews
Mining user review data to determine how people feel about your product, brand, or service can be done using a technique called sentiment analysis. User reviews for apps can be analyzed to identify if the mood is positive, negative or neutral about that app. For example, positive words in an app review might include words such as 'amazing', 'friendly', 'good', 'great', and 'love'. Negative words might be words like 'malware', 'hate', 'problem', 'refund', and 'incompetent'.
By plotting sentiment polarity scores of user reviews for paid and free apps, we observe that free apps receive a lot of harsh comments, as indicated by the outliers on the negative y-axis. Reviews for paid apps appear never to be extremely negative. This may indicate something about app quality, i.e., paid apps being of higher quality than free apps on average. The median polarity score for paid apps is a little higher than free apps, thereby syncing with our previous observation.
In this notebook, we analyzed over ten thousand apps from the Google Play Store. We can use our findings to inform our decisions should we ever wish to create an app ourselves.<jupyter_code># Load user_reviews.csv
reviews_df = pd.read_csv("datasets/user_reviews.csv")
# Join and merge the two dataframe
merged_df = pd.merge(apps, reviews_df, on = 'App', how = "inner")
# Drop NA values from Sentiment and Translated_Review columns
merged_df = merged_df.dropna(subset=['Sentiment', 'Translated_Review'])
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
# User review sentiment polarity for paid vs. free apps
ax = sns.boxplot(x = 'Type', y = 'Sentiment_Polarity', data = merged_df)
ax.set_title('Sentiment Polarity Distribution')<jupyter_output><empty_output> | 
	no_license | 
	/notebook.ipynb | 
	nitanitapsari/The-Android-App-Market-on-Google-Play | 9 | 
| 
	<jupyter_start><jupyter_text># 3 Maneras de Programar a una Red Neuronal - DOTCSV
## Initial code<jupyter_code>import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
# Create our synthetic dataset: two concentric rings of
# data points that we will try to classify.
X, Y = make_circles(n_samples=500, factor=0.5, noise=0.05)
# Resolution of the prediction map.
res = 100 
# Coordinates of the prediction map.
_x0 = np.linspace(-1.5, 1.5, res)
_x1 = np.linspace(-1.5, 1.5, res)
# Input with every combination of coordinates of the prediction map.
_pX = np.array(np.meshgrid(_x0, _x1)).T.reshape(-1, 2)
# Empty prediction map, initialized to 0.5.
_pY = np.zeros((res, res)) + 0.5
# Visualization of the prediction map.
plt.figure(figsize=(8, 8))
plt.pcolormesh(_x0, _x1, _pY, cmap="coolwarm", vmin=0, vmax=1)
# Visualization of the data cloud.
plt.scatter(X[Y == 0,0], X[Y == 0,1], c="skyblue")
plt.scatter(X[Y == 1,0], X[Y == 1,1], c="salmon")
plt.tick_params(labelbottom=False, labelleft=False)<jupyter_output><empty_output><jupyter_text>## Tensorflow<jupyter_code>import tensorflow as tf
from matplotlib import animation
from IPython.core.display import display, HTML
# Define the network's input placeholders, for the X and Y matrices.
iX = tf.placeholder('float', shape=[None, X.shape[1]])
iY = tf.placeholder('float', shape=[None])
lr = 0.01           # learning rate
nn = [2, 16, 8, 1]  # number of neurons per layer.
# Layer 1
W1 = tf.Variable(tf.random_normal([nn[0], nn[1]]), name='Weights_1')
b1 = tf.Variable(tf.random_normal([nn[1]]), name='bias_1')
l1 = tf.nn.relu(tf.add(tf.matmul(iX, W1), b1))
# Layer 2
W2 = tf.Variable(tf.random_normal([nn[1], nn[2]]), name='Weights_2')
b2 = tf.Variable(tf.random_normal([nn[2]]), name='bias_2')
l2 = tf.nn.relu(tf.add(tf.matmul(l1, W2), b2))
# Layer 3
W3 = tf.Variable(tf.random_normal([nn[2], nn[3]]), name='Weights_3')
b3 = tf.Variable(tf.random_normal([nn[3]]), name='bias_3')
# Vector of predictions for Y.
pY = tf.nn.sigmoid(tf.add(tf.matmul(l2, W3), b3))[:, 0]
# Evaluation of the predictions.
loss = tf.losses.mean_squared_error(pY, iY)
# Define the network's optimizer, so that it minimizes the error.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)
n_steps = 1000 # Number of training steps.
iPY = [] # Here we store the evolution of the predictions, for the animation.
with tf.Session() as sess:
  
  # Initialize all the network parameters, the W and b matrices.
  sess.run(tf.global_variables_initializer())
    
  # Iterate over n training passes.
  for step in range(n_steps):
  
    # Evaluate the optimizer, the cost function and the output tensor pY.
    # Evaluating the optimizer is what actually trains the network.
    _, _loss, _pY = sess.run([optimizer, loss, pY], feed_dict={ iX : X, iY : Y })
    
    # Every 25 iterations, print metrics.
    if step % 25 == 0: 
      
      # Compute the accuracy.
      acc = np.mean(np.round(_pY) == Y)
      
      # Print metrics.
      print('Step', step, '/', n_steps, '- Loss = ', _loss, '- Acc =', acc)
      
      # Get predictions for every point of our prediction map _pX.
      _pY = sess.run(pY, feed_dict={ iX : _pX }).reshape((res, res))
      # And store it to visualize the animation.
      iPY.append(_pY)
      
  
# ----- ANIMATION CODE ----- #
ims = []
fig = plt.figure(figsize=(10, 10))
print("--- Generando animación ---")
for fr in range(len(iPY)):
  
  im = plt.pcolormesh(_x0, _x1, iPY[fr], cmap="coolwarm", animated=True)
  # Visualization of the data cloud.
  plt.scatter(X[Y == 0,0], X[Y == 0,1], c="skyblue")
  plt.scatter(X[Y == 1,0], X[Y == 1,1], c="salmon")
  # plt.title("Classification result")
  plt.tick_params(labelbottom=False, labelleft=False)
  ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat_delay=1000)
HTML(ani.to_html5_video())<jupyter_output>Step 0 / 1000 - Loss =  0.29063216 - Acc = 0.562
Step 25 / 1000 - Loss =  0.18204297 - Acc = 0.632
Step 50 / 1000 - Loss =  0.1471082 - Acc = 0.79
Step 75 / 1000 - Loss =  0.13354021 - Acc = 0.854
Step 100 / 1000 - Loss =  0.122594796 - Acc = 0.902
Step 125 / 1000 - Loss =  0.111153014 - Acc = 0.942
Step 150 / 1000 - Loss =  0.09965967 - Acc = 0.956
Step 175 / 1000 - Loss =  0.08899061 - Acc = 0.968
Step 200 / 1000 - Loss =  0.07819476 - Acc = 0.98
Step 225 / 1000 - Loss =  0.06843367 - Acc = 0.984
Step 250 / 1000 - Loss =  0.059809823 - Acc = 0.992
Step 275 / 1000 - Loss =  0.051961992 - Acc = 0.994
Step 300 / 1000 - Loss =  0.04506078 - Acc = 0.996
Step 325 / 1000 - Loss =  0.03921565 - Acc = 0.998
Step 350 / 1000 - Loss =  0.034442045 - Acc = 1.0
Step 375 / 1000 - Loss =  0.030614918 - Acc = 1.0
Step 400 / 1000 - Loss =  0.027491104 - Acc = 1.0
Step 425 / 1000 - Loss =  0.024817096 - Acc = 1.0
Step 450 / 1000 - Loss =  0.022489877 - Acc = 1.0
Step 475 / 1000 - Loss =  0.020462362 - [...]<jupyter_text>## Keras<jupyter_code>import tensorflow as tf
import tensorflow.keras as kr
from IPython.core.display import display, HTML
lr = 0.01           # learning rate
nn = [2, 16, 8, 1]  # number of neurons per layer.
# Create the object that will contain our neural network, as a
# sequence of layers.
model = kr.Sequential()
# Add layer 1
l1 = model.add(kr.layers.Dense(nn[1], activation='relu'))
# Add layer 2
l2 = model.add(kr.layers.Dense(nn[2], activation='relu'))
# Add layer 3
l3 = model.add(kr.layers.Dense(nn[3], activation='sigmoid'))
# Compile the model, defining the cost function and the optimizer.
model.compile(loss='mse', optimizer=kr.optimizers.SGD(lr=0.05), metrics=['acc'])
# And train the model.
model.fit(X, Y, epochs=100)<jupyter_output>Epoch 1/100
500/500 [==============================] - 0s 111us/sample - loss: 0.2468 - acc: 0.5040
Epoch 2/100
500/500 [==============================] - 0s 37us/sample - loss: 0.2457 - acc: 0.5100
Epoch 3/100
500/500 [==============================] - 0s 40us/sample - loss: 0.2446 - acc: 0.5040
Epoch 4/100
500/500 [==============================] - 0s 37us/sample - loss: 0.2434 - acc: 0.5160
Epoch 5/100
500/500 [==============================] - 0s 36us/sample - loss: 0.2422 - acc: 0.5200
Epoch 6/100
500/500 [==============================] - 0s 39us/sample - loss: 0.2412 - acc: 0.5400
Epoch 7/100
500/500 [==============================] - 0s 38us/sample - loss: 0.2400 - acc: 0.5460
Epoch 8/100
500/500 [==============================] - 0s 38us/sample - loss: 0.2388 - acc: 0.5780
Epoch 9/100
500/500 [==============================] - 0s 41us/sample - loss: 0.2376 - acc: 0.5840
Epoch 10/100
500/500 [==============================] - 0s 41us/sample - loss: 0.2363 - acc: 0.5960
Epoch 11[...]<jupyter_text>## Sklearn<jupyter_code>import sklearn as sk
import sklearn.neural_network
from IPython.core.display import display, HTML
lr = 0.01           # learning rate
nn = [2, 16, 8, 1]  # number of neurons per layer.
# Create the multilayer neural network model object.
clf = sk.neural_network.MLPRegressor(solver='sgd', 
                                     learning_rate_init=lr, 
                                     hidden_layer_sizes=tuple(nn[1:]),
                                     verbose=True,
                                     n_iter_no_change=1000,
                                     batch_size = 64)
# And train it on our data.
clf.fit(X, Y)<jupyter_output>Iteration 1, loss = 0.66391606
Iteration 2, loss = 0.29448667
Iteration 3, loss = 0.13429471
Iteration 4, loss = 0.13165037
Iteration 5, loss = 0.13430276
Iteration 6, loss = 0.12556423
Iteration 7, loss = 0.12292571
Iteration 8, loss = 0.12204933
Iteration 9, loss = 0.12175702
Iteration 10, loss = 0.12129750
Iteration 11, loss = 0.12073281
Iteration 12, loss = 0.12028767
Iteration 13, loss = 0.11983928
Iteration 14, loss = 0.11939207
Iteration 15, loss = 0.11909108
Iteration 16, loss = 0.11836549
Iteration 17, loss = 0.11771654
Iteration 18, loss = 0.11703195
Iteration 19, loss = 0.11636100
Iteration 20, loss = 0.11559426
Iteration 21, loss = 0.11475135
Iteration 22, loss = 0.11391514
Iteration 23, loss = 0.11296898
Iteration 24, loss = 0.11183055
Iteration 25, loss = 0.11070522
Iteration 26, loss = 0.10945900
Iteration 27, loss = 0.10807801
Iteration 28, loss = 0.10653328
Iteration 29, loss = 0.10483565
Iteration 30, loss = 0.10299502
Iteration 31, loss = 0.10109678
Iteration 32, los[...] | 
	no_license | 
	/Notebooks IA/3_Maneras_de_Programar_a_una_Red_Neuronal_DotCSV.ipynb | 
	miguelmontcerv/Artificial-Intelligence | 4 | 
| 
	<jupyter_start><jupyter_text># How to make a plot with legend entries that are hyperlinks
* See https://github.com/matplotlib/matplotlib/issues/25567
* Works with SVG and PDF<jupyter_code>from matplotlib import pyplot as plt
import numpy as np
# generate SVG images instead of PNG
%config InlineBackend.figure_formats = ['svg']
# required for SVG to accept a click on the text area and not just on the text path
plt.rcParams["svg.fonttype"] = "none"
plt.figure()
plt.scatter([1, 2], [4, 6], label="BBC")
plt.scatter([1, 2, 3], [6, 5, 4], label="Google")
urls = {"BBC": 'https://www.bbc.com/news'}
leg = plt.legend()
for ta in leg.texts:
    t = ta.get_text()
    try:
        url = urls[t]
        ta.set_url(url)
    except KeyError:
        pass
plt.savefig('scatter.svg')
plt.savefig('scatter.pdf')<jupyter_output><empty_output> | 
	permissive | 
	/plots_with_hyperlinks.ipynb | 
	HDembinski/essays | 1 | 
| 
	<jupyter_start><jupyter_text># Programming Exercise 4:  Neural Networks Learning
## Introduction
In this exercise, you will implement the backpropagation algorithm for neural networks and apply it to the task of hand-written digit recognition. Before starting on the programming exercise, we strongly recommend watching the video lectures and completing the review questions for the associated topics.
All the information you need for solving this assignment is in this notebook, and all the code you will be implementing will take place within this notebook. The assignment can be promptly submitted to the coursera grader directly from this notebook (code and instructions are included below).
Before we begin with the exercises, we need to import all libraries required for this programming exercise. Throughout the course, we will be using [`numpy`](http://www.numpy.org/) for all arrays and matrix operations, [`matplotlib`](https://matplotlib.org/) for plotting, and [`scipy`](https://docs.scipy.org/doc/scipy/reference/) for scientific and numerical computation functions and tools. You can find instructions on how to install required libraries in the README file in the [github repository](https://github.com/dibgerge/ml-coursera-python-assignments).<jupyter_code># used for manipulating directory paths
import os
# Scientific and vector computation for python
import numpy as np
# Plotting library
from matplotlib import pyplot
# Optimization module in scipy
from scipy import optimize
# will be used to load MATLAB mat datafile format
from scipy.io import loadmat
# library written for this exercise providing additional functions for assignment submission, and others
import utils
# define the submission/grader object for this exercise
grader = utils.Grader()
# tells matplotlib to embed plots within the notebook
%matplotlib inline<jupyter_output><empty_output><jupyter_text>## Submission and Grading
After completing each part of the assignment, be sure to submit your solutions to the grader. The following is a breakdown of how each part of this exercise is scored.
| Section | Part                                             | Submission function | Points 
| :-      |:-                                                | :-                  | :-:    
| 1       | [Feedforward and Cost Function](#section1)                    | [`nnCostFunction`](#nnCostFunction)   | 30     
| 2       | [Regularized Cost Function](#section2)                        | [`nnCostFunction`](#nnCostFunction)   | 15     
| 3       | [Sigmoid Gradient](#section3)                                 | [`sigmoidGradient`](#sigmoidGradient) | 5      
| 4       | [Neural Net Gradient Function (Backpropagation)](#section4)   | [`nnCostFunction`](#nnCostFunction)   | 40     
| 5       | [Regularized Gradient](#section5)                             | [`nnCostFunction`](#nnCostFunction)   |10     
|         | Total Points                                     |    | 100    
You are allowed to submit your solutions multiple times, and we will take only the highest score into consideration.
At the end of each section in this notebook, we have a cell which contains code for submitting the solutions thus far to the grader. Execute the cell to see your score up to the current section. For all your work to be submitted properly, you must execute those cells at least once.
## Neural Networks
In the previous exercise, you implemented feedforward propagation for neural networks and used it to predict handwritten digits with the weights we provided. In this exercise, you will implement the backpropagation algorithm to learn the parameters for the neural network.
We start the exercise by first loading the dataset. <jupyter_code>#  training data stored in arrays X, y
data = loadmat(os.path.join('Data', 'ex4data1.mat'))
X, y = data['X'], data['y'].ravel()
# set the zero digit to 0, rather than its mapped 10 in this dataset
# This is an artifact due to the fact that this dataset was used in 
# MATLAB where there is no index 0
y[y == 10] = 0
# Number of training examples
m = y.size
<jupyter_output><empty_output><jupyter_text>### 1.1 Visualizing the data
You will begin by visualizing a subset of the training set, using the function `displayData`, which is the same function we used in Exercise 3. It is provided in the `utils.py` file for this assignment as well. The dataset is also the same one you used in the previous exercise.
There are 5000 training examples in `ex4data1.mat`, where each training example is a 20 pixel by 20 pixel grayscale image of the digit. Each pixel is represented by a floating point number indicating the grayscale intensity at that location. The 20 by 20 grid of pixels is “unrolled” into a 400-dimensional vector. Each
of these training examples becomes a single row in our data matrix $X$. This gives us a 5000 by 400 matrix $X$ where every row is a training example for a handwritten digit image.
$$ X = \begin{bmatrix} - \left(x^{(1)} \right)^T - \\
- \left(x^{(2)} \right)^T - \\
\vdots \\
- \left(x^{(m)} \right)^T - \\
\end{bmatrix}
$$
The second part of the training set is a 5000-dimensional vector `y` that contains labels for the training set. 
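Since each row of $X$ is an unrolled 20x20 image, a single digit can be recovered by reshaping one row. A minimal sketch (assuming `X` and `pyplot` as loaded above; `order='F'` is used because the data was saved from MATLAB, which unrolls arrays column-major):

```python
img = X[0].reshape(20, 20, order='F')   # one row of X back as a 20x20 image
pyplot.imshow(img, cmap='gray')
pyplot.axis('off')
```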
The following cell randomly selects 100 images from the dataset and plots them.<jupyter_code># Randomly select 100 data points to display
rand_indices = np.random.choice(m, 100, replace=False)
sel = X[rand_indices, :]
utils.displayData(sel)<jupyter_output><empty_output><jupyter_text>### 1.2 Model representation
Our neural network is shown in the following figure.

It has 3 layers - an input layer, a hidden layer and an output layer. Recall that our inputs are pixel values
of digit images. Since the images are of size $20 \times 20$, this gives us 400 input layer units (not counting the extra bias unit which always outputs +1). The training data was loaded into the variables `X` and `y` above.
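To make the layer sizes concrete, here is a minimal sketch of one feedforward pass through this 400-25-10 network (assuming `Theta1` of shape 25x401 and `Theta2` of shape 10x26 as loaded below, plus `X` and `utils.sigmoid` from this notebook):

```python
a1 = np.concatenate([[1], X[0]])   # add the bias unit -> shape (401,)
a2 = utils.sigmoid(Theta1 @ a1)    # hidden-layer activations -> shape (25,)
a2 = np.concatenate([[1], a2])     # add the bias unit -> shape (26,)
h = utils.sigmoid(Theta2 @ a2)     # output-layer activations -> shape (10,)
predicted_digit = np.argmax(h)     # most active output unit
```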
You have been provided with a set of network parameters ($\Theta^{(1)}, \Theta^{(2)}$) already trained by us. These are stored in `ex4weights.mat` and will be loaded in the next cell of this notebook into `Theta1` and `Theta2`. The parameters have dimensions that are sized for a neural network with 25 units in the second layer and 10 output units (corresponding to the 10 digit classes).<jupyter_code># Setup the parameters you will use for this exercise
input_layer_size  = 400  # 20x20 Input Images of Digits
hidden_layer_size = 25   # 25 hidden units
num_labels = 10          # 10 labels, from 0 to 9
# Load the weights into variables Theta1 and Theta2
weights = loadmat(os.path.join('Data', 'ex4weights.mat'))
# Theta1 has size 25 x 401
# Theta2 has size 10 x 26
Theta1, Theta2 = weights['Theta1'], weights['Theta2']
# roll the rows of Theta2 so that the unit for digit 0 (stored last because of
# MATLAB's 1-based labels in ex4weights.mat) comes first
Theta2 = np.roll(Theta2, 1, axis=0)
# Unroll parameters 
nn_params = np.concatenate([Theta1.ravel(), Theta2.ravel()])
<jupyter_output><empty_output><jupyter_text>
### 1.3 Feedforward and cost function
Now you will implement the cost function and gradient for the neural network. First, complete the code for the function `nnCostFunction` in the next cell to return the cost.
Recall that the cost function for the neural network (without regularization) is:
$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}\sum_{k=1}^{K} \left[ - y_k^{(i)} \log \left( \left( h_\theta \left( x^{(i)} \right) \right)_k \right) - \left( 1 - y_k^{(i)} \right) \log \left( 1 - \left( h_\theta \left( x^{(i)} \right) \right)_k \right) \right]$$
where $h_\theta \left( x^{(i)} \right)$ is computed as shown in the neural network figure above, and K = 10 is the total number of possible labels. Note that $h_\theta(x^{(i)})_k = a_k^{(3)}$ is the activation (output
value) of the $k^{th}$ output unit. Also, recall that whereas the original labels (in the variable y) were 0, 1, ..., 9, for the purpose of training a neural network, we need to encode the labels as vectors containing only values 0 or 1, so that
$$ y = 
\begin{bmatrix} 1 \\ 0 \\ 0 \\\vdots \\ 0 \end{bmatrix}, \quad
\begin{bmatrix} 0 \\ 1 \\ 0 \\ \vdots \\ 0 \end{bmatrix}, \quad \cdots  \quad \text{or} \qquad
\begin{bmatrix} 0 \\ 0 \\ 0 \\ \vdots \\ 1 \end{bmatrix}.
$$
For example, if $x^{(i)}$ is an image of the digit 5, then the corresponding $y^{(i)}$ (that you should use with the cost function) should be a 10-dimensional vector with $y_5 = 1$, and the other elements equal to 0.
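As a concrete illustration of this encoding, here is a minimal, hypothetical sketch with `numpy` (the names `y_int`, `K` and `Y_onehot` are illustrative and not part of the assignment code):

```python
import numpy as np

y_int = np.array([5, 0, 9])     # integer labels in 0..K-1, as used in this notebook
K = 10                          # number of classes
Y_onehot = np.eye(K)[y_int]     # shape (m, K); row i has a single 1 in column y_int[i]
```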
You should implement the feedforward computation that computes $h_\theta(x^{(i)})$ for every example $i$ and sum the cost over all examples. **Your code should also work for a dataset of any size, with any number of labels** (you can assume that there are always at least $K \ge 3$ labels).
**Implementation Note:** The matrix $X$ contains the examples in rows (i.e., X[i,:] is the i-th training example $x^{(i)}$, expressed as a $n \times 1$ vector.) When you complete the code in `nnCostFunction`, you will need to add the column of 1’s to the X matrix. The parameters for each unit in the neural network is represented in Theta1 and Theta2 as one row. Specifically, the first row of Theta1 corresponds to the first hidden unit in the second layer. You can use a for-loop over the examples to compute the cost.
<jupyter_code>def nnCostFunction(nn_params,
                   input_layer_size,
                   hidden_layer_size,
                   num_labels,
                   X, y, lambda_=0.0):
    """
    Implements the neural network cost function and gradient for a two layer neural 
    network which performs classification. 
    
    Parameters
    ----------
    nn_params : array_like
        The parameters for the neural network which are "unrolled" into 
        a vector. This needs to be converted back into the weight matrices Theta1
        and Theta2.
    
    input_layer_size : int
        Number of features for the input layer. 
    
    hidden_layer_size : int
        Number of hidden units in the second layer.
    
    num_labels : int
        Total number of labels, or equivalently number of units in output layer. 
    
    X : array_like
        Input dataset. A matrix of shape (m x input_layer_size).
    
    y : array_like
        Dataset labels. A vector of shape (m,).
    
    lambda_ : float, optional
        Regularization parameter.
 
    Returns
    -------
    J : float
        The computed value for the cost function at the current weight values.
    
    grad : array_like
        An "unrolled" vector of the partial derivatives of the concatenation of
        neural network weights Theta1 and Theta2.
    
    Instructions
    ------------
    You should complete the code by working through the following parts.
    
    - Part 1: Feedforward the neural network and return the cost in the 
              variable J. After implementing Part 1, you can verify that your
              cost function computation is correct by verifying the cost
              computed in the following cell.
    
    - Part 2: Implement the backpropagation algorithm to compute the gradients
              Theta1_grad and Theta2_grad. You should return the partial derivatives of
              the cost function with respect to Theta1 and Theta2 in Theta1_grad and
              Theta2_grad, respectively. After implementing Part 2, you can check
              that your implementation is correct by running checkNNGradients provided
              in the utils.py module.
    
              Note: The vector y passed into the function is a vector of labels
                    containing values from 0..K-1. You need to map this vector into a 
                    binary vector of 1's and 0's to be used with the neural network
                    cost function.
     
              Hint: We recommend implementing backpropagation using a for-loop
                    over the training examples if you are implementing it for the 
                    first time.
    
    - Part 3: Implement regularization with the cost function and gradients.
    
              Hint: You can implement this around the code for
                    backpropagation. That is, you can compute the gradients for
                    the regularization separately and then add them to Theta1_grad
                    and Theta2_grad from Part 2.
    
    Note 
    ----
    We have provided an implementation for the sigmoid function in the file 
    `utils.py` accompanying this assignment.
    """
    # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
    # for our 2 layer neural network
    theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
                        (hidden_layer_size, (input_layer_size + 1)))
    theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
                        (num_labels, (hidden_layer_size + 1)))
    # Setup some useful variables
    m = y.size
         
    # You need to return the following variables correctly 
    J = 0
    grad1 = np.zeros(theta1.shape)
    grad2 = np.zeros(theta2.shape)
    # ====================== YOUR CODE HERE ======================
    init_y=np.zeros((m,num_labels))#5000*10
    for i in range(m):
        init_y[i][y[i]]=1
    if X.shape[1]==input_layer_size:
        ones=np.ones((m,1))
        X=np.hstack((ones,X))
    
    cost=[0]*m
    #initialize
    # Theta1 has size 25 x 401
    # Theta2 has size 10 x 26
    D1=np.zeros_like(theta1)
    D2=np.zeros_like(theta2)
    
    for i in range(m):
        a1=X[i][:,None]#401*1
        z2=np.dot(theta1,a1)#25*1
        a2=utils.sigmoid(z2)
        #add bias
        a2=np.vstack((np.ones(1),a2))#26*1
        z3=np.dot(theta2,a2)#10*1
        h=utils.sigmoid(z3)#output layer
        a3=h
        cost[i]=(-1/m)*(np.sum((init_y[i][:,None])*(np.log(h))+(1-init_y[i][:,None])*(np.log(1-h))))
        
        
        # calculate the gradients (backpropagation)
        d3=a3-init_y[i][:,None]
        d2=np.dot(theta2.T,d3)[1:]*(sigmoidGradient(z2))
        
        D1=D1 + np.dot(d2,a1.T)#25*401
        D2=D2 + np.dot(d3,a2.T)#10*26
        
    #regularization
    reg=(lambda_/(2*m))*((np.sum(theta1[:,1:]**2))+(np.sum(theta2[:,1:]**2)))
    
    # regularize all weights except the bias column (column 0)
    grad1=(1/m)*D1
    grad1[:,1:]=grad1[:,1:]+(lambda_/m)*theta1[:,1:]
    
    grad2=(1/m)*D2
    grad2[:,1:]=grad2[:,1:]+(lambda_/m)*theta2[:,1:]
    
    #append and unroll 
#     grad=np.append(grad1,grad2).reshape(-1)
    J=np.sum(cost)+reg
    
    
    # ================================================================
    # Unroll gradients
    # grad = np.concatenate([Theta1_grad.ravel(order=order), Theta2_grad.ravel(order=order)])
    grad = np.concatenate([grad1.ravel(), grad2.ravel()])
    return J, grad<jupyter_output><empty_output><jupyter_text>
Use the following links to go back to the different parts of this exercise that require you to modify the function `nnCostFunction`.
Back to:
- [Feedforward and cost function](#section1)
- [Regularized cost](#section2)
- [Neural Network Gradient (Backpropagation)](#section4)
- [Regularized Gradient](#section5)
Once you are done, call your `nnCostFunction` using the loaded set of parameters for `Theta1` and `Theta2`. You should see that the cost is about 0.287629.<jupyter_code>lambda_ = 0
J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                   num_labels, X, y, lambda_)
print('Cost at parameters (loaded from ex4weights): %.6f ' % J)
print('The cost should be about                   : 0.287629.')<jupyter_output>Cost at parameters (loaded from ex4weights): 0.287629 
The cost should be about                   : 0.287629.
<jupyter_text>*You should now submit your solutions.*<jupyter_code>grader = utils.Grader()
grader[1] = nnCostFunction
grader.grade()<jupyter_output>
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission (ezrzrtc)? (Y/n): n
Login (email address): [email protected]
Token: 2ndHgbcjRsmC4bQ8
                                  Part Name |     Score | Feedback
                                  --------- |     ----- | --------
              Feedforward and Cost Function |  30 /  30 | Nice work!
                  Regularized Cost Function |   0 /  15 | 
                           Sigmoid Gradient |   0 /   5 | 
  Neural Network Gradient (Backpropagation) |   0 /  40 | 
                       Regularized Gradient |   0 /  10 | 
                                  --------------------------------
                                            |  30 / 100 |  
<jupyter_text>
### 1.4 Regularized cost function
The cost function for neural networks with regularization is given by:
$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}\sum_{k=1}^{K} \left[ - y_k^{(i)} \log \left( \left( h_\theta \left( x^{(i)} \right) \right)_k \right) - \left( 1 - y_k^{(i)} \right) \log \left( 1 - \left( h_\theta \left( x^{(i)} \right) \right)_k \right) \right] + \frac{\lambda}{2 m} \left[ \sum_{j=1}^{25} \sum_{k=1}^{400} \left( \Theta_{j,k}^{(1)} \right)^2 + \sum_{j=1}^{10} \sum_{k=1}^{25} \left( \Theta_{j,k}^{(2)} \right)^2 \right] $$
You can assume that the neural network will only have 3 layers - an input layer, a hidden layer and an output layer. However, your code should work for any number of input units, hidden units and output units. While we
have explicitly listed the indices above for $\Theta^{(1)}$ and $\Theta^{(2)}$ for clarity, do note that your code should in general work with $\Theta^{(1)}$ and $\Theta^{(2)}$ of any size. Note that you should not be regularizing the terms that correspond to the bias. For the matrices `Theta1` and `Theta2`, this corresponds to the first column of each matrix. You should now add regularization to your cost function. Notice that you can first compute the unregularized cost function $J$ using your existing `nnCostFunction` and then later add the cost for the regularization terms.
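A minimal sketch of the regularization term above, assuming `Theta1`, `Theta2`, `m` and `lambda_` as defined in this notebook (slicing from column 1 excludes the bias terms; `J_unregularized` is a placeholder name for the cost from Part 1):

```python
reg = (lambda_ / (2 * m)) * (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))
J = J_unregularized + reg
```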
[Click here to go back to `nnCostFunction` for editing.](#nnCostFunction) Once you are done, the next cell will call your `nnCostFunction` using the loaded set of parameters for `Theta1` and `Theta2`, and $\lambda = 1$. You should see that the cost is about 0.383770.<jupyter_code># Weight regularization parameter (we set this to 1 here).
lambda_ = 1
J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size,
                      num_labels, X, y, lambda_)
print('Cost at parameters (loaded from ex4weights): %.6f' % J)
print('This value should be about                 : 0.383770.')<jupyter_output>Cost at parameters (loaded from ex4weights): 0.383770
This value should be about                 : 0.383770.
<jupyter_text>*You should now submit your solutions.*<jupyter_code>grader[2] = nnCostFunction
grader.grade()<jupyter_output>
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission ([email protected])? (Y/n): Y
<jupyter_text>## 2 Backpropagation
In this part of the exercise, you will implement the backpropagation algorithm to compute the gradient for the neural network cost function. You will need to update the function `nnCostFunction` so that it returns an appropriate value for `grad`. Once you have computed the gradient, you will be able to train the neural network by minimizing the cost function $J(\theta)$ using an advanced optimizer such as `scipy`'s `optimize.minimize`.
You will first implement the backpropagation algorithm to compute the gradients for the parameters for the (unregularized) neural network. After you have verified that your gradient computation for the unregularized case is correct, you will implement the gradient for the regularized neural network.
### 2.1 Sigmoid Gradient
To help you get started with this part of the exercise, you will first implement
the sigmoid gradient function. The gradient for the sigmoid function can be
computed as
$$ g'(z) = \frac{d}{dz} g(z) = g(z)\left(1-g(z)\right) $$
where
$$ \text{sigmoid}(z) = g(z) = \frac{1}{1 + e^{-z}} $$
Now complete the implementation of `sigmoidGradient` in the next cell.
<jupyter_code>def sigmoidGradient(z):
    """
    Computes the gradient of the sigmoid function evaluated at z. 
    This should work regardless if z is a matrix or a vector. 
    In particular, if z is a vector or matrix, you should return
    the gradient for each element.
    
    Parameters
    ----------
    z : array_like
        A vector or matrix as input to the sigmoid function. 
    
    Returns
    --------
    g : array_like
        Gradient of the sigmoid function. Has the same shape as z. 
    
    Instructions
    ------------
    Compute the gradient of the sigmoid function evaluated at
    each value of z (z can be a matrix, vector or scalar).
    
    Note
    ----
    We have provided an implementation of the sigmoid function 
    in `utils.py` file accompanying this assignment.
    """
    g = np.zeros(z.shape)
    # ====================== YOUR CODE HERE ======================
    
    # =============================================================
    return (utils.sigmoid(z)*(1-utils.sigmoid(z)))<jupyter_output><empty_output><jupyter_text>When you are done, the following cell call `sigmoidGradient` on a given vector `z`. Try testing a few values by calling `sigmoidGradient(z)`. For large values (both positive and negative) of z, the gradient should be close to 0. When $z = 0$, the gradient should be exactly 0.25. Your code should also work with vectors and matrices. For a matrix, your function should perform the sigmoid gradient function on every element.<jupyter_code>z = np.array([-1, -0.5, 0, 0.5, 1])
g = sigmoidGradient(z)
print('Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n  ')
print(g)<jupyter_output>Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:
  
[0.19661193 0.23500371 0.25       0.23500371 0.19661193]
<jupyter_text>*You should now submit your solutions.*<jupyter_code>grader[3] = sigmoidGradient
grader.grade()<jupyter_output>
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission ([email protected])? (Y/n): Y
<jupyter_text>## 2.2 Random Initialization
When training neural networks, it is important to randomly initialize the parameters for symmetry breaking. One effective strategy for random initialization is to randomly select values for $\Theta^{(l)}$ uniformly in the range $[-\epsilon_{init}, \epsilon_{init}]$. You should use $\epsilon_{init} = 0.12$. This range of values ensures that the parameters are kept small and makes the learning more efficient.
One effective strategy for choosing $\epsilon_{init}$ is to base it on the number of units in the network. A good choice of $\epsilon_{init}$ is $\epsilon_{init} = \frac{\sqrt{6}}{\sqrt{L_{in} + L_{out}}}$ where $L_{in} = s_l$ and $L_{out} = s_{l+1}$ are the number of units in the layers adjacent to $\Theta^{l}$.
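A minimal sketch of this size-based heuristic (assuming `numpy` as `np`; `L_in` and `L_out` are the unit counts of the two layers adjacent to $\Theta^{(l)}$):

```python
epsilon_init = np.sqrt(6) / np.sqrt(L_in + L_out)
```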
Your job is to complete the function `randInitializeWeights` to initialize the weights for $\Theta$. Modify the function by filling in the following code:
```python
# Randomly initialize the weights to small values
W = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
```
Note that we give the function an argument for $\epsilon$ with default value `epsilon_init = 0.12`.<jupyter_code>def randInitializeWeights(L_in, L_out, epsilon_init=0.12):
    """
    Randomly initialize the weights of a layer in a neural network.
    
    Parameters
    ----------
    L_in : int
        Number of incomming connections.
    
    L_out : int
        Number of outgoing connections. 
    
    epsilon_init : float, optional
        Range of values which the weight can take from a uniform 
        distribution.
    
    Returns
    -------
    W : array_like
        The weight initialiatized to random values.  Note that W should
        be set to a matrix of size(L_out, 1 + L_in) as
        the first column of W handles the "bias" terms.
        
    Instructions
    ------------
    Initialize W randomly so that we break the symmetry while training
    the neural network. Note that the first column of W corresponds 
    to the parameters for the bias unit.
    """
    # You need to return the following variables correctly 
    W = np.zeros((L_out, 1 + L_in))
    # ====================== YOUR CODE HERE ======================
    # Randomly initialize the weights to small values
    W = np.random.rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init
    # ============================================================
    return W<jupyter_output><empty_output><jupyter_text>*You do not need to submit any code for this part of the exercise.*
Execute the following cell to initialize the weights for the 2 layers in the neural network using the `randInitializeWeights` function.<jupyter_code>print('Initializing Neural Network Parameters ...')
initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)
# Unroll parameters
initial_nn_params = np.concatenate([initial_Theta1.ravel(), initial_Theta2.ravel()], axis=0)<jupyter_output>Initializing Neural Network Parameters ...
<jupyter_text>
### 2.3 Backpropagation

Now, you will implement the backpropagation algorithm. Recall that the intuition behind the backpropagation algorithm is as follows. Given a training example $(x^{(t)}, y^{(t)})$, we will first run a “forward pass” to compute all the activations throughout the network, including the output value of the hypothesis $h_\theta(x)$. Then, for each node $j$ in layer $l$, we would like to compute an “error term” $\delta_j^{(l)}$ that measures how much that node was “responsible” for any errors in our output.
For an output node, we can directly measure the difference between the network’s activation and the true target value, and use that to define $\delta_j^{(3)}$ (since layer 3 is the output layer). For the hidden units, you will compute $\delta_j^{(l)}$ based on a weighted average of the error terms of the nodes in layer $(l+1)$. In detail, here is the backpropagation algorithm (also depicted in the figure above). You should implement steps 1 to 4 in a loop that processes one example at a time. Concretely, you should implement a for-loop `for t in range(m)` and place steps 1-4 below inside the for-loop, with the $t^{th}$ iteration performing the calculation on the $t^{th}$ training example $(x^{(t)}, y^{(t)})$. Step 5 will divide the accumulated gradients by $m$ to obtain the gradients for the neural network cost function.
1. Set the input layer's values $(a^{(1)})$ to the $t^{th}$ training example $x^{(t)}$. Perform a feedforward pass, computing the activations $(z^{(2)}, a^{(2)}, z^{(3)}, a^{(3)})$ for layers 2 and 3. Note that you need to add a `+1` term to ensure that the vectors of activations for layers $a^{(1)}$ and $a^{(2)}$ also include the bias unit. In `numpy`, if `a_1` is a matrix with one example per row, adding the bias unit corresponds to `a_1 = np.concatenate([np.ones((m, 1)), a_1], axis=1)`.
1. For each output unit $k$ in layer 3 (the output layer), set 
$$\delta_k^{(3)} = \left(a_k^{(3)} - y_k \right)$$
where $y_k \in \{0, 1\}$ indicates whether the current training example belongs to class $k$ $(y_k = 1)$, or if it belongs to a different class $(y_k = 0)$. You may find logical arrays helpful for this task (explained in the previous programming exercise).
1. For the hidden layer $l = 2$, set 
$$ \delta^{(2)} = \left( \Theta^{(2)} \right)^T \delta^{(3)} * g'\left(z^{(2)} \right)$$
Note that the symbol $*$ performs element wise multiplication in `numpy`.
1. Accumulate the gradient from this example using the following formula. Note that you should skip or remove $\delta_0^{(2)}$. In `numpy`, removing $\delta_0^{(2)}$ corresponds to `delta_2 = delta_2[1:]`.
$$ \Delta^{(l)} = \Delta^{(l)} + \delta^{(l+1)} (a^{(l)})^{(T)} $$
1. Obtain the (unregularized) gradient for the neural network cost function by dividing the accumulated gradients by $m$:
$$ \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)}$$
**Python/Numpy tip**: You should implement the backpropagation algorithm only after you have successfully completed the feedforward and cost functions. While implementing the backpropagation alogrithm, it is often useful to use the `shape` function to print out the shapes of the variables you are working with if you run into dimension mismatch errors.
[Click here to go back and update the function `nnCostFunction` with the backpropagation algorithm](#nnCostFunction).
**Note:** If the iterative solution provided above proves difficult to implement, try the vectorized approach, which the course moderators consider easier to implement. You can find the tutorial for the vectorized approach [here](https://www.coursera.org/learn/machine-learning/discussions/all/threads/a8Kce_WxEeS16yIACyoj1Q). After you have implemented the backpropagation algorithm, we will proceed to run gradient checking on your implementation. The gradient check will allow you to increase your confidence that your code is
computing the gradients correctly.
### 2.4  Gradient checking 
In your neural network, you are minimizing the cost function $J(\Theta)$. To perform gradient checking on your parameters, you can imagine “unrolling” the parameters $\Theta^{(1)}$, $\Theta^{(2)}$ into a long vector $\theta$. By doing so, you can think of the cost function being $J(\Theta)$ instead and use the following gradient checking procedure.
Suppose you have a function $f_i(\theta)$ that purportedly computes $\frac{\partial}{\partial \theta_i} J(\theta)$; you’d like to check if $f_i$ is outputting correct derivative values.
$$
\text{Let } \theta^{(i+)} = \theta + \begin{bmatrix} 0 \\ 0 \\ \vdots \\ \epsilon \\ \vdots \\ 0 \end{bmatrix}
\quad \text{and} \quad \theta^{(i-)} = \theta - \begin{bmatrix} 0 \\ 0 \\ \vdots \\ \epsilon \\ \vdots \\ 0 \end{bmatrix}
$$
So, $\theta^{(i+)}$ is the same as $\theta$, except its $i^{th}$ element has been incremented by $\epsilon$. Similarly, $\theta^{(i−)}$ is the corresponding vector with the $i^{th}$ element decreased by $\epsilon$. You can now numerically verify $f_i(\theta)$’s correctness by checking, for each $i$, that:
$$ f_i\left( \theta \right) \approx \frac{J\left( \theta^{(i+)}\right) - J\left( \theta^{(i-)} \right)}{2\epsilon} $$
The degree to which these two values should approximate each other will depend on the details of $J$. But assuming $\epsilon = 10^{-4}$, you’ll usually find that the left- and right-hand sides of the above will agree to at least 4 significant digits (and often many more).
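A minimal sketch of this central-difference check (illustrative only; the course ships its own version in `utils.computeNumericalGradient`, and `cost_fn` here is a hypothetical function that returns just the scalar cost):

```python
import numpy as np

def numerical_gradient(cost_fn, theta, eps=1e-4):
    """Approximate dJ/dtheta_i by (J(theta + eps*e_i) - J(theta - eps*e_i)) / (2*eps)."""
    num_grad = np.zeros_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = eps
        num_grad[i] = (cost_fn(theta + step) - cost_fn(theta - step)) / (2 * eps)
    return num_grad
```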
We have implemented the function to compute the numerical gradient for you in `computeNumericalGradient` (within the file `utils.py`). While you are not required to modify the file, we highly encourage you to take a look at the code to understand how it works.
In the next cell we will run the provided function `checkNNGradients` which will create a small neural network and dataset that will be used for checking your gradients. If your backpropagation implementation is correct,
you should see a relative difference that is less than 1e-9.
**Practical Tip**: When performing gradient checking, it is much more efficient to use a small neural network with a relatively small number of input units and hidden units, thus having a relatively small number
of parameters. Each dimension of $\theta$ requires two evaluations of the cost function and this can be expensive. In the function `checkNNGradients`, our code creates a small random model and dataset which is used with `computeNumericalGradient` for gradient checking. Furthermore, after you are confident that your gradient computations are correct, you should turn off gradient checking before running your learning algorithm.
**Practical Tip:** Gradient checking works for any function where you are computing the cost and the gradient. Concretely, you can use the same `computeNumericalGradient` function to check if your gradient implementations for the other exercises are correct too (e.g., logistic regression’s cost function).
<jupyter_code>utils.checkNNGradients(nnCostFunction)<jupyter_output>[[-9.27825235e-03 -9.27825236e-03]
 [-3.04978931e-06 -3.04978914e-06]
 [-1.75060082e-04 -1.75060082e-04]
 [-9.62660640e-05 -9.62660620e-05]
 [ 8.89911959e-03  8.89911960e-03]
 [ 1.42869427e-05  1.42869443e-05]
 [ 2.33146358e-04  2.33146357e-04]
 [ 1.17982666e-04  1.17982666e-04]
 [-8.36010761e-03 -8.36010762e-03]
 [-2.59383093e-05 -2.59383100e-05]
 [-2.87468729e-04 -2.87468729e-04]
 [-1.37149707e-04 -1.37149706e-04]
 [ 7.62813551e-03  7.62813551e-03]
 [ 3.69883235e-05  3.69883234e-05]
 [ 3.35320347e-04  3.35320347e-04]
 [ 1.53247082e-04  1.53247082e-04]
 [-6.74798370e-03 -6.74798370e-03]
 [-4.68759787e-05 -4.68759769e-05]
 [-3.76215585e-04 -3.76215587e-04]
 [-1.66560297e-04 -1.66560294e-04]
 [ 3.14544970e-01  3.14544970e-01]
 [ 1.64090819e-01  1.64090819e-01]
 [ 1.64567932e-01  1.64567932e-01]
 [ 1.58339334e-01  1.58339334e-01]
 [ 1.51127527e-01  1.51127527e-01]
 [ 1.49568335e-01  1.49568335e-01]
 [ 1.11056588e-01  1.11056588e-01]
 [ 5.75736493e-02  5.75736493e-02]
 [ 5.77867378e-02  5[...]<jupyter_text>*Once your cost function passes the gradient check for the (unregularized) neural network cost function, you should submit the neural network gradient function (backpropagation).*<jupyter_code>grader[4] = nnCostFunction
grader.grade()<jupyter_output>
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission ([email protected])? (Y/n): Y
                                  Part Name |     Score | Feedback
                                  --------- |     ----- | --------
              Feedforward and Cost Function |  30 /  30 | Nice work!
                  Regularized Cost Function |  15 /  15 | Nice work!
                           Sigmoid Gradient |   5 /   5 | Nice work!
  Neural Network Gradient (Backpropagation) |  40 /  40 | Nice work!
                       Regularized Gradient |   0 /  10 | 
                                  --------------------------------
                                            |  90 / 100 |  
<jupyter_text>
### 2.5 Regularized Neural Network
After you have successfully implemented the backpropagation algorithm, you will add regularization to the gradient. To account for regularization, it turns out that you can add this as an additional term *after* computing the gradients using backpropagation.
Specifically, after you have computed $\Delta_{ij}^{(l)}$ using backpropagation, you should add regularization using
$$ \begin{align} 
& \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)} & \qquad \text{for } j = 0 \\
& \frac{\partial}{\partial \Theta_{ij}^{(l)}} J(\Theta) = D_{ij}^{(l)} = \frac{1}{m} \Delta_{ij}^{(l)} + \frac{\lambda}{m} \Theta_{ij}^{(l)} & \qquad \text{for } j \ge 1
\end{align}
$$
Note that you should *not* be regularizing the first column of $\Theta^{(l)}$ which is used for the bias term. Furthermore, in the parameters $\Theta_{ij}^{(l)}$, $i$ is indexed starting from 1, and $j$ is indexed starting from 0. Thus, 
$$
\Theta^{(l)} = \begin{bmatrix}
\Theta_{1,0}^{(l)} & \Theta_{1,1}^{(l)} & \cdots \\
\Theta_{2,0}^{(l)} & \Theta_{2,1}^{(l)} & \cdots \\
\vdots &  ~ & \ddots
\end{bmatrix}
$$
[Now modify your code that computes grad in `nnCostFunction` to account for regularization.](#nnCostFunction)
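A minimal sketch of the two cases above (names are illustrative: `D1`, `D2` stand for the accumulated $\Delta$ matrices and `Theta1`, `Theta2` for the weight matrices; column 0 holds the bias weights and is left unregularized):

```python
Theta1_grad = D1 / m
Theta2_grad = D2 / m
Theta1_grad[:, 1:] += (lambda_ / m) * Theta1[:, 1:]
Theta2_grad[:, 1:] += (lambda_ / m) * Theta2[:, 1:]
```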
After you are done, the following cell runs gradient checking on your implementation. If your code is correct, you should expect to see a relative difference that is less than 1e-9.<jupyter_code>#  Check gradients by running checkNNGradients
lambda_ = 3
utils.checkNNGradients(nnCostFunction, lambda_)
# Also output the costFunction debugging values
debug_J, _  = nnCostFunction(nn_params, input_layer_size,
                          hidden_layer_size, num_labels, X, y, lambda_)
print('\n\nCost at (fixed) debugging parameters (w/ lambda = %f): %f ' % (lambda_, debug_J))
print('(for lambda = 3, this value should be about 0.576051)')
grader[5] = nnCostFunction
grader.grade()<jupyter_output>
Submitting Solutions | Programming Exercise neural-network-learning
Use token from last successful submission ([email protected])? (Y/n): Y
<jupyter_text>### 2.6 Learning parameters using `scipy.optimize.minimize`
After you have successfully implemented the neural network cost function
and gradient computation, we will next use `scipy`'s minimization routine to learn a good set of parameters.<jupyter_code>#  After you have completed the assignment, change the maxiter to a larger
#  value to see how more training helps.
options= {'maxiter': 100}
#  You should also try different values of lambda
lambda_ = 1
# Create "short hand" for the cost function to be minimized
costFunction = lambda p: nnCostFunction(p, input_layer_size,
                                        hidden_layer_size,
                                        num_labels, X, y, lambda_)
# Now, costFunction is a function that takes in only one argument
# (the neural network parameters)
res = optimize.minimize(costFunction,
                        initial_nn_params,
                        jac=True,
                        method='TNC',
                        options=options)
# get the solution of the optimization
nn_params = res.x
        
# Obtain Theta1 and Theta2 back from nn_params
Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
                    (hidden_layer_size, (input_layer_size + 1)))
Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],
                    (num_labels, (hidden_layer_size + 1)))<jupyter_output><empty_output><jupyter_text>After the training completes, we will proceed to report the training accuracy of your classifier by computing the percentage of examples it got correct. If your implementation is correct, you should see a reported
training accuracy of about 95.3% (this may vary by about 1% due to the random initialization). It is possible to get higher training accuracies by training the neural network for more iterations. We encourage you to try
training the neural network for more iterations (e.g., set `maxiter` to 400) and also vary the regularization parameter $\lambda$. With the right learning settings, it is possible to get the neural network to perfectly fit the training set.<jupyter_code>
pred = utils.predict(Theta1, Theta2, X)
print('Training Set Accuracy: %f' % (np.mean(pred == y) * 100))<jupyter_output>Training Set Accuracy: 97.520000
<jupyter_text>## 3 Visualizing the Hidden Layer
One way to understand what your neural network is learning is to visualize the representations captured by the hidden units. Informally, given a particular hidden unit, one way to visualize what it computes is to find an input $x$ that will cause it to activate (that is, to have an activation value 
($a_i^{(l)}$) close to 1). For the neural network you trained, notice that the $i^{th}$ row of $\Theta^{(1)}$ is a 401-dimensional vector that represents the parameter for the $i^{th}$ hidden unit. If we discard the bias term, we get a 400 dimensional vector that represents the weights from each input pixel to the hidden unit.
Thus, one way to visualize the “representation” captured by the hidden unit is to reshape this 400 dimensional vector into a 20 × 20 image and display it (It turns out that this is equivalent to finding the input that gives the highest activation for the hidden unit, given a “norm” constraint on the input (i.e., $||x||_2 \le 1$)). 
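A minimal sketch for a single hidden unit (assuming `Theta1` and `pyplot` as used above; `order='F'` matches the MATLAB-style column-major unrolling of the input pixels):

```python
w = Theta1[0, 1:]                                  # weights of the first hidden unit, bias dropped
pyplot.imshow(w.reshape(20, 20, order='F'), cmap='gray')
pyplot.axis('off')
```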
The next cell does this by using the `displayData` function and it will show you an image with 25 units,
each corresponding to one hidden unit in the network. In your trained network, you should find that the hidden units corresponds roughly to detectors that look for strokes and other patterns in the input.<jupyter_code>utils.displayData(Theta1[:, 1:])<jupyter_output><empty_output> | 
	no_license | 
	/Exercise4/exercise4.ipynb | 
	arpit1012/COURSERA-MACHINE-LEARNING-PYTHON-IMPLEMENTATION | 20 | 
| 
	<jupyter_start><jupyter_text># New plots in preparation for paper
* There appears to be no correlation between the radio properties of our sample and Eddington ratio in any sense
* Radio flux/luminosity is roughly correlated with bolometric luminosity and absolute magnitudes $M_i(z=2)$
* Loosely speaking, the median black hole mass is higher for our detections than our non-detections.  The median BH mass is also slightly higher for RL detections than RQ detections.
* The 1d fit of our data in $C_{IV}$ space does not tell us much, other than both detections and non-detections span the fit uniformly.  RQ  and RL detections also uniformly span the fit.
* $\alpha_{ro}$ as a radio-loudness indicator results in stronger correlations between radio and optical properties--possibly due to it depending on optical properties as well..?
Main takeaway:  It is difficult to really find any trends between our observed radio properties of our targets and their optical properties.  However, when analyzing our RQ and RL detections separately, there is an apparent change in behavior nearby the radio-loudness boundary (which is determined by the peak luminosity of rapidly star forming galaxies), possibly backing the idea of more than one process driving the emission in each subsample.  Either way, after sifting through our data again for trends, it's clear that we could benefit from more/further observations to really discern anything meaningful about our sample. <jupyter_code>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import astropy
from astropy.io import fits
from astropy.table import Table
import richardsplot
import sklearn
import sys
%matplotlib inline
infile = 'SpringFinalFigs_moreParams.csv'
df = pd.read_csv(infile)
print(df.head())
infile2 = 'ICA/spring_ICA_weights.csv'
df2 = pd.read_csv(infile2)
X = np.array(df2)[:,1:] #format weights for t-SNE
print(X.shape)
#10 weights for this set of objects
W1 = X[:,0]
W2 = X[:,1]
W3 = X[:,2]
W4 = X[:,3]
W5 = X[:,4]
W6 = X[:,5]
W7 = X[:,6]
W8 = X[:,7]
W9 = X[:,8]
W10 = X[:,9]
# t-SNE
from sklearn.manifold import TSNE
tsne2 = TSNE(n_components = 2)
projTSNE2 = tsne2.fit_transform(X)
print(projTSNE2)
plt.figure(figsize=(9,8))
plt.scatter(projTSNE2[:,0], projTSNE2[:,1], c=W1, s=100, cmap="PRGn", edgecolor="None")
cb = plt.colorbar()
cb.ax.set_ylabel("W1")
plt.xlim(-100,100)
plt.ylim(-100,100)
# Find radius of circle containing 20% of the sources and plot
x0 = 0 #take center of circle to be 0,0
y0 = 0
x  = projTSNE2[:,0]
y  = projTSNE2[:,1]
# Find radial coordinate of each point
r  = np.sqrt((x - x0)**2 + (y - y0)**2)
t  = 20 # percent
# Find radius that corresponds to t percentile
r0 = np.percentile(r, t)
circle=plt.Circle((0, 0), r0, color='k', fill=False)
fig = plt.gcf()
fig.gca().add_artist(circle)
plt.plot([0,0], [r0,100], c='k')
plt.plot([0,0], [-r0,-100], c='k')
plt.plot([r0,100], [0,0], c='k')
plt.plot([-r0,-100], [0,0], c='k')<jupyter_output><empty_output><jupyter_text>Can't really do anything meaningful with this analysis with current data.  Could be more effective if:
* Had all three sets of eigenvalues to compare all of our objects in the same context (above is only based on the 42 objects whose regular EW fit was deemed best)
* Gordon - did you say that ICA reconstructions were done for all of the dr12 spectra?  If the weights that were used for those are available somewhere, we could do a more complete t-SNE analysis with the new weights (if that is something worth doing)
Edit:  Actually, does it make sense to do the same analysis with our new ICA weights?  I know we chose our sample using the old (6-component) ICA weights to show they spanned our space uniformly, but is it OK for us to test how our objects move around the newly-defined space, since the original analysis just showed the uniformity of our sample?
-------
#### Try testing out some parameters:<jupyter_code>#Parameters to test
uniform = df[['UNI_FLAG']].values
Miz2 = df[['MI_Z2']].values
det = df[['detection']].values
peak_flux = df[['peak_flux']].values
int_flux = np.log10(df[['int_flux']].values)
logL_rad = df[['L_rad']].values
aro = df[['alpha_ro']].values
ew_civ = df[['EW_CIV_2']].values
voff_civ = df[['VOFF_CIV_PEAK_2']].values
log_bh = df[['LOGBH']].values
logedd_ratio = df[['LOGEDD_RATIO']].values
logL_bol = df[['LOGLBOL']].values
logL_civ = df[['LOGL_CIV']].values
fwhm_civ = df[['FWHM_CIV']].values
duni = (det>0)&(uniform>0)
nduni = (det<0)&(uniform>0)
dnuni = (det>0)&(uniform==0)
ndnuni = (det<0)&(uniform==0)
plt.figure(figsize=(8,8))
plt.hist(int_flux[(det>0)], color='b', histtype='step', bins=15, label='Detections')
plt.hist(int_flux[det<0], color='r', histtype='step', label='Non-Detections')
plt.xlabel('$log_{10} (F * {\mu Jy}^-1)$')
plt.ylabel('Count')
plt.title('Radio Flux Distribution of Our Targets')
plt.legend(loc="best")
print("Num Detections     =", (det>0).sum())
print("Num Non-Detections =", (det<0).sum())
#plt.savefig("Graphs/radioflux_distribution.png")<jupyter_output>Num Detections     = 22
Num Non-Detections = 28
<jupyter_text>* Two of our objects are FIRST detections and are significantly brighter than the rest of the sample
---
### Demographics of our sample<jupyter_code>fig, axs = plt.subplots(4, 2, figsize=(15,21))
axs[0,0].hist(Miz2[det>0], color='b', histtype='step')
axs[0,0].hist(Miz2[det<0], color='r', histtype='step')
axs[0,0].set_xlabel('$M_i(z=2)$')
axs[0,0].invert_xaxis()
axs[0,0].set_ylabel('Count')
axs[0,1].hist(logL_bol[det>0], color='b', histtype='step', label="Detections")
axs[0,1].hist(logL_bol[det<0], color='r', histtype='step', label="Non-Detections")
axs[0,1].set_xlabel('$log_{10} (L_{bol})$')
axs[0,1].legend(loc="best")
axs[1,0].hist(logL_rad[det>0], color='b', histtype='step')
axs[1,0].hist(logL_rad[det<0], color='r', histtype='step')
axs[1,0].set_xlabel('$log_{10} (L_{rad} * (erg/s/Hz)^{-1})$')
axs[1,0].set_ylabel('Count')
axs[1,1].hist(logL_civ[det>0], color='b', histtype='step')
axs[1,1].hist(logL_civ[det<0], color='r', histtype='step')
axs[1,1].set_xlabel('$log_{10} (L_{C_{IV}})$')
axs[1,1].set_ylabel('Count')
axs[2,0].hist(log_bh[det>0], color='b', histtype='step')
axs[2,0].hist(log_bh[det<0], color='r', histtype='step')
axs[2,0].set_xlabel('$log_{10} (M_{BH})$')
axs[2,1].hist(logedd_ratio[det>0], color='b', histtype='step')
axs[2,1].hist(logedd_ratio[det<0], color='r', histtype='step')
axs[2,1].set_xlabel('$log_{10} (L / L_{edd})$')
axs[3,0].hist(ew_civ[det>0], color='b', histtype='step')
axs[3,0].hist(ew_civ[det<0], color='r', histtype='step')
axs[3,0].set_xlabel('$EQW$')
plt.suptitle("Sample Demographics")
#plt.savefig("Graphs/demographics.png")<jupyter_output><empty_output><jupyter_text>* What are the units for log_bh ($M_{BH}$?) and L_bol?--------
### $log(M_{BH}), log(L/L_{edd}), M_i(z=2), log(L_{bol})$ related to radio properties
Reminder: $\alpha_{ro}$ is calculated using the following equation from Eq. 4 of Stone & Richards 2019:
 $\alpha_{ro} = \frac{log(L_{20cm}/L_{2500Å})}{log(20cm / 2500Å)}$<jupyter_code>fig, axs = plt.subplots(2, 2, figsize=(15,12))
axs[0,0].plot(log_bh[duni], int_flux[duni],'bo', label="Det, Uni")
axs[0,0].plot(log_bh[dnuni], int_flux[dnuni],'bo', markerfacecolor="None", label="Det, Non-Uni")
axs[0,0].plot(log_bh[nduni], int_flux[nduni],'ro', label="Non-Det, Uni")
axs[0,0].plot(log_bh[ndnuni], int_flux[ndnuni],'ro', markerfacecolor="None", label="Non-Det, Non-Uni")
#axs[0,0].set_ylim(1,3)
axs[0,0].set_xlabel("$log_{10} (M_{BH} / M_{sun})$")
axs[0,0].set_ylabel('$log_{10} (F * {\mu Jy}^-1)$')
axs[0,0].legend(loc="best")
axs[0,1].plot(log_bh[duni], aro[duni], 'bo')
axs[0,1].plot(log_bh[dnuni], aro[dnuni], 'bo', markerfacecolor="None")
axs[0,1].plot(log_bh[nduni], aro[nduni], 'ro')
axs[0,1].plot(log_bh[ndnuni], aro[ndnuni], 'ro', markerfacecolor="None")
axs[0,1].set_xlabel("$log_{10} (M_{BH} / M_{sun})$")
axs[0,1].set_ylabel('$\\alpha_{ro}$')
axs[0,1].invert_yaxis()
axs[1,0].plot(Miz2[duni], int_flux[duni], 'bo')
axs[1,0].plot(Miz2[dnuni], int_flux[dnuni], 'bo', markerfacecolor="None")
axs[1,0].plot(Miz2[nduni], int_flux[nduni], 'ro')
axs[1,0].plot(Miz2[ndnuni], int_flux[ndnuni], 'ro', markerfacecolor="None")
axs[1,0].set_xlabel("$M_i(z=2)$")
axs[1,0].set_ylabel('$log_{10} (F * {\mu Jy}^-1)$')
axs[1,0].invert_xaxis()
axs[1,1].plot(Miz2[duni], aro[duni], 'bo')
axs[1,1].plot(Miz2[dnuni], aro[dnuni], 'bo', markerfacecolor="None")
axs[1,1].plot(Miz2[nduni], aro[nduni], 'ro')
axs[1,1].plot(Miz2[ndnuni], aro[ndnuni], 'ro', markerfacecolor="None")
axs[1,1].set_xlabel("$M_i(z=2)$")
axs[1,1].set_ylabel('$\\alpha_{ro}$')
axs[1,1].invert_xaxis()
axs[1,1].invert_yaxis()
plt.suptitle("Black Hole Mass and Absolute Magnitudes")
#plt.savefig("Graphs/radio_vs_bhmass_and_mag.png")
fig, axs = plt.subplots(2, 2, figsize=(15,12))
axs[0,0].plot(logedd_ratio[duni], int_flux[duni], 'bo')
axs[0,0].plot(logedd_ratio[dnuni], int_flux[dnuni], 'bo', markerfacecolor="None")
axs[0,0].plot(logedd_ratio[nduni], int_flux[nduni], 'ro')
axs[0,0].plot(logedd_ratio[ndnuni], int_flux[ndnuni], 'ro', markerfacecolor="None")
axs[0,0].set_xlabel("$log_{10} (L / L_{edd})$")
axs[0,0].set_ylabel('$log_{10} (F * {\mu Jy}^-1)$')
axs[0,1].plot(logedd_ratio[duni], aro[duni], 'bo')
axs[0,1].plot(logedd_ratio[dnuni], aro[dnuni], 'bo', markerfacecolor="None")
axs[0,1].plot(logedd_ratio[nduni], aro[nduni], 'ro')
axs[0,1].plot(logedd_ratio[ndnuni], aro[ndnuni], 'ro', markerfacecolor="None")
axs[0,1].set_xlabel("$log_{10} (L / L_{edd})$")
axs[0,1].set_ylabel('$\\alpha_ro$')
axs[0,1].invert_yaxis()
axs[1,0].plot(logL_bol[duni], int_flux[duni], 'bo')
axs[1,0].plot(logL_bol[dnuni], int_flux[dnuni], 'bo', markerfacecolor="None")
axs[1,0].plot(logL_bol[nduni], int_flux[nduni], 'ro')
axs[1,0].plot(logL_bol[ndnuni], int_flux[ndnuni], 'ro', markerfacecolor="None")
axs[1,0].set_xlabel("$log_{10} (L_{bol})$")
axs[1,0].set_ylabel('$log_{10} (F * {\mu Jy}^-1)$')
axs[1,1].plot(logL_bol[duni], aro[duni], 'bo')
axs[1,1].plot(logL_bol[dnuni], aro[dnuni], 'bo', markerfacecolor="None")
axs[1,1].plot(logL_bol[nduni], aro[nduni], 'ro')
axs[1,1].plot(logL_bol[ndnuni], aro[ndnuni], 'ro', markerfacecolor="None")
axs[1,1].set_xlabel("$log_{10} (L_{bol})$")
axs[1,1].set_ylabel('$\\alpha_ro$')
axs[1,1].invert_yaxis()
plt.suptitle("Eddington ratio and Bolometric Luminosity")
#plt.savefig("Graphs/radio_vs_eddratio_and_Lbol.png")<jupyter_output><empty_output><jupyter_text>---
### $C_{IV}$, EQW, Blueshift Analysis<jupyter_code>fig, axs = plt.subplots(2, 2, figsize=(15,12))
axs[0,0].plot(ew_civ[duni], int_flux[duni], 'bo')
axs[0,0].plot(ew_civ[dnuni], int_flux[dnuni], 'bo', markerfacecolor="None")
axs[0,0].plot(ew_civ[nduni], int_flux[nduni], 'ro')
axs[0,0].plot(ew_civ[ndnuni], int_flux[ndnuni], 'ro', markerfacecolor="None")
axs[0,0].set_xlabel("$EQW$")
axs[0,0].set_ylabel('$log_{10} (F * {\mu Jy}^-1)$')
axs[0,1].plot(ew_civ[duni], aro[duni], 'bo')
axs[0,1].plot(ew_civ[dnuni], aro[dnuni], 'bo', markerfacecolor="None")
axs[0,1].plot(ew_civ[nduni], aro[nduni], 'ro')
axs[0,1].plot(ew_civ[ndnuni], aro[ndnuni], 'ro', markerfacecolor="None")
axs[0,1].set_xlabel("$EQW$")
axs[0,1].set_ylabel('$\\alpha_{ro}$')
axs[0,1].invert_yaxis()
axs[1,0].plot(logL_civ[duni], int_flux[duni], 'bo')
axs[1,0].plot(logL_civ[dnuni], int_flux[dnuni], 'bo', markerfacecolor="None")
axs[1,0].plot(logL_civ[nduni], int_flux[nduni], 'ro')
axs[1,0].plot(logL_civ[ndnuni], int_flux[ndnuni], 'ro', markerfacecolor="None")
axs[1,0].set_xlabel("$log_{10} (L_{C_{IV}})$")
axs[1,0].set_ylabel('$log_{10} (F * {\mu Jy}^-1)$')
axs[1,1].plot(logL_civ[duni], aro[duni], 'bo')
axs[1,1].plot(logL_civ[dnuni], aro[dnuni], 'bo', markerfacecolor="None")
axs[1,1].plot(logL_civ[nduni], aro[nduni], 'ro')
axs[1,1].plot(logL_civ[ndnuni], aro[ndnuni], 'ro', markerfacecolor="None")
axs[1,1].set_xlabel("$log_{10} (L_{C_{IV}})$")
axs[1,1].set_ylabel('$\\alpha_{ro}$')
axs[1,1].invert_yaxis()
plt.suptitle("EQW and $C_{IV}$ Luminosity")
#plt.savefig("Graphs/radio_vs_Lciv_and_eqw.png")<jupyter_output><empty_output><jupyter_text>---
#### 1d analysis in $C_{IV}$ space<jupyter_code>from sklearn.preprocessing import scale
Xsort = df[['EW_CIV_2','VOFF_CIV_PEAK_2']].values
x = scale(Xsort[:,0])
y = scale(Xsort[:,1])
z = np.arange(min(x),max(x),0.001)
fit = np.poly1d(np.polyfit(x,y,3))
#Now manually do the 1-d fit for the cubic line
cubefit = np.array([z, fit(z)]).T
data = np.array([x, y]).T
trevorFit = np.array([]).reshape(0, 2)
print(trevorFit.shape)
#Want to loop through each point of the cubic fit
#for each data point, saving the distance between the two
#in delta.  Then take the index of the min in delta and
#plot that index of the cubic fit to display the 1d distribution
for scat in data: 
    r = np.sqrt((scat[0]-cubefit[:,0])**2 + (scat[1]-cubefit[:,1])**2)
    delta = np.array([cubefit[np.argmin(r), 0], cubefit[np.argmin(r), 1]])
    trevorFit = np.concatenate((trevorFit, np.atleast_2d(delta)))
fitx = np.reshape(trevorFit[:,0], (50,1))
fity = np.reshape(trevorFit[:,1], (50,1))
fill = 0.5
plt.figure(figsize=(10,8))
plt.scatter(x, y, alpha=0.1)
plt.plot(fitx[duni], fity[duni], 'bo', label="Det, Uni", alpha=fill)
plt.plot(fitx[dnuni], fity[dnuni], 'bo', label="Det, Non-Uni", markerfacecolor="None", alpha=fill)
plt.plot(fitx[nduni], fity[nduni], 'ro', label="Non-Det, Uni", alpha=fill)
plt.plot(fitx[ndnuni], fity[ndnuni], 'ro', label="Non-Det, Non-Uni", markerfacecolor="None", alpha=fill)
plt.plot(z, fit(z), alpha=0.1)
plt.xlabel('EQW')
plt.ylabel('Blueshift')
plt.legend(loc="best")
plt.title("1d Fit of our Data in $C_{IV}-Space$")
#plt.savefig("Graphs/civ_1dfit.png")<jupyter_output>(0, 2)
<jupyter_text>---
## Now display same trends comparing only detections, based on whether or not they are radio loud<jupyter_code>#Can change radio-loudness indicator below:
#Flux/Luminosity as RL parameter
RQ = ((10.**int_flux)<229)&(det>0) #229µJy = 10^23.3 W/Hz, which is radio-loud using 
RL = ((10.**int_flux)>229)&(det>0)
##alpha_ro as RL parameter
#RQ = (aro>-0.2)&(det>0) #alpha_ro<-0.2 is radio-loud 
#RL = (aro<-0.2)&(det>0)
fig, axs = plt.subplots(4, 2, figsize=(15,21))
axs[0,0].hist(Miz2[RQ], color='k', histtype='step')
axs[0,0].hist(Miz2[RL], color='r', histtype='step')
axs[0,0].set_xlabel('$M_i(z=2)$')
axs[0,0].invert_xaxis()
axs[0,0].set_ylabel('Count')
axs[0,1].hist(logL_bol[RQ], color='k', histtype='step', label="RQ")
axs[0,1].hist(logL_bol[RL], color='r', histtype='step', label="RL")
axs[0,1].set_xlabel('$log_{10} (L_{bol})$')
axs[0,1].legend(loc="best")
axs[1,0].hist(logL_rad[RQ], color='k', histtype='step')
axs[1,0].hist(logL_rad[RL], color='r', histtype='step')
axs[1,0].set_xlabel('$log_{10} (L_{rad} * (erg/s/Hz)^{-1})$')
axs[1,0].set_ylabel('Count')
axs[1,1].hist(logL_civ[RQ], color='k', histtype='step')
axs[1,1].hist(logL_civ[RL], color='r', histtype='step')
axs[1,1].set_xlabel('$log_{10} (L_{C_{IV}})$')
axs[1,1].set_ylabel('Count')
axs[2,0].hist(log_bh[RQ], color='k', histtype='step')
axs[2,0].hist(log_bh[RL], color='r', histtype='step')
axs[2,0].set_xlabel('$log_{10} (M_{BH})$')
axs[2,1].hist(logedd_ratio[RQ], color='k', histtype='step')
axs[2,1].hist(logedd_ratio[RL], color='r', histtype='step')
axs[2,1].set_xlabel('$log_{10} (L / L_{edd})$')
axs[3,0].hist(ew_civ[RQ], color='k', histtype='step')
axs[3,0].hist(ew_civ[RL], color='r', histtype='step')
axs[3,0].set_xlabel('$EQW$')
plt.suptitle("Sample Demographics")
#plt.savefig("Graphs/demographics_RL_vs_RQ_flux.png")
fig, axs = plt.subplots(2, 2, figsize=(15,12))
axs[0,0].scatter(log_bh[RQ], int_flux[RQ], color="black", label="Radio-Quiet")
axs[0,0].scatter(log_bh[RL], int_flux[RL], color="red", label="Radio-Loud")
#axs[0,0].set_ylim(1,3)
axs[0,0].set_xlabel("$log_{10} (M_{BH} / M_{sun})$")
axs[0,0].set_ylabel('$log_{10} (F * {\mu Jy}^{-1})$')
axs[0,0].legend(loc="best")
axs[0,1].scatter(log_bh[RQ], aro[RQ], color="black")
axs[0,1].scatter(log_bh[RL], aro[RL], color="red")
axs[0,1].set_xlabel("$log_{10} (M_{BH} / M_{sun})$")
axs[0,1].set_ylabel('$\\alpha_{ro}$')
axs[0,1].invert_yaxis()
axs[1,0].scatter(Miz2[RQ], int_flux[RQ], color="black")
axs[1,0].scatter(Miz2[RL], int_flux[RL], color="red")
axs[1,0].set_xlabel("$M_i(z=2)$")
axs[1,0].set_ylabel('$log_{10} (F * {\mu Jy}^{-1})$')
axs[1,0].invert_xaxis()
axs[1,1].scatter(Miz2[RQ], aro[RQ], color="black")
axs[1,1].scatter(Miz2[RL], aro[RL], color="red")
axs[1,1].set_xlabel("$M_i(z=2)$")
axs[1,1].set_ylabel('$\\alpha_{ro}$')
axs[1,1].invert_xaxis()
axs[1,1].invert_yaxis()
plt.suptitle("Detections, Black Hole Mass and Absolute Magnitudes")
#plt.savefig("Graphs/radio_vs_bhmass_and_mag_RL_vs_RQ_flux.png")
fig, axs = plt.subplots(2, 2, figsize=(15,12))
axs[0,0].scatter(logedd_ratio[RQ], int_flux[RQ], color="black", label="Radio-Quiet")
axs[0,0].scatter(logedd_ratio[RL], int_flux[RL], color="red", label="Radio-Loud")
axs[0,0].set_xlabel("$log_{10} (L / L_{edd})$")
axs[0,0].set_ylabel('$log_{10} (F * {\mu Jy}^{-1})$')
axs[0,0].legend(loc="best")
axs[0,1].scatter(logedd_ratio[RQ], aro[RQ], color="black")
axs[0,1].scatter(logedd_ratio[RL], aro[RL], color="red")
axs[0,1].set_xlabel("$log_{10} (L / L_{edd})$")
axs[0,1].set_ylabel('$\\alpha_{ro}$')
axs[0,1].invert_yaxis()
axs[1,0].scatter(logL_bol[RQ], int_flux[RQ], color="black")
axs[1,0].scatter(logL_bol[RL], int_flux[RL], color="red")
axs[1,0].set_xlabel("$log_{10} (L_{bol})$")
axs[1,0].set_ylabel('$log_{10} (F * {\mu Jy}^{-1})$')
axs[1,1].scatter(logL_bol[RQ], aro[RQ], color="black")
axs[1,1].scatter(logL_bol[RL], aro[RL], color="red")
axs[1,1].set_xlabel("$log_{10} (L_{bol})$")
axs[1,1].set_ylabel('$\\alpha_{ro}$')
axs[1,1].invert_yaxis()
plt.suptitle("Detections, Eddington ratio and Bolometric Luminosity")
plt.savefig("Graphs/radio_vs_eddratio_and_Lbol_RL_vs_RQ_flux.png")
fill = 0.5
plt.figure(figsize=(10,8))
plt.scatter(x, y, alpha=0.1)
plt.scatter(fitx[RQ], fity[RQ], color='black', label="Radio-Quiet", alpha=fill)
plt.scatter(fitx[RL], fity[RL], color='red', label="Radio-Loud", alpha=fill)
plt.plot(z, fit(z), alpha=0.1)
plt.xlabel('EQW')
plt.ylabel('Blueshift')
plt.legend(loc="best")
plt.title("1d Fit of our Data in $C_{IV}-Space$")
plt.savefig("Graphs/civ_1dfit_RL_vs_RQ_flux.png")<jupyter_output><empty_output> | 
	no_license | 
	/Notebooks/Random_DataAnalysis/OpticalvsRadio_PreliminaryDataAnalysis.ipynb | 
	RichardsGroup/VLA2018b | 7 | 
| 
	<jupyter_start><jupyter_text># Working with GitHub
Let's learn how to connect Google Colaboratory with GitHub.## ●What is GitHub?
GitHub has become an indispensable service for developers.  
"Git" is a version control system widely used in programming and service development.  
GitHub is a web service built on the Git mechanism so that people all over the world can share and publish their own products.  
A repository created on GitHub (a kind of storage vault) is public to everyone on the free plan, while paid plans let you create private repositories accessible only to specified users.  
GitHub is also used to publish open-source projects such as TensorFlow and Keras.  
## ●Publishing a notebook on GitHub
By uploading a notebook to GitHub you can make it publicly available or share it within a team.  
After creating a new repository on GitHub, let's upload this notebook to it.  
The code below is only dummy code.  <jupyter_code>import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
(x_train, t_train), (x_test, t_test) = cifar10.load_data()
batch_size = 32
epochs = 1
n_class = 10
t_train = keras.utils.to_categorical(t_train, n_class)
t_test = keras.utils.to_categorical(t_test, n_class)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = x_train / 255
x_test = x_test / 255
model.fit(x_train, t_train, epochs=epochs, batch_size=batch_size, validation_data=(x_test, t_test))<jupyter_output><empty_output> | 
	no_license | 
	/github.ipynb | 
	yuichinambu/Colab_Sample | 1 | 
| 
	<jupyter_start><jupyter_text># Dynamic WebPage
---<jupyter_code>import requests
from bs4 import BeautifulSoup
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
url = "https://play.google.com/store/movies/top"
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0', "Accept-Language":"ko-KR,ko"}
res = requests.get(url, headers=headers, verify=False)  # pass the headers defined above (they were previously unused)
res.raise_for_status()
soup = BeautifulSoup(res.text, "lxml")
movies = soup.find_all("div", attrs={"class":"ImZGtf mpg5gc"})
print(len(movies))
with open("movie.html", "w", encoding="utf8") as f:
    #f.write(res.text)
    f.write(soup.prettify()) # write the prettified HTML document
# for movie in movies:
#     title = movie.find("div", attrs={"class":"WsMG1c nnK0zc"}).get_text()
#     print(title)
from selenium import webdriver
browser = webdriver.Chrome('C://chromedriver_win32/chromedriver.exe')
browser.maximize_window() # maximize the browser window
# navigate to the page
url = "https://play.google.com/store/movies/top"
browser.get(url)
# # scroll down by one monitor height (1080 px)
# browser.execute_script('window.scrollTo(0, 1080)')
# scroll to the very bottom of the page
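# Added sketch (not in the original notebook): dynamically loaded pages often keep growing as
# you scroll, so a common pattern is to scroll repeatedly until the page height stops changing.
# It reuses the `browser` object created above; the 2-second delay is an arbitrary choice.
import time
prev_height = 0
while True:
    browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
    time.sleep(2)  # give newly loaded items time to appear
    new_height = browser.execute_script('return document.body.scrollHeight')
    if new_height == prev_height:
        break
    prev_height = new_height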
browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')<jupyter_output><empty_output> | 
	no_license | 
	/15_selenium_movie.ipynb | 
	yangguda/Web_Crawling | 1 | 
| 
	<jupyter_start><jupyter_text>
 # Exploratory Data Analysis on Superstore Data### Objectives:- 
#### 1) To Perform Exploratory Data Analysis 
#### 2) Find out business problems
#### 3) identify key areas for improving profits.### 1. Importing required packages and dataset<jupyter_code>import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
SuperStoreData = pd.read_csv("SampleSuperstore.csv")
# Initial Shape of the Data Frame
Initial_Shape = SuperStoreData.shape
Initial_Shape<jupyter_output><empty_output><jupyter_text>### 2. Data Inspection<jupyter_code>SuperStoreData
SuperStoreData.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 9994 entries, 0 to 9993
Data columns (total 13 columns):
 #   Column        Non-Null Count  Dtype  
---  ------        --------------  -----  
 0   Ship Mode     9994 non-null   object 
 1   Segment       9994 non-null   object 
 2   Country       9994 non-null   object 
 3   City          9994 non-null   object 
 4   State         9994 non-null   object 
 5   Postal Code   9994 non-null   int64  
 6   Region        9994 non-null   object 
 7   Category      9994 non-null   object 
 8   Sub-Category  9994 non-null   object 
 9   Sales         9994 non-null   float64
 10  Quantity      9994 non-null   int64  
 11  Discount      9994 non-null   float64
 12  Profit        9994 non-null   float64
dtypes: float64(3), int64(2), object(8)
memory usage: 1015.1+ KB
<jupyter_text>No null-valued cells present.<jupyter_code># Statistics of the numerical-valued columns
SuperStoreData.describe()<jupyter_output><empty_output><jupyter_text># 3. Data Cleaning<jupyter_code># Deleting identical (duplicate) rows to reduce noise in the data
SuperStoreData.drop_duplicates(keep= 'first',inplace=True) 
SuperStoreData
#Final Shape of the dataframe
Final_Shape = SuperStoreData.shape
Final_Shape
# Initial number of rows vs final
Initial_Shape[0]- Final_Shape[0]<jupyter_output><empty_output><jupyter_text>### Note: 17 identical rows identified and removed!## 4. Exploratory Data Analysis and Visualization ### 4.1 What are the total Sales and Profits of the company?<jupyter_code>Sales_and_Profits = SuperStoreData.groupby("Segment").sum().iloc[:,[1,-1]].sum()
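# Added note (not in the original notebook): overall profit margin for context,
# computed from the two totals reported just below (Profit / Sales is roughly 12.5%).
print('Overall profit margin: {:.1%}'.format(Sales_and_Profits['Profit'] / Sales_and_Profits['Sales']))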
round(Sales_and_Profits,2) # Rounding numerical entries to 2 decimal places<jupyter_output><empty_output><jupyter_text>###  Total Sales = 2,296,195.56 USD
### Total Profits = 286,241.42 USD### 4.2 Top 10 States by Sales and Profits<jupyter_code>Top_10_Sales = SuperStoreData.groupby("State").Sales.sum().nlargest(n =10)
Top_10_Profits = SuperStoreData.groupby("State").Profit.sum().nlargest(n =10)
Top_10_Sales.index
Top_10_Profits.index<jupyter_output><empty_output><jupyter_text>#### Top 10 States by Sales:
'California', 'New York', 'Texas', 'Washington', 'Pennsylvania','Florida', 'Illinois', 'Ohio', 'Michigan', 'Virginia'
#### Top 10 States by Profit:
'California', 'New York', 'Washington', 'Michigan', 'Virginia','Indiana', 'Georgia', 'Kentucky', 'Minnesota', 'Delaware'<jupyter_code>plt.style.use('seaborn')
Top_10_Sales.plot(kind ='bar', figsize =(14,8), fontsize =14)
plt.xlabel("States", fontsize =13)
plt.ylabel("Total Sales",fontsize =13)
plt.title("Top 10 States by Sales",fontsize =16)
plt.show()
plt.style.use('seaborn')
Top_10_Profits.plot(kind ='bar', figsize =(14,8), fontsize =14)
plt.xlabel("States", fontsize =13)
plt.ylabel("Total Profits",fontsize =13)
plt.title("Top 10 States by Profits",fontsize =16)
plt.show()<jupyter_output><empty_output><jupyter_text>## California and New York are far ahead of their competitor states.<jupyter_code>plt.style.use('seaborn')
SuperStoreData.plot(kind = "scatter", figsize = (15,8), x = "Sales", y ="Profit" ,c ="Discount", s =20, marker ="x",colormap ="viridis")
plt.ylabel("Total Profits",fontsize =13)
plt.title("Interdependence of Sales, Profit, and Discount",fontsize =16)
plt.show()<jupyter_output><empty_output><jupyter_text>## Relation Analysis<jupyter_code># Pair_plot
financial=SuperStoreData.loc[:,['Sales','Quantity','Discount','Profit']]
sns.pairplot(financial)<jupyter_output><empty_output><jupyter_text>In the pair plot above we see some relation between Sales and Profit, and also between Discount and Profit. To see the exact relation between those variables we plot a heat map, which gives more clarity.<jupyter_code>correlation=financial.corr()
sns.heatmap(correlation,xticklabels=correlation.columns,yticklabels=correlation.columns,annot=True)<jupyter_output><empty_output><jupyter_text>From the above map we infer that: 1) Sales and Profit are positively correlated; 2) Discount and Profit are negatively correlated.
Hence we consider these two cases and proceed further.## Case 1: When Discount is 0<jupyter_code>data=SuperStoreData[SuperStoreData['Discount']==0]
sns.relplot(x='Sales',y='Profit',data=data)<jupyter_output><empty_output><jupyter_text>Hence we see that there is a positive relation between Profit and Sales when Discount is 0. Now we plot the heat map to get the correlation.<jupyter_code>correlation=data.corr()
sns.heatmap(correlation,xticklabels=correlation.columns,yticklabels=correlation.columns,annot=True)<jupyter_output><empty_output><jupyter_text>Hence, we see that there is a strong correlation between Sales and Profit, i.e. 0.92.<jupyter_code># Relation plot with respect to Category
sns.relplot(x='Sales',y='Profit',hue='Category',data=data)
# Regression Plot 
sns.regplot(data['Sales'],data['Profit'])<jupyter_output><empty_output><jupyter_text>There is a positive trend between Profit and Sales.<jupyter_code>sns.boxplot(x='Category',y='Profit',data=data)<jupyter_output><empty_output><jupyter_text>## Case 2: When Discount is not 0<jupyter_code>data1=SuperStoreData[SuperStoreData['Discount']!=0]
sns.relplot(x='Sales',y='Profit',hue='Discount',data=data1)<jupyter_output><empty_output><jupyter_text>In the graph above we see that as the Discount percentage increases, Sales also increase but Profit decreases.
Now we check how this affects the different business sectors.<jupyter_code>sns.relplot(x='Sales',y='Profit',hue='Category',data=data1)
# relation analysis
correlation=data1.corr()
sns.heatmap(correlation,xticklabels=correlation.columns,yticklabels=correlation.columns,annot=True)
pivot=pd.pivot_table(data1,index='Sub-Category',values='Profit')
pivot.plot(kind='bar')<jupyter_output><empty_output><jupyter_text>Here we see that Copiers had the highest Profit and Machines had the highest loss.<jupyter_code>pivot=pd.pivot_table(data1,index='Sub-Category',values='Sales')
pivot.plot(kind='bar')<jupyter_output><empty_output><jupyter_text>Here we see that Copiers had the highest Sales and Machines had the second-highest Sales.
From the two graphs above we see that 'Machines' had the second-highest Sales but, because of the large discounts, it runs at a loss; the second graph also shows that Sales in the 'Fasteners', 'Labels' and 'Art' sub-categories are very weak, so we have to concentrate on these sub-category businesses.### Discounts are triggering losses### 4.3 Distribution of Profits across different regions<jupyter_code>plt.figure(figsize = (12,4))
sns.set(font_scale=1, palette= "viridis")
sns.barplot(data = SuperStoreData , x = "Region",y = "Profit" ,hue = "Segment")
plt.show()<jupyter_output><empty_output><jupyter_text>### Overall each Segment is profitable.
### 4.4 Profit distribution by Region<jupyter_code>plt.figure(figsize = (12,4))
sns.set(font_scale=1, palette= "viridis")
sns.barplot(data = SuperStoreData , x = "Region",y = "Profit" ,hue = "Category")
plt.show()<jupyter_output><empty_output><jupyter_text>## "Furniture" Category is the only loss making sector that to only in Central Region### 4.5 Investigating losses in Furniture category in the Central region<jupyter_code># Grouping Data by Region and only slicing Data for Central Region from whole Data Set
gb_Central = list(SuperStoreData.groupby("Region"))[0][1]
# Investing Further in cenral Region 
plt.figure(figsize = (12,4))
sns.set(font_scale=1.5, palette= "viridis")
sns.barplot(data = gb_Central, x = "Category",y = "Profit" ,hue = "Ship Mode")
plt.title("Investigation of central region: Profit making(by Ship Mode)")
plt.show()<jupyter_output><empty_output><jupyter_text>### Losses are incurred in the Furniture category irrespective of ship mode in the Central Region<jupyter_code># Slicing the Furniture data from the whole data set
gb_Category_Furniture =list(list(SuperStoreData.groupby("Region"))[0][1].groupby("Category"))[0][1]
# Correlation matrix Heat Map to identify key factors influening profits
plt.figure(figsize = (12,8))
sns.set(font_scale=1.4)
sns.heatmap(gb_Category_Furniture.corr() , annot = True, cmap ="Reds")
plt.show()<jupyter_output><empty_output><jupyter_text>### There is unusually high positive correlation between Postal Code and Discount
### Also, Their is negative correlation between Discount and Sales eventhough dicounts are entered as positive values... i.e. they are not helping in improving sales of "Furniture" category of the company
### 4.6 Investigating individual performance by states in the central region<jupyter_code>plt.figure(figsize = (12,8))
sns.set(font_scale=1, palette= "viridis")
sns.barplot(data = gb_Category_Furniture , x = "State",y = "Profit" ,hue = "Sub-Category")
plt.title("Investigation of Central Region Furniture Category: Profit Analysis(by Sub Category)", fontsize = 20)
plt.show()
plt.figure(figsize = (12,8))
sns.set(font_scale=1, palette= "viridis")
sns.barplot(data = gb_Category_Furniture , x = "State",y = "Profit" ,hue = "Segment")
plt.title("Investigation of Central Region Furniture Category: Profit Analysis(by Segment)", fontsize = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>### Texas and Illinois are the only two states contributing to all the losses in the Furniture category in the Central Region 
### Losses in the Tables sub-category are significantly high.
### 4.7 So, what is it they are doing differently?<jupyter_code>plt.figure(figsize = (12,8))
sns.set(font_scale=1, palette= "viridis")
sns.barplot(data = gb_Category_Furniture , x = "State",y = "Discount" ,hue = "Sub-Category")
plt.title("Discounts provided by each state", fontsize = 20)
plt.show()<jupyter_output><empty_output><jupyter_text>### Texas and Illinois are the only states providing discounts in the whole Central Region; this explains the high positive correlation between postal codes and discounts.### Also, these discounts are very high!
### 1. 60% on Furnishings
### 2. 30% on Bookcases and Chairs
### 3. 50% disount on Tables in Illinois and 30% in Texas
### 4. Are these discounts driving sales up?.... NO!<jupyter_code>plt.figure(figsize = (12,8))
sns.set(font_scale=1.5)
sns.lmplot(data = gb_Category_Furniture , x = "Discount", y ="Sales", aspect = 1, height = 8, col ="Sub-Category", col_wrap= 2)
plt.show()
# Joint plot for studying the overall relationship between Sales and Discounts
sns.set(font_scale=1.5)
sns.jointplot(data = gb_Category_Furniture , x = "Discount", y ="Sales", height = 8, kind = "reg")
plt.show()<jupyter_output><empty_output><jupyter_text>### Actually, Sales tend to go down when discounts go up in 3 out of 4 sub-categories, and also in the overall Furniture category Sales in the Central Region.## Some more insights regarding the distribution of the data<jupyter_code># box plot
sns.boxplot(x='Category',y='Profit',data=SuperStoreData)<jupyter_output><empty_output><jupyter_text>We see that the variation in the Technology sector is larger than in the other two sectors.<jupyter_code>pivot=pd.pivot_table(SuperStoreData,index='Category',values='Discount')
pivot.plot(kind='bar')
pivot=pd.pivot_table(SuperStoreData,index='Category',values='Sales')
pivot.plot(kind='bar')
pivot=pd.pivot_table(SuperStoreData,index='Category',values='Profit')
pivot.plot(kind='bar')<jupyter_output><empty_output> | 
	no_license | 
	/Task5/EDA on SuperStore Data.ipynb | 
	harshit9665/The-Spark-Foundation-GRIP | 26 | 
| 
	<jupyter_start><jupyter_text>## Lesson-01 Assignment#### Today is 5 January 2020, and today the world gains one more AI engineer :) `Hello everyone, and welcome to our artificial intelligence course. This course assumes no prior knowledge of machine learning or AI, but it does expect basic Python programming skills. Based on feedback from previous cohorts, students who complete this course can exceed the ability of 80% of master's students in computer AI / deep learning.`## Contents of this assignment#### 1. Reproduce the in-class code
In this part, you need to reproduce the in-class code from the GitHub link we provided, following the lecture content.#### 2. Assignment deadline
The deadline for this assignment is 12 January 2020.#### 3. Complete the following Q&A and programming exercises>## Basic theory part> **Grading point**: is every question answered completely?#### 0. Can you come up with 3 scenarios which use AI methods? Ans: 1. Automatically classify news articles by topic. 2. Judge the sentiment of a Weibo post. 3. Judge whether a Weibo post is an advertisement.#### 1. How do we use Github; Why do we use Jupyter and Pycharm;Ans: 
GitHub is built on the Git version control tool. On one hand it records every version of a piece of software from nothing to its current state, so its whole history is visible at a glance; on the other hand it also records every bit of progress in my own programming ability.  
Projects hosted on GitHub can be software packages or program code, but also documents and tutorials, so it is also very well suited as a platform for managing learning or teaching material.  
GitHub is open: the code, documentation and even the intermediate history of every public project are available. I can find interesting projects to participate in, and use them to improve my skills and accumulate project experience.  
  
 
Jupyter Notebook is an interactive notebook. It provides an environment where users can write code, run it, inspect the results and visualise data. For AI-related work, Jupyter Notebook makes it convenient to carry out all kinds of end-to-end tasks such as data cleaning, statistical modelling and building/training machine learning models.  
A distinctive feature of Jupyter Notebook is that code is written in independent cells that can be executed separately. This means that while testing a project you can test a specific block of code on its own, without re-running everything from the start. That makes it very suitable for beginners.#### 2. What's the Probability Model?Ans: A probability model is a mathematical model that describes the relations between different random variables; it usually characterises the uncertain, probabilistic relationship between one or more random variables.#### 3. Can you come up with some scenarios at which we could use a Probability Model?Ans: 1. The number of heads or tails in repeated coin tosses, which we describe with a "binomial distribution" probability model.  
  
2. A metro train arrives every 5 minutes and a passenger's arrival time at the station is uniformly distributed; find the probability that the passenger waits no more than 3 minutes. We solve this with a uniform-distribution model (a one-dimensional continuous uniform distribution).
  
3. Estimating the normal range of white blood cell counts in medicine. Using a "normal distribution" we set an upper and a lower limit, e.g. so that 95% of people fall inside the normal range, and people outside that range are flagged for special attention.#### 4. Why do we use probability and what's the difficult points for programming based on parsing and pattern match?Ans: 
Quoting "The Beauty of Mathematics":
"There are at least two insurmountable obstacles here. First, to cover even 20% of real sentences with grammar rules, the number of rules (not counting part-of-speech tagging rules) would be at least tens of thousands. Linguists can hardly keep up with writing them, and the rules eventually even contradict each other, so each rule also needs its own list of applicable contexts. To cover more than 50% of sentences, the number of rules grows to the point where every new sentence requires adding new grammar..."  
"Second, even if one could write a grammar covering every phenomenon of natural language, parsing it with a computer would still be very hard. Grammars describing natural language differ from those of programming languages: natural language evolved context-dependent meanings, so its grammar is a rather complex context-sensitive grammar, whereas programming languages are deliberately designed, context-free grammars that are much easier for a computer to decode. The computational cost of the two is not comparable."
Because of these difficulties, we give up the parsing / pattern-matching approach and turn to probability-based models.#### 5. What's the Language Model;Ans:  
For a word sequence $w_1,w_2,...w_n$, a language model computes the probability of that sequence, i.e. $Pr(w_1w_2...w_n)$  
From a machine-learning point of view, a language model models the probability distribution over sentences; its job is to judge whether a word sequence is a normal sentence, i.e. whether it sounds like something a person would actually say.#### 6. Can you come up with some scenarios at which we could use a Language Model?
Ans:  
1. Speech recognition: use the language model to pick the transcription that most resembles natural speech.
2. In a corpus, given one word, find the other words that co-occur with it most strongly. For example, with a dataset of fan-club Weibo posts, we can enter a celebrity's name and obtain the words that co-occur with that name most often, then compare the associated words of different celebrities to find interesting insights.#### 7. What's the 1-gram language model;Ans:  
N-Gram is a statistical language model. Its basic idea is to slide a window of size N over the text, producing a sequence of fragments of length N.
Each fragment is called a gram. We count the frequency of every gram and filter by a preset threshold (the class exercise did not require filtering short texts), obtaining a list of key grams that forms the feature space of the text, with each gram as one feature dimension.  
The model rests on the Markov assumption: the N-th word depends only on the previous N-1 words and on nothing else. These probabilities are obtained directly by counting how often N words occur together in the corpus.  
Bi-grams and tri-grams are the most commonly used.  
Under this definition, the uni-gram assumption is that each word depends only on itself and on no other word in the sentence.#### 8. What's the disadvantages and advantages of 1-gram language model;Ans:  
Advantages and disadvantages of uni-grams relative to bi-grams and tri-grams:  
Advantages: easy to understand, easy to compute, easy to implement.  
Disadvantages: all context information is lost, so judgements of sentence plausibility are less accurate.  
Pros and cons of N-Gram models in general:  
Pros: (1) maximum-likelihood estimation makes the parameters easy to train; (2) they fully use the information of the previous n-1 words; (3) they are interpretable and intuitive.  
Cons: (1) no long-range dependencies, only the previous n-1 words are modelled; (2) the parameter space grows exponentially with n; (3) data sparsity inevitably leads to OOV (out-of-vocabulary) problems; (4) being purely count-based, they generalise poorly.#### 9. What's the 2-gram models;Ans:
On top of the N-gram definition above, the bi-gram assumption is that the 2nd word depends only on the 1 word before it and on nothing else.## Programming practice part#### 1. Design your own sentence generator. How to generate sentences is a classic problem: ever since the 1940s, when Turing proposed machine intelligence, the test has been whether a human can converse fluently with a computer, and a prerequisite for that is that the computer can generate language.
How a computer can generate language is a classic yet complicated problem. In this course we introduce a rule-based generation method. Although the method was proposed long ago, it still performs well in many places today. It is worth noting that many very practical algorithms were proposed long ago, e.g. binary search in the 1940s and Dijkstra's algorithm in the 1960s. In the famous TV series / film "Westworld", the robots generate language using exactly this syntax-tree approach.
> 
>

> 
>In this part, you first need to define your own language. Think of an application scenario first, and then define a grammar for that scenario. For example:
In Westworld, the language of a "human" could be defined as:
``` 
human = """
human = 自己 寻找 活动
自己 = 我 | 俺 | 我们 
寻找 = 看看 | 找找 | 想找点
活动 = 乐子 | 玩的
"""
```
The language of a "host" (receptionist) could be defined as
```
host = """
host = 寒暄 报数 询问 业务相关 结尾 
报数 = 我是 数字 号 ,
数字 = 单个数字 | 数字 单个数字 
单个数字 = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 
寒暄 = 称谓 打招呼 | 打招呼
称谓 = 人称 ,
人称 = 先生 | 女士 | 小朋友
打招呼 = 你好 | 您好 
询问 = 请问你要 | 您需要
业务相关 = 玩玩 具体业务
玩玩 = 耍一耍 | 玩一玩
具体业务 = 喝酒 | 打牌 | 打猎 | 赌博
结尾 = 吗?"""
```
Please define your own grammar. First grammar:<jupyter_code>you_need_replace_this_with_name_you_given = '''
# you code here
'''
# Scenario 1: a wife bossing her husband around
wifeNagging = """
nag = 称呼 介词 做事
称呼 = 老公|孩儿他爸|大坏蛋
介词 = 快去|给我
做事 = 开瓶酸奶|倒杯水|剥个橘子
"""<jupyter_output><empty_output><jupyter_text>> **评阅点**: 是否提出了和课程上区别较大的语法结构第二个语法:<jupyter_code>you_need_replace_this_with_name_you_given = '''
# you code here
'''
# Scenario 2: a husband brushing off his wife
husbandBePerfunctory = """
perfunctory = 夸赞 称呼 扯理由 建议
夸赞 = 赞美词|赞美词 夸赞
赞美词 = 美丽的|善良的|善解人意的
称呼 = 老婆|老婆大人|媳妇儿|宝贝
扯理由 = 我要 事由 ,
事由 = 写代码|打游戏|看书|睡觉
建议 = 你还是 替代方案 吧。
替代方案 = 自己做|找 别人
别人 = 爸爸|妈妈
"""<jupyter_output><empty_output><jupyter_text>> **评阅点**:是否和上一个语法区别比较大TODO: 然后,使用自己之前定义的generate函数,使用此函数生成句子。<jupyter_code># 解析语法树
def parseGrammar(grammar_str, split='=', line_split='\n'):
    grammar = {}
    for line in grammar_str.split(line_split):
        if not line.strip(): continue
        exp, stmt = line.split(split)
        grammar[exp.strip()] = [s.split() for s in stmt.split('|')]
    return grammar
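# Added illustration (not in the original): for a tiny grammar string the parsed dict looks like
# parseGrammar('s = a b | c')  ->  {'s': [['a', 'b'], ['c']]}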
# gram = parseGrammar(grammar_str=husbandBePerfunctory)
# print(gram)
# Randomly generate one sentence from the grammar tree
# Depends on: parseGrammar
import random
def generateLan(grammar, target):
    if target not in grammar: return target # means target is a terminal expression
    
    expaned = [generateLan(grammar, t) for t in random.choice(grammar[target])]
    return ''.join([e for e in expaned if e != 'null'])
perfunSen = generateLan(grammar=parseGrammar(grammar_str=husbandBePerfunctory), target='perfunctory')
print(perfunSen)<jupyter_output>善解人意的善良的老婆我要写代码,你还是找爸爸吧。
<jupyter_text>TODO: Next, define a function generate_n that extends generate so it can produce n sentences:<jupyter_code># Randomly generate n sentences from the grammar tree
# Depends on: parseGrammar, generateLan
def generate_nLan(grammar_str, target, n):
    grammar = parseGrammar(grammar_str=grammar_str)
    
    sentence_list=[]
    for i in range(n):
        sentence_list.append(generateLan(grammar=grammar, target=target))
    return sentence_list
sentence_list = generate_nLan(grammar_str=husbandBePerfunctory, target='perfunctory', n=10)
i=1
for sen in sentence_list:
    print(str(i)+' '+sen)
    i+=1<jupyter_output>1 善良的善良的老婆大人我要写代码,你还是自己做吧。
2 美丽的美丽的善解人意的善良的媳妇儿我要看书,你还是自己做吧。
3 善良的宝贝我要睡觉,你还是自己做吧。
4 美丽的善解人意的善良的善良的媳妇儿我要看书,你还是自己做吧。
5 善良的老婆大人我要写代码,你还是自己做吧。
6 善良的善良的老婆我要打游戏,你还是自己做吧。
7 善解人意的老婆大人我要看书,你还是找爸爸吧。
8 善良的美丽的宝贝我要看书,你还是找妈妈吧。
9 美丽的美丽的美丽的善良的美丽的老婆我要看书,你还是找爸爸吧。
10 善良的媳妇儿我要睡觉,你还是自己做吧。
<jupyter_text>> **Grading point**: run the code and check whether multiple sentences are generated#### 2. Train the language model on a new data source. Following the `prob_2` function defined above, we switch to a new text data source and obtain a new Language Model:
1. Download a text dataset (you may pick either of the following, or use both)
    + Option 1, insurance-domain question/answer dialogue corpus: https://github.com/Computing-Intelligence/insuranceqa-corpus-zh/raw/release/corpus/pool/train.txt.gz
    + Option 2, Douban movie comments dataset: https://github.com/Computing-Intelligence/datasource/raw/master/movie_comments.csv
2. Modify the code to obtain a new **2-gram** language model
    + clean the text to obtain plain text only
    + tokenize the text
    + feed it into the language model defined earlier and judge how plausible each sentence isStep1: read the raw data from disk<jupyter_code>import pandas as pd
filePath = r'D:\人工智能与自然语言处理006期_开课吧\第二章第1节-人工智能引论之概率模型与语言自动生成模型\Assignment-01\movie_comments.csv'
content = pd.read_csv(filePath)
content.head()
comments = content['comment'].tolist() # keep only the comment text and convert it to a list
print(len(comments))<jupyter_output>261497
<jupyter_text>Step2: clean the data<jupyter_code>import re
def token(string):
    # we will learn the regular expression next course.
    return re.findall('\w+', string)
# ''.join(token(comments[10]))
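# Added illustration (not in the original): token('Movie was great!!! 10/10') -> ['Movie', 'was', 'great', '10', '10']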
comments_cleaned = [''.join(token(str(c)))for c in comments]
print(len(comments_cleaned))
wfilePath=r'D:\人工智能与自然语言处理006期_开课吧\第二章第1节-人工智能引论之概率模型与语言自动生成模型\Assignment-01\comments_cleaned.txt'
with open(wfilePath,'w',encoding='UTF-8') as wfile:
    for com in comments_cleaned:
        wfile.write(com+'\n')
print('Done')<jupyter_output>Done
<jupyter_text>Step3: tokenize the text (word segmentation)<jupyter_code>import jieba
# tokenize one line of text
def cut(string): return list(jieba.cut(string.strip('\n')))
# tokenize all comments
# store every token obtained from the segmentation in a single list
def cutAllLines(filePath):
    token_list = []
    with open(filePath,'r',encoding='UTF-8') as file:
        for line in file:
            token_list+=cut(line)
    return token_list
%time token_list = cutAllLines(filePath=wfilePath)
print(len(token_list))<jupyter_output>Wall time: 41.7 s
4490313
<jupyter_text>Step4: count token frequencies and compute the 3-gram-based probability. Recursive derivation of the conditional word probabilities, conditioning each word on the two words that follow it:  
sentence = '其实 就和 随机森林 原理 一样'  
Pr(其实 & 就和 & 随机森林 & 原理 & 一样)  
-> Pr(其实 | 就和 & 随机森林 & 原理 & 一样) Pr(就和 & 随机森林 & 原理 & 一样)  
-> Pr(其实 | 就和 & 随机森林 & 原理 & 一样) Pr(就和 | 随机森林 & 原理 & 一样) Pr(随机森林 | 原理 & 一样) Pr(原理 & 一样)  
Simplification: the probability of each word is assumed to depend only on the two words that follow it.  
-> Pr(其实 | 就和 & 随机森林) Pr(就和 | 随机森林 & 原理) Pr(随机森林 | 原理 & 一样) Pr(原理 & 一样)  
where, for example, $$ Pr(其实|就和\&随机森林) = \frac{N("其实就和随机森林")}{N("就和随机森林")} $$ Generalising the recursion to a sentence of n words: $$Pr(sentence)=Pr(w_1w_2...w_n)= \prod_{i=1}^{n-2} \frac {N(w_iw_{i+1}w_{i+2})}{N(w_{i+1}w_{i+2})}\cdot Pr(w_{n-1}w_n)$$<jupyter_code>from collections import Counter
token_list = [str(t) for t in token_list] # convert every token in token_list to a string
print(token_list[:20])
print(token_list[-1])
print(token_list[-2])
print(token_list[-3])
# quick refresher on list indexing and slicing rules
test_list = [1,2,3,4]
print(test_list[1:3])
print(test_list[:-2])
print(test_list[-2])<jupyter_output>[2, 3]
[1, 2]
3
<jupyter_text>['吴京', '意淫', '到', '了', '脑残', '的', '地步'] 7  
['吴京意淫','意淫到','到了','了脑残','脑残的','的地步'] 6  
['吴京意淫到', '意淫到了', '到了脑残', '了脑残的', '脑残的地步'] 5  <jupyter_code># join every 2 adjacent tokens of the token list into 2-grams
%time token_2_gram_list = [''.join(token_list[i:i+2]) for i in range(len(token_list)-1)]
print(token_2_gram_list[:10])
print(token_2_gram_list[-1])
# join every 3 adjacent tokens of the token list into 3-grams
%time token_3_gram_list = [''.join(token_list[i:i+3]) for i in range(len(token_list)-2)]
print(token_3_gram_list[:10])
print(token_3_gram_list[-1])
# count the frequency of each 2-gram, producing a Counter of (2-gram, count)
%time wordsFreq_2 = Counter(token_2_gram_list)
wordsFreq_2['吴京意淫']
# count the frequency of each 3-gram, producing a Counter of (3-gram, count)
%time wordsFreq_3 = Counter(token_3_gram_list)
# compute: frequency of the 3-gram divided by the frequency of its last two tokens
def two_gram_prob(word1, word2, word3):
    if word1 + word2 +word3 in wordsFreq_3: return wordsFreq_3[word1+word2+word3] / wordsFreq_2[word2+word3]
    # out-of-vocabulary problem: if the 3-gram is not in the vocabulary, assign probability 1 / vocabulary size
    else: 
        return 1 / len(wordsFreq_2)
two_gram_prob('吴京', '意淫', '了')<jupyter_output><empty_output><jupyter_text>Step5: compute the probability of one sentence (the 3-gram implementation)<jupyter_code># implement the probability formula derived above
def get_probablity(sentence):
    words_list = cut(sentence) # tokenize the sentence
#     print(words_list) #打印
    sentence_pro = 1 # initialise the probability of this sentence
#     print(words_list[:-2]) #打印
    for i, word in enumerate(words_list[:-2]):
        next_1 = words_list[i+1]
        next_2 = words_list[i+2]
        
#         print('word'+word)#打印
#         print('next_1'+next_1)#打印
#         print('next_2'+next_2)#打印
        
        probability = two_gram_prob(word, next_1, next_2)
        
#         print('probability:%f'%probability)#打印
        
        sentence_pro *= probability
        
#         print(sentence_pro) #打印
    sentence_pro *= (wordsFreq_2[words_list[-2]+words_list[-1]]/len(token_2_gram_list)) # multiply by the last term of the formula: the probability of the final 2-gram
    return sentence_pro
get_probablity('吴京是一个爱国的演员')
get_probablity('吴京意淫到了脑残的地步看了恶心想吐')<jupyter_output><empty_output><jupyter_text>> **Grading points** 1. Is a new dataset used? 2. Is the csv (txt) data parsed correctly?#### 3. Obtain the best-quality sentence. Once we can both generate random sentences and judge them, we can generate more plausible sentences. Define a generate_best function that takes a grammar plus a language model, generates **n** sentences, and selects the most plausible one: 
Hint: to implement this function you will need Python's sorted function<jupyter_code>sorted([1, 3, 5, 2])<jupyter_output><empty_output><jupyter_text>This function accepts a key argument, which itself takes a function as input, for example<jupyter_code>sorted([(2, 5), (1, 4), (5, 0), (4, 4)], key=lambda x: x[0])<jupyter_output><empty_output><jupyter_text>which sorts the list by element 0 of each item.<jupyter_code>sorted([(2, 5), (1, 4), (5, 0), (4, 4)], key=lambda x: x[1])<jupyter_output><empty_output><jupyter_text>which sorts the list by element 1 of each item.<jupyter_code>sorted([(2, 5), (1, 4), (5, 0), (4, 4)], key=lambda x: x[1], reverse=True)<jupyter_output><empty_output><jupyter_text>which sorts the list by element 1 of each item, but in descending order.<jupyter_code># Depends on: generate_nLan, get_probablity
def generate_bestLan(grammar_str, target, n):
    sen_list = generate_nLan(grammar_str, target, n)
    sen_pro_list = [] # [(sentence, sentence probability)]
    for sen in sen_list:
        pro = get_probablity(sen)
        print(sen+'    '+str(pro)) #打印
        sen_pro_list.append((sen,pro))
    sen_pro_sorted_list = sorted(sen_pro_list,key=lambda x:x[1],reverse=True)
    return sen_pro_sorted_list[0][0]
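# Added sketch (not part of the original assignment): the product of many small probabilities
# underflows to 0.0, and is exactly zero whenever the final 2-gram is unseen in the corpus
# (as the output below shows), so the ranking becomes effectively arbitrary. A common fix is
# to rank by the sum of log-probabilities instead. This variant reuses the cut, two_gram_prob,
# wordsFreq_2 and token_2_gram_list objects defined above.
import math
def generate_bestLan_log(grammar_str, target, n):
    sen_list = generate_nLan(grammar_str, target, n)
    scored = []
    for sen in sen_list:
        words = cut(sen)
        logp = 0.0
        for i in range(len(words) - 2):
            logp += math.log(two_gram_prob(words[i], words[i+1], words[i+2]))
        tail_count = wordsFreq_2[words[-2] + words[-1]]
        logp += math.log((tail_count if tail_count > 0 else 1) / len(token_2_gram_list))
        scored.append((sen, logp))
    return sorted(scored, key=lambda x: x[1], reverse=True)[0][0]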
%time best_sen = generate_bestLan(grammar_str=husbandBePerfunctory, target='perfunctory', n=20)
print("最优质的一句话:%s"%best_sen)<jupyter_output>善解人意的媳妇儿我要睡觉,你还是自己做吧。    0.0
美丽的媳妇儿我要睡觉,你还是自己做吧。    0.0
善解人意的老婆大人我要写代码,你还是自己做吧。    0.0
善解人意的善良的老婆大人我要睡觉,你还是找爸爸吧。    0.0
善解人意的善良的善良的老婆大人我要看书,你还是找妈妈吧。    0.0
善解人意的宝贝我要睡觉,你还是自己做吧。    0.0
善解人意的媳妇儿我要打游戏,你还是找爸爸吧。    0.0
善解人意的善良的老婆大人我要睡觉,你还是自己做吧。    0.0
美丽的善良的老婆我要睡觉,你还是自己做吧。    0.0
美丽的老婆我要睡觉,你还是自己做吧。    0.0
善解人意的宝贝我要睡觉,你还是自己做吧。    0.0
善解人意的媳妇儿我要写代码,你还是自己做吧。    0.0
美丽的善解人意的美丽的宝贝我要打游戏,你还是找爸爸吧。    0.0
善解人意的善良的老婆我要打游戏,你还是找爸爸吧。    0.0
善解人意的宝贝我要睡觉,你还是自己做吧。    0.0
美丽的媳妇儿我要打游戏,你还是找妈妈吧。    0.0
善良的宝贝我要看书,你还是自己做吧。    0.0
美丽的宝贝我要打游戏,你还是找妈妈吧。    0.0
美丽的善良的美丽的善良的善解人意的老婆我要看书,你还是自己做吧。    0.0
善良的善解人意的善解人意的善良的老婆我要打游戏,你还是自己做吧。    0.0
Wall time: 4 ms
最优质的一句话:善解人意的媳妇儿我要睡觉,你还是自己做吧。
 | 
	no_license | 
	/Assignment_01_Build_Sentence_Generation_System_Using_Syntax_Tree_and_Language_Model.ipynb | 
	samisgood968/NLP | 15 | 
| 
	<jupyter_start><jupyter_text># Explore Your Environment## Get Latest Code<jupyter_code>%%bash
pull_force_overwrite_local<jupyter_output><empty_output><jupyter_text>## Helper Scripts### Find Script from Anywhere<jupyter_code>!which pull_force_overwrite_local<jupyter_output><empty_output><jupyter_text>### List `/scripts` Directory<jupyter_code>!ls -l /root/scripts/<jupyter_output><empty_output><jupyter_text>### Show `pull_force_overwrite_local` Script<jupyter_code>!cat /root/scripts/pull_force_overwrite_local<jupyter_output><empty_output><jupyter_text>## [PipelineAI](http://pipeline.io)<jupyter_code>%%html
<iframe width=800 height=600 src="http://pipeline.io"></iframe><jupyter_output><empty_output><jupyter_text>## All Code in [GitHub Repo](https://github.com/fluxcapacitor/pipeline/)
Please Star this [GitHub Repo](https://github.com/fluxcapacitor/pipeline/)!!## [Advanced Spark and TensorFlow Meetup](https://www.meetup.com/Advanced-Spark-and-TensorFlow-Meetup/)
Please Join this [Global Meetup](https://www.meetup.com/Advanced-Spark-and-TensorFlow-Meetup/)!!
## Verify IP Address
This should match your browser!<jupyter_code>import requests
url = 'http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip'
headers = {'Metadata-Flavor': 'Google'}
r = requests.get(url, headers=headers)
ip_address = r.text
print('Your IP:  %s' % ip_address)<jupyter_output><empty_output><jupyter_text>## Get Allocation Index
We may use this later.<jupyter_code>import requests
import json
url = 'http://allocator.demo.pipeline.io/allocation/%s' % ip_address
r = requests.get(url, timeout=5)
allocation = r.text
allocation_json = json.loads(allocation)
print(allocation_json)
print(allocation_json['index'])<jupyter_output><empty_output> | 
	permissive | 
	/gpu.ml/notebooks/01_Explore_Environment.ipynb | 
	BrentDorsey/pipeline | 7 | 
| 
	<jupyter_start><jupyter_text># Quarterback QBR Rank<jupyter_code>import pandas as pd
qbs = pd.read_csv('../Capstone_csv_file/qbs_stats_rank_19-20', index_col = 'NAME')
qbs.head()
qbs.rename(columns=lambda x: x.strip(), inplace = True)
qbs = qbs[['QBR', 'QBR_rank']].copy()
qbs = qbs.sort_values('QBR_rank')
qbs.to_csv('../Capstone_csv_file/qbs_qbr_rank_19-20')
qbs<jupyter_output><empty_output> | 
	no_license | 
	/Capstone_EDA_Quarterbacks/Capstone_qbs_qbr_rank_19-20.ipynb | 
	ChrisSCorliss/Capstone | 1 | 
| 
	<jupyter_start><jupyter_text>This project is about decision trees.
We will use the Pima Indians Diabetes dataset.<jupyter_code>import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report,confusion_matrix
columns=['Pregnant','Plasma','Pres','skin','test','mass','pedi','age','class']
df_diab=pd.read_csv("D:\PGP-AIML\Supervised Learning\Ensemble Techniques\pima-indians-diabetes.csv")
df_diab.columns=['Pregnant','Plasma','Pres','skin','test','mass','pedi','age','class']
df_diab.head()
df_diab.shape
n=df_diab['class'].count()
train_set = df_diab.head(int(round(n*0.7))) # Up to the last initial training set row
test_set = df_diab.tail(int(round(n*0.3))) # Past the last initial training set row
train_labels = train_set.pop("class")
test_labels = test_set.pop("class")
X=df_diab.drop(['class'],axis=1)
y=df_diab['class']
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=.3,random_state=0)
X_train.shape
train_set.shape
from sklearn.tree import DecisionTreeClassifier
dt_model = DecisionTreeClassifier(criterion = 'entropy' )
dt_model.fit(X_train, y_train)
test_predict=dt_model.predict(X_test)
print (pd.DataFrame(dt_model.feature_importances_, columns = ["Imp"], index = X_train.columns))
model_score = dt_model.score(X_test, y_test)
print(model_score)
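# Added usage sketch (not in the original notebook): classification_report is imported above
# but never used; it reports per-class precision, recall and F1 alongside the accuracy score.
print(classification_report(y_test, test_predict))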
cm=confusion_matrix(y_test, test_predict, labels=[1, 0])
df_cm = pd.DataFrame(cm, index = [i for i in ["1","0"]],
                  columns = [i for i in ["Predict 1","Predict 0"]])
plt.figure(figsize = (7,5))
sns.heatmap(df_cm, annot=True,fmt='g')<jupyter_output><empty_output> | 
	no_license | 
	/Untitled33.ipynb | 
	oops-git/aiml | 1 | 
| 
	<jupyter_start><jupyter_text># Newton method with double rootConsider the function
$$
f(x) = (x-1)^2 \sin(x)
$$
for which $x=1$ is a double root.<jupyter_code>%matplotlib inline
%config InlineBackend.figure_format = 'svg'
from numpy import sin,cos,linspace,zeros,abs
from matplotlib.pyplot import plot,xlabel,ylabel,grid
def f(x):
    return (x-1.0)**2 * sin(x)
def df(x):
    return 2.0*(x-1.0)*sin(x) + (x-1.0)**2 * cos(x)
x = linspace(0.0,2.0,100)
plot(x,f(x))
xlabel('x')
ylabel('f(x)')
grid(True)
def newton(x0,m):
    n = 50
    x = zeros(50)
    x[0] = x0
    print "%6d %24.14e" % (0,x[0])
    for i in range(1,50):
        x[i] = x[i-1] - m*f(x[i-1])/df(x[i-1])
        if i > 1:
            r = (x[i] - x[i-1])/(x[i-1]-x[i-2])
        else:
            r = 0.0
        print "%6d %24.14e %14.6e" % (i,x[i],r)
        if abs(f(x[i])) < 1.0e-16:
            break<jupyter_output><empty_output><jupyter_text>We first apply the standard newton method.<jupyter_code>newton(2.0,1)<jupyter_output>     0     2.00000000000000e+00
     1     1.35163555744248e+00   0.000000e+00
     2     1.18244356861394e+00   2.609520e-01
     3     1.09450383817604e+00   5.197630e-01
     4     1.04837639635805e+00   5.245347e-01
     5     1.02452044172239e+00   5.171749e-01
     6     1.01235093391487e+00   5.101245e-01
     7     1.00619920246051e+00   5.055037e-01
     8     1.00310567444820e+00   5.028711e-01
     9     1.00155437342780e+00   5.014666e-01
    10     1.00077757303341e+00   5.007412e-01
    11     1.00038888338214e+00   5.003726e-01
    12     1.00019446594325e+00   5.001868e-01
    13     1.00009723903915e+00   5.000935e-01
    14     1.00004862103702e+00   5.000468e-01
    15     1.00002431089794e+00   5.000234e-01
    16     1.00001215554384e+00   5.000117e-01
    17     1.00000607779564e+00   5.000059e-01
    18     1.00000303890375e+00   5.000029e-01
    19     1.00000151945336e+00   5.000015e-01
    20     1.00000075972705e+00   5.000007e-01
    21     1.00000037986362e[...]<jupyter_text>Newton method is converging linearly. Now try the modified Newton method.<jupyter_code>newton(2.0,2)<jupyter_output>     0     2.00000000000000e+00
     1     7.03271114884954e-01   0.000000e+00
     2     1.06293359720705e+00  -2.773614e-01
     3     1.00108318855754e+00  -1.719679e-01
     4     1.00000037565568e+00   1.750696e-02
     5     1.00000000000005e+00   3.469257e-04
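A short added note (not in the original notebook): for a root $r$ of multiplicity $m$, standard Newton satisfies
$$
x_{k+1}-r \approx \frac{m-1}{m}\,(x_k-r),
$$
so for the double root ($m=2$) the successive-difference ratio tends to $1/2$, exactly the value printed in the first run, i.e. only linear convergence. The modified iteration $x_{k+1} = x_k - m\,f(x_k)/f'(x_k)$ cancels this factor and restores quadratic convergence, which is why the second run reaches the root in a handful of steps.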
 | 
	no_license | 
	/root_finding/newton2.ipynb | 
	animesh1995/na | 3 | 
| 
	<jupyter_start><jupyter_text># ------------------------------------PLOTS------------------------------------# LC Description<jupyter_code>plot.figure(1)
plot.subplots_adjust(hspace=0.14)
p1=plot.subplot(321)
p1.set_title('GALFORM$_{r<24.8}$')
graph.Density(LC1[LC1_abs[0]],'galaxies per $mag\cdot z$',ylabel='u, absolute rest frame [$mag$]',
              ylim=(-24,-9),yinvert=True,hold_graph=True)
p2=plot.subplot(322)
p2.set_title('GALFORM$_{r<24.8}$')
graph.Density(LC1[LC1_abs[1]],'galaxies per $mag\cdot z$',ylabel='u-z, absolute rest frame [$mag$]',
              ylim=(-1.5,5.5),hold_graph=True)
p3=plot.subplot(323)
p3.set_title('Buzzard$_{r<24.8}$')
graph.Density(LC2[LC2_abs[0]],'galaxies per $mag\cdot z$',ylabel='u, absolute rest frame [$mag$]',
              ylim=(-24,-9),yinvert=True,hold_graph=True)
p4=plot.subplot(324)
p4.set_title('Buzzard$_{r<24.8}$')
graph.Density(LC2[LC2_abs[1]],'galaxies per $mag\cdot z$',ylabel='u-z, absolute rest frame [$mag$]',
              ylim=(-1.5,5.5),hold_graph=True)
p5=plot.subplot(325)
p5.set_title('Buzzard$_{r<27.5}$')
graph.Density(LC3[LC3_abs[0]],'galaxies per $mag\cdot z$',xlabel='redshift',
              ylim=(-24,-9),ylabel='u, absolute rest frame [$mag$]',yinvert=True,hold_graph=True)
p6=plot.subplot(326)
p6.set_title('Buzzard$_{r<27.5}$')
graph.Density(LC3[LC3_abs[1]],'galaxies per $mag\cdot z$',xlabel='redshift',
              ylabel='u-z, absolute rest frame [$mag$]',ylim=(-1.5,5.5),fig_size=(30,30),hold_graph=True)
plot.savefig('../MILL_Graph/'+'01_02_abs.pdf',dpi=200,bbox_inches="tight")
plot.show()
plot.figure(4)
plot.subplots_adjust(hspace=0.14)
p1=plot.subplot(311)
p1.set_title('GALFORM$_{r<24.8}$')
graph.Discrete_Contour(LC1[LC1_abs[3]],ylabel='u-z, absolute rest frame [$mag$]',c=np.linspace(0,0.6,5),
                       xlim=[-22,-11],ylim=[-0.5,4],xinvert=True,hold_graph=True)
p2=plot.subplot(312)
p2.set_title('Buzzard$_{r<24.8}$')
graph.Discrete_Contour(LC2[LC2_abs[3]],ylabel='u-z, absolute rest frame [$mag$]',c=np.linspace(0,0.6,5),
                       xlim=[-22,-11],ylim=[-0.5,4],xinvert=True,hold_graph=True)
p3=plot.subplot(313)
p3.set_title('Buzzard$_{r<27.5}$')
graph.Discrete_Contour(LC3[LC3_abs[3]],xlabel='u, absolute rest frame [$mag$]',
                       ylabel='u-z, absolute rest frame [$mag$]',c=np.linspace(0,0.75,6),
                       labels=['$z_{0.0,0.3}$','$z_{0.3,0.6}$','$z_{0.6,0.9}$','$z_{0.9,1.2}$',
                               '$z_{1.2,2.5}$','$z_{2.5+}$'],
                       lab_loc=4,xlim=[-22,-11],ylim=[-0.5,4],xinvert=True,fig_size=(15,30),hold_graph=True)
plot.savefig('../MILL_Graph/'+'04_abs.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># CMD plots<jupyter_code>plot.figure(5)
plot.subplots_adjust(hspace=0.18)
p1=plot.subplot(321)
p1.set_title('GALFORM$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
graph.Arrow([23.75,23.85,23.95],[1.75]*3,LC1[LC1_app[0]],labels=[r'75%',r'50%',r'25%'])
graph.Discrete_Contour(LC1[LC1_app[1]],p=0.95,ylabel='u-z [$mag$]',c=np.linspace(1.4,0,3),lines='dotted',
                       hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC1[LC1_app[1]],ylabel='u-z [$mag$]',xinvert=True,xlim=[23.5,25.5],ylim=[1,3],
                       c=np.linspace(1.4,0,3),hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p2=plot.subplot(322)
p2.set_title('Buzzard$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
graph.Arrow([23.75,23.85,23.95],[2.0]*3,LC2[LC2_app[0]])
graph.Discrete_Contour(LC2[LC2_app[1]],p=0.95,c=np.linspace(1.4,0,3),lines='dotted',hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC2[LC2_app[1]],xinvert=True,xlim=[23.5,25.5],ylim=[1,3],c=np.linspace(1.4,0,3),
                       labels=['$\sim1\'$','$\sim7\'$','$\sim1^o$'],hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p3=plot.subplot(323)
p3.set_title('GALFORM$_{r<24.8}$, $z_{0.6,0.9}$',y=1.01)
graph.Arrow([24.6,24.7,24.8],[1]*3,LC1[LC1_app[2]])
graph.Discrete_Contour(LC1[LC1_app[3]],p=0.95,ylabel='u-z [$mag$]',c=np.linspace(1.4,0,3),lines='dotted',
                       hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC1[LC1_app[3]],ylabel='u-z [$mag$]',c=np.linspace(1.4,0,3),xinvert=True,
                       xlim=[24.4,25.2],ylim=[0.75,2.25],hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p4=plot.subplot(324)
p4.set_title('Buzzard$_{r<24.8}$, $z_{0.6,0.9}$',y=1.01)
graph.Arrow([25.2,25.3,25.4],[3.2]*3,LC2[LC2_app[2]])
graph.Discrete_Contour(LC2[LC2_app[3]],p=0.95,c=np.linspace(1.4,0,3),lines='dotted',hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC2[LC2_app[3]],xinvert=True,xlim=[25.1,25.8],ylim=[2.25,3.75],
                       c=np.linspace(1.4,0,3),hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p5=plot.subplot(325)
p5.set_title('GALFORM$_{r<24.8}$, $z_{1.2,2.5}$',y=1.01)
graph.Arrow([24.4,24.5,24.6],[1.25]*3,LC1[LC1_app[4]])
graph.Discrete_Contour(LC1[LC1_app[5]],p=0.95,lines='dotted',xlabel='u [$mag$]',ylabel='u-z [$mag$]',
                       c=np.linspace(1.4,0,3),hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC1[LC1_app[5]],xlabel='u [$mag$]',ylabel='u-z [$mag$]',xinvert=True,
                       c=np.linspace(1.4,0,3),xlim=[24,25],ylim=[0,1.75],hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p6=plot.subplot(326)
p6.set_title('Buzzard$_{r<24.8}$, $z_{1.2,2.5}$',y=1.01)
graph.Arrow([25.2,25.3,25.4],[1.25]*3,LC2[LC2_app[4]])
graph.Discrete_Contour(LC2[LC2_app[5]],p=0.95,lines='dotted',xlabel='u [$mag$]',
                       c=np.linspace(1.4,0,3),hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC2[LC2_app[5]],xlabel='u [$mag$]',xinvert=True,c=np.linspace(1.4,0,3),
                       fig_size=(30,30),xlim=[24.7,25.7],ylim=[1,2.75],hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
plot.savefig('../MILL_Graph/'+'11_app.pdf',dpi=200,bbox_inches="tight")
plot.show()
plot.figure(6)
plot.subplots_adjust(hspace=0.18)
p1=plot.subplot(221)
p1.set_title('Buzzard$_{r<27.5}$, $z_{0.0,0.3}$',y=1.01)
graph.Arrow([25.1,25.15,25.2],[1.5]*3,LC3[LC3_app[0]])
graph.Discrete_Contour(LC3[LC3_app[1]],p=0.95,lines='dotted',ylabel='u-z [$mag$]',c=np.linspace(1.4,0,3),
                       hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC3[LC3_app[1]],ylabel='u-z [$mag$]',xinvert=True,c=np.linspace(1.4,0,3),
                       labels=['$\sim1\'$','$\sim7\'$','$\sim1^o$'],xlim=[25,26],ylim=[1.4,2.2],
                       hold_graph=True,subset=[1,2],lab_loc=2)
plot.gca().tick_params(pad=10)
plot.grid()
p2=plot.subplot(222)
p2.set_title('Buzzard$_{r<27.5}$, $z_{0.6,0.9}$',y=1.01)
graph.Arrow([26.4,26.45,26.5],[1.7]*3,LC3[LC3_app[2]])
graph.Discrete_Contour(LC3[LC3_app[3]],p=0.95,c=np.linspace(1.4,0,3),lines='dotted',hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC3[LC3_app[3]],xinvert=True,c=np.linspace(1.4,0,3),xlim=[26,27],ylim=[1.6,2.4],
                       hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p3=plot.subplot(223)
p3.set_title('Buzzard$_{r<27.5}$, $z_{1.2,2.5}$',y=1.01)
graph.Arrow([26.6,26.65,26.7],[0.9]*3,LC3[LC3_app[4]],labels=[r'75%',r'50%',r'25%'])
graph.Discrete_Contour(LC3[LC3_app[5]],p=0.95,lines='dotted',ylabel='u-z [$mag$]',c=np.linspace(1.4,0,3),
                       hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC3[LC3_app[5]],xlabel='u [$mag$]',ylabel='u-z [$mag$]',xinvert=True,
                       c=np.linspace(1.4,0,3),xlim=[26,27],ylim=[0.8,1.6],
                       hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
p4=plot.subplot(224)
p4.set_title('Buzzard$_{r<27.5}$, $z_{2.5+}$',y=1.01)
graph.Arrow([26.4,26.45,26.5],[1.0]*3,LC3[LC3_app[6]])
graph.Discrete_Contour(LC3[LC3_app[7]],p=0.95,lines='dotted',xlabel='u [$mag$]',c=np.linspace(1.4,0,3),
                       hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC3[LC3_app[7]],xlabel='u [$mag$]',xinvert=True,c=np.linspace(1.4,0,3),
                       fig_size=(30,20),xlim=[26.3,27.3],ylim=[0.4,1.2],
                       hold_graph=True,subset=[1,2])
plot.gca().tick_params(pad=10)
plot.grid()
plot.savefig('../MILL_Graph/'+'11_LSST_full_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># STD plots<jupyter_code>plot.figure(7)
plot.subplots_adjust(hspace=0.15)
p1=plot.subplot(311)
p1.set_title('GALFORM$_{r<24.8}$')
graph.Arrow_Vline(LC1[LC1_app[4]])
plot.gca().tick_params(pad=10)
#plot.grid()
#graph.Line(LC1[LC1_app[9]],c=np.linspace(0,0.6,5),hold_graph=True)
graph.Scatter(LC1[LC1_app[10]],ylabel=r'$\sigma_\bot$ $[mag]$',marker_size=150,
              xlim=[0.005,1.6],ylim=[0.005,0.35],marker_type=[['v','o','^']]*5,c=np.linspace(0,0.6,5),
              xlog=True,ylog=True,hold_graph=True)
p2=plot.subplot(312)
p2.set_title('Buzzard$_{r<24.8}$')
graph.Arrow_Vline(LC2[LC2_app[4]],labels=[r'75%',r'50%',r'25%'])
plot.gca().tick_params(pad=10)
#plot.grid()
#graph.Line(LC2[LC2_app[9]],c=np.linspace(0,0.6,5),hold_graph=True)
graph.Scatter(LC2[LC2_app[10]],ylabel=r'$\sigma_\bot$ $[mag]$',marker_size=150,
              xlim=[0.005,1.6],ylim=[0.005,0.35],marker_type=[['v','o','^']]*5,c=np.linspace(0,0.6,5),
              xlog=True,ylog=True,lab_loc=4,hold_graph=True)
p3=plot.subplot(313)
p3.set_title('Buzzard$_{r<27.5}$')
graph.Arrow_Vline(LC3[LC3_app[4]])
plot.gca().tick_params(pad=10)
#plot.grid()
#graph.Line(LC3[LC3_app[12]],labels=['$z_{0.0,0.3}$','$z_{0.3,0.6}$','$z_{0.6,0.9}$','$z_{0.9,1.2}$',
#                                    '$z_{1.2,2.5}$','$z_{2.5+}$'],
#           c=np.linspace(0,0.75,6),lab_loc=4,hold_graph=True)
graph.Scatter(LC3[LC3_app[13]],xlabel='$\sigma_{\parallel}$ $[mag]$',ylabel=r'$\sigma_\bot$ $[mag]$',
              marker_size=150,xlim=[0.005,1.6],ylim=[0.005,0.35],marker_type=[['v','o','^']]*6,
              xlog=True,ylog=True,c=np.linspace(0,0.75,6),hold_graph=True,fig_size=(15,30))
plot.savefig('../MILL_Graph/'+'15_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># LF plots<jupyter_code>CFHTLS_fname=sorted(glob.glob('../MILL_Graph/CFHTLS*txt'))
CFHTLS_dat=[np.loadtxt(f) for f in CFHTLS_fname]
SDSS_dat=np.loadtxt('../MILL_Graph/SDSS.txt')
plot.figure(9)
graph.Histogram(LC1[LC1_abs[4]],labels=['GALFORM'],hold_graph=True,c=[0.7])
plot.grid()
plot.scatter(CFHTLS_dat[0][:,0],10**CFHTLS_dat[0][:,1],label='CFHTLS',c='r')
plot.scatter(SDSS_dat[:,0],10**SDSS_dat[:,1],label='SDSS',c='k')
graph.Histogram(LC2[LC2_abs[4]],xlabel='i, absolute rest frame [$mag$]',xlim=[-25,-14],xinvert=True,
                ylabel='$\Phi$ [$h^3mag^{-1}cMpc^{-3}$]',labels=['Buzzard'],hold_graph=True)
plot.savefig('../MILL_Graph/'+'05_abs.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># f-EBV plots<jupyter_code>plot.figure(10)
plot.subplots_adjust(hspace=0.15)
p1=plot.subplot(311)
p1.set_title('GALFORM$_{r<24.8}$')
graph.Arrow_Vline(arrow_ebv,labels=['75%','50%','25%'])
graph.Line(LC1[LC1_app[20]],ylabel='$\log_{10}(f)$',
           c=np.linspace(0,0.6,5),xlim=[0,0.5],ylim=[-1.2,0.1],hold_graph=True)
plot.gca().tick_params(pad=10)
p2=plot.subplot(312)
p2.set_title('Buzzard$_{r<24.8}$')
graph.Arrow_Vline(arrow_ebv)
graph.Line(LC2[LC2_app[20]],ylabel='$\log_{10}(f)$',
           c=np.linspace(0,0.6,5),xlim=[0,0.5],ylim=[-1.2,0.1],hold_graph=True)
plot.gca().tick_params(pad=10)
p3=plot.subplot(313)
p3.set_title('Buzzard$_{r<27.5}$')
graph.Arrow_Vline(arrow_ebv)
graph.Line(LC3[LC3_app[26]],xlabel='$E(B-V)$',ylabel='$\log_{10}(f)$',
           labels=['$z_{0.0,0.3}$','$z_{0.3,0.6}$','$z_{0.6,0.9}$','$z_{0.9,1.2}$',
                   '$z_{1.2,2.5}$','$z_{2.5+}$'],fig_size=(15,30),c=np.linspace(0,0.75,6),
           xlim=[0,0.5],ylim=[-1.2,0.1],hold_graph=True)
plot.gca().tick_params(pad=10)
plot.savefig('../MILL_Graph/'+'24_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># mag/colour-delta plots<jupyter_code>plot.figure(11)
p1=plot.subplot(211)
p1.set_title('Buzzard$_{r<24.8}$, $z_{0.6,0.9}$',y=1.01)
graph.Arrow([0.34,0.36,0.38],[25.3]*3,arrow_md)
graph.Discrete_Contour(LC2[LC2_app[15]],p=0.95,lines='dotted',c=np.linspace(1.4,0,3),hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC2[LC2_app[15]],ylabel='u [$mag$]',yinvert=True,xlim=[-0.3,0.4],ylim=[25.2,26],
                       lab_loc=4,c=np.linspace(1.4,0,3),labels=['$\sim1\'$','$\sim7\'$','$\sim1^o$'],
                       hold_graph=True,subset=[1,2])
plot.grid()
p2=plot.subplot(212)
graph.Arrow([0.34,0.36,0.38],[2.6]*3,arrow_cd,labels=[r'75%',r'50%',r'25%'])
graph.Discrete_Contour(LC2[LC2_app[7]],p=0.95,lines='dotted',c=np.linspace(1.4,0,3),hold_graph=True,subset=[1,2])
graph.Discrete_Contour(LC2[LC2_app[7]],xlabel='$\log_{10}(1+\delta)$',ylabel='u-z [$mag$]',xlim=[-0.3,0.4],
                       ylim=[2.3,3.4],lab_loc=4,c=np.linspace(1.4,0,3),fig_size=(15,20),
                       hold_graph=True,subset=[1,2])
plot.grid()
plot.savefig('../MILL_Graph/'+'12_18_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># colour-colour plots<jupyter_code>plot.figure(12)
graph.Discrete_Contour(LC2[LC2_app[11]],p=0.95,lines='dotted',xlabel='u-r [$mag$]',ylabel=r'r-z [$mag$]',
                       c=np.linspace(1.4,0,3),subset=[1,2],hold_graph=True)
graph.Discrete_Contour(LC2[LC2_app[11]],xlabel='u-r [$mag$]',ylabel=r'r-z [$mag$]',xlim=[1.2,2],ylim=[0.3,0.6],
                       labels=['$\sim1\'$','$\sim7\'$','$\sim1^o$'],
                       c=np.linspace(1.4,0,3),lab_loc=4,subset=[1,2],hold_graph=True)
plot.gca().tick_params(pad=10)
plot.grid()
plot.title('Buzzard$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
plot.savefig('../MILL_Graph/'+'17_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># E(B-V)>0 plots<jupyter_code>plot.figure(15)
p1=plot.subplot(221)
p1.set_title('GALFORM$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
graph.Discrete_Contour(LC1[LC1_app[17]],p=0.95,lines='dotted',xinvert=True,c=[0.0,0.7],hold_graph=True)
graph.Discrete_Contour(LC1[LC1_app[17]],ylabel='u-z [$mag$]',xlim=[23.75,25.5],ylim=[1.25,2.25],xinvert=True,
                       c=[0.0,0.7],labels=[r'$E(B-V)=0$',r'$E(B-V)<0.5$'],lab_loc=2,hold_graph=True)
plot.gca().tick_params(pad=10)
plot.grid()
p2=plot.subplot(222)
p2.set_title('Buzzard$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
graph.Discrete_Contour(LC2[LC2_app[17]],p=0.95,lines='dotted',xinvert=True,c=[0.0,0.7],hold_graph=True)
graph.Discrete_Contour(LC2[LC2_app[17]],xinvert=True,c=[0.0,0.7],xlim=[24,25.75],ylim=[1.5,2.5],hold_graph=True)
plot.gca().tick_params(pad=10)
plot.grid()
p3=plot.subplot(223)
p3.set_title('GALFORM$_{r<24.8}$, $z_{1.2,2.5}$',y=1.01)
graph.Discrete_Contour(LC1[LC1_app[19]],p=0.95,lines='dotted',xlabel='u [$mag$]',ylabel='u-z [$mag$]',
                       xlim=[24,25.25],ylim=[0,2],c=[0.0,0.7],hold_graph=True)
graph.Discrete_Contour(LC1[LC1_app[19]],xlabel='u [$mag$]',xinvert=True,c=[0.0,0.7],hold_graph=True)
plot.gca().tick_params(pad=10)
plot.grid()
p4=plot.subplot(224)
p4.set_title('Buzzard$_{r<24.8}$, $z_{1.2,2.5}$',y=1.01)
graph.Discrete_Contour(LC2[LC2_app[19]],p=0.95,lines='dotted',c=[0.0,0.7],hold_graph=True)
graph.Discrete_Contour(LC2[LC2_app[19]],xinvert=True,c=[0.0,0.7],fig_size=(30,20),xlim=[24.5,25.75],ylim=[1,3],
                       xlabel='u [$mag$]',hold_graph=True)
plot.gca().tick_params(pad=10)
plot.grid()
plot.savefig('../MILL_Graph/'+'22_app.pdf',dpi=200,bbox_inches="tight")
plot.show()
plot.figure(18)
p1=plot.subplot(321)
p1.set_title('GALFORM$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
plot.axvspan(24.65-arrow_g[1]/2,24.65+arrow_g[1]/2,facecolor='k',alpha=0.15)
graph.Histogram(np.load('../MILL_Graph/14_0_DESI_app_hist.npz'),ylabel='pixels per $mag$',
                xinvert=True,hold_graph=True,c=[0,0.7])
p2=plot.subplot(323)
p2.set_title('Buzzard$_{r<24.8}$, $z_{0.0,0.3}$',y=1.01)
plot.axvspan(24.9-arrow_g[1]/2,24.9+arrow_g[1]/2,facecolor='k',alpha=0.15)
graph.Histogram(np.load('../MILL_Graph/14_0_LSST_cut_app_hist.npz'),ylabel='pixels per $mag$',
                xinvert=True,hold_graph=True,c=[0,0.7])
p3=plot.subplot(325)
p3.set_title('Buzzard$_{r<27.5}$, $z_{0.0,0.3}$',y=1.01)
plot.axvspan(25.6-arrow_g[1]/2,25.6+arrow_g[1]/2,facecolor='k',alpha=0.15)
graph.Histogram(np.load('../MILL_Graph/14_0_LSST_full_app_hist.npz'),xlabel='u [$mag$]',
                ylabel='pixels per $mag$',labels=['$E(B-V)=0$','$E(B-V)<0.5$'],
                xinvert=True,hold_graph=True,c=[0,0.7])
p4=plot.subplot(322)
p4.set_title('GALFORM$_{r<24.8}$, $z_{1.2,2.5}$',y=1.01)
plot.axvspan(24.55-arrow_g[1]/2,24.55+arrow_g[1]/2,facecolor='k',alpha=0.15)
graph.Histogram(np.load('../MILL_Graph/14_4_DESI_app_hist.npz'),xinvert=True,hold_graph=True,c=[0,0.7])
p5=plot.subplot(324)
p5.set_title('Buzzard$_{r<24.8}$, $z_{1.2,2.5}$',y=1.01)
plot.axvspan(25.15-arrow_g[1]/2,25.15+arrow_g[1]/2,facecolor='k',alpha=0.15)
graph.Histogram(np.load('../MILL_Graph/14_4_LSST_cut_app_hist.npz'),xinvert=True,hold_graph=True,c=[0,0.7])
p6=plot.subplot(326)
p6.set_title('Buzzard$_{r<27.5}$, $z_{1.2,2.5}$',y=1.01)
plot.axvspan(26.7-arrow_g[1]/2,26.7+arrow_g[1]/2,facecolor='k',alpha=0.15)
graph.Histogram(np.load('../MILL_Graph/14_4_LSST_full_app_hist.npz'),xlabel='u [$mag$]',
                xinvert=True,hold_graph=True,c=[0,0.7],fig_size=(30,30))
plot.savefig('../MILL_Graph/29_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># Completeness plots<jupyter_code>plot.figure(13)
p1=plot.subplot(211)
p1.set_title('GALFORM$_{r<24.8}$',y=1.01)
graph.Histogram(np.load('../MILL_Graph/25_DESI_app_hist.npz'),ylabel='galaxies per $mag$',
                c=np.linspace(0,0.75,6),xlim=[11,24.8],xinvert=True,hold_graph=True)
p2=plot.subplot(212)
p2.set_title('Buzzard$_{r<27.5}$',y=1.01)
plot.axvline(24.8,c='r')
graph.Histogram(np.load('../MILL_Graph/25_LSST_full_app_hist.npz'),xlabel='r [$mag$]',
                ylabel='galaxies per $mag$',labels=['$z_{0.0,0.3}$','$z_{0.3,0.6}$','$z_{0.6,0.9}$',
                                                    '$z_{0.9,1.2}$','$z_{1.2,2.5}$','$z_{2.5+}$'],
                c=np.linspace(0,0.75,6),xlim=[9,27.5],xinvert=True,fig_size=(15,20),hold_graph=True)
plot.savefig('../MILL_Graph/25_app.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output><jupyter_text># Counts table<jupyter_code>f_counts=glob.glob
counts_DESI=np.load('../MILL_Graph/01_0_DESI_count_line.npz')
counts_LSST_cut=np.load('../MILL_Graph/01_0_LSST_cut_count_line.npz')
counts_LSST_full=np.load('../MILL_Graph/01_0_LSST_full_count_line.npz')
keys_DESI=[i for i in sorted(counts_DESI.keys()) for j in range(5,10) if str(j) in i]
keys_LSST_cut=[i for i in sorted(counts_LSST_cut.keys()) for j in range(5,10) if str(j) in i]
keys_LSST_full=[i for i in sorted(counts_LSST_full.keys()) for j in range(6,12) if str(j) in i]
print('DESI')
for k in keys_DESI:
    print(counts_DESI[k])
print('LSST_cut')
for k in keys_LSST_cut:
    print(counts_LSST_cut[k])
print('LSST_full')
for k in keys_LSST_full:
    print(counts_LSST_full[k])<jupyter_output><empty_output><jupyter_text># Filter matrix<jupyter_code>plot.figure(17)
plot.suptitle('Buzzard$_{r<27.5}$',y=0.92)
p1=plot.subplot(221)
graph.EBV_Matrix(ylabel=True,hold_graph=True)
p2=plot.subplot(222)
graph.ab_Filter_Matrix('../MILL_Graph/27_LSST_full_app.csv',r'$\sigma_\parallel$ [$mag$]',hold_graph=True)
p3=plot.subplot(223)
graph.EBV_CD_Matrix('../MILL_Graph/27_LSST_full_app.csv',xlabel=True,ylabel=True,hold_graph=True)
p4=plot.subplot(224)
graph.ab_Filter_Matrix('../MILL_Graph/28_LSST_full_app.csv','Fraction of remaining galaxies',
                       xlabel=True,hold_graph=True,fig_size=(20,16))
plot.savefig('../MILL_Graph/28_LSST_full.pdf',dpi=200,bbox_inches="tight")
plot.show()<jupyter_output><empty_output> | 
	no_license | 
	/Plots.ipynb | 
	MBravoS/MILL_Codes | 11 | 
| 
	<jupyter_start><jupyter_text># Hierarchical A* path finding algorithm
Created by - Sanjana Tule
Date - 19/08/2021
* **Implement weighted risk factor**. 
Give higher weight to the risk factors than to the raw length: an edge with length = 10 and risk = 2 should not score the same as one with length = 2 and risk = 10.
Weighted Risk Factor = Length + 2 * ( Building Density Risk + Distance Risk)
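As a minimal illustration of the weighting idea above (a sketch only; the names `length`, `building_density_risk` and `distance_risk` are hypothetical and are not the attribute names used in the graphs below):

```python
def weighted_risk(length, building_density_risk, distance_risk, risk_weight=2):
    # risk terms count double relative to raw length, per the formula above
    return length + risk_weight * (building_density_risk + distance_risk)

print(weighted_risk(10, 1, 1))  # 14 -> long but relatively safe edge
print(weighted_risk(2, 5, 5))   # 22 -> short but risky edge is penalised more
```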
* **Algorithm steps/description**:
Inspired and based on ideas in the paper - https://www.researchgate.net/publication/228785110_Near_optimal_hierarchical_path-finding_HPA
Hierarchical A* is sub-optimal compared to plain A*, but it is faster. In life-threatening situations it can be more suitable than traditional A*, because A* computes the complete path before returning any navigation to the user, whereas Hierarchical A* returns an abstract path immediately and can compute the detailed sub-paths in parallel while the user is already navigating.
Assumptions:
1.   All streets are treated as two-way: after an earthquake people would be walking, and one-way signs can be ignored for safety purposes.
2.   For implementation simplicity only one entrance/exit pair between clusters is considered. In reality we may need to store all of them, because the in-flight risk might change while people navigate.
**PART 1 - Pre-processing steps to create abstract graph**
Implemented separately; the resulting abstract graph is stored on disk.
**PART 2 - Run-time steps ( search for path)**:
1.   Select the source s and destination d.
2.   Find the closest node a_s in the abstract graph to s.
3.   Find the closest node a_d in the abstract graph to d.
4.   Find the path between s and a_s in the original underlying detailed graph.
5.   Find the path between d and a_d in the original underlying detailed graph.
6.   Find the path between a_s and a_d in abstract graph.
7.   Plot all the paths together.
**PART 2 - FIND PATH USING ABSTRACT GRAPH**<jupyter_code># !pip install osmnx
# !apt install python3-rtree
# import libraries
import osmnx as ox
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import geopandas
import requests
import osmnx as ox
import networkx as nx
import numpy as np
from geopy.geocoders import Nominatim
import math
import time
import warnings
warnings.filterwarnings("ignore")
import matplotlib
# mount google drive
from google.colab import drive
drive.mount('/content/drive')<jupyter_output>Mounted at /content/drive
<jupyter_text>## PART 2 - PATH FINDING USING ABSTRACT AND DETAILED GRAPH<jupyter_code># Load abstract and detailed graph
graph_detailed = nx.read_gpickle('/content/drive/My Drive/omdena/earthquake/2_all_graph_all_risk_added.pickle')
graph_abstract = nx.read_gpickle('/content/drive/My Drive/omdena/earthquake/3_abstract_graph_25.pickle')
nodes_nr_all,edges = ox.graph_to_gdfs(graph_detailed)
abstract_intra_nodes = ox.graph_to_gdfs(graph_abstract,edges=False)
edges['combined_risk_per_length'] = edges['combined_risk'] * edges['length']
display(edges.head())
# Make the graph again
graph_detailed = ox.graph_from_gdfs(nodes_nr_all,edges)
 ##################################### PART 2 - FIND THE ROUTE #################################################################
 ###############################################################################################################################
def convertAddressToGeoCoordinates(address):
  '''
  This function is used to convert the address of any place to the corresponding latitude and longitude values.
  Parameters
  ----------
  address: str
  
  Returns
  -------
  coordinates: tuple 
               coordinates[0] = latitude 
               coordinates[1] = longitude
  '''
  geolocator = Nominatim(user_agent="Nominatim")
  address_latlon = geolocator.geocode(address)
  coordinates = (address_latlon.latitude, address_latlon.longitude)
  return coordinates
# custom plot route function
def plot_route(complete_path,graph_abstract,graph_detailed):
  '''
  This function is used to plot the final route. It has to be custom written because abstract edges do not exist in the underlying detailed graph
  Parameters
  ----------
  complete_path: list
  graph_abstract: MultiDiGraph
  graph_detailed: MultiDiGraph
  
  Returns
  -------
  NA
  '''
  abstract_intra_nodes_,abstract_edges_ = ox.graph_to_gdfs(graph_abstract)
  detailed_nodes_,edges_nr_all = ox.graph_to_gdfs(graph_detailed)
  complete_path_df = pd.DataFrame()
  for ind,se in enumerate(complete_path):
    if ind != len(complete_path)-1:
      try:
        complete_path_df = complete_path_df.append(abstract_edges_.loc[(se,complete_path[ind+1],0)])
      except:
        complete_path_df = complete_path_df.append(edges_nr_all.loc[(se,complete_path[ind+1],0)])
  complete_path_df_ = geopandas.GeoDataFrame(complete_path_df,geometry='geometry')
  # plot Hierarchical A* path
  fig, ax = plt.subplots(figsize=(20,20))
  ax.set_title('Visualise the Path using Hierarchical A* Algorithm')
  #edges_nr_all.plot(ax=ax, linewidth=1, column='combined_risk', cmap='seismic')
  edges_nr_all.plot(ax=ax, linewidth=1, edgecolor='#ffb6c1')
  complete_path_df_.plot(ax=ax, linewidth=3, edgecolor='green')
# find the distance to the nearest node
def find_close_node_in_abstract_graph(graph_abstract_nodes,source_node,graph_detailed_nodes):
  '''
  This function finds the closest node in the abstract graph
  Parameters
  ----------
  graph_abstract_nodes: dataframe
  source_node: integer
  graph_detailed_nodes: dataframe
  
  Returns
  -------
  the node id of the closest node in the abstract graph
  '''
  detailed_source_point = (graph_detailed_nodes.loc[(source_node)].x , graph_detailed_nodes.loc[(source_node)].y)
  euc_distance_nearest_node_abstract_graph = graph_abstract_nodes.geometry.apply(lambda x: get_euclidean_distance(detailed_source_point,x))
  return euc_distance_nearest_node_abstract_graph.sort_values().index.values[0]
# euclidean distance
def get_euclidean_distance(s,geom):
    return ox.distance.euclidean_dist_vec(s[1],s[0],geom.bounds[1],geom.bounds[0])
# euclidean distance reverse
def get_euclidean_distance_r(s,geom):
    return ox.distance.euclidean_dist_vec(s[0],s[1],geom.bounds[1],geom.bounds[0])
# nearest park
def find_nearest_park_shelter(graph_detailed_nodes,source_coordinates,type):
  nodes_park = graph_detailed_nodes[graph_detailed_nodes['evacuation_type']== type]
  euc_distance_parks = nodes_park.geometry.apply(lambda x: get_euclidean_distance_r(source_coordinates,x))
  return euc_distance_parks.sort_values().index.values[0]
  
# find shortest route
def find_route(source_coordinates,destination_coordinates,choice_of_destination,graph_detailed,graph_abstract,graph_abstract_nodes,graph_detailed_nodes):
  # find nearest node to the address in the detailed graph
  source_node, source_dist = ox.get_nearest_node(graph_detailed,source_coordinates,return_dist=True)
  #print('source node in detailed graph',source_node)
  # find the nearest node in the abstract graph
  source_abs_graph_node = find_close_node_in_abstract_graph(graph_abstract_nodes,source_node,graph_detailed_nodes)
  #print('source node in abstract graph',source_abs_graph_node)
  # record time
  start_time = time.time()
  # find the shortest path between source node in detailed graph to source node in the abstract graph
  shortest_path_source_to_source_abs = nx.astar_path(G=graph_detailed,source=source_node, target=source_abs_graph_node, heuristic = None, weight='combined_risk_per_length')
  #print("FIRST HALF - Source Address to Source Node in Abstract Graph",shortest_path_source_to_source_abs)
  # find the destination if not given
  if choice_of_destination == 1: # park
    dest_node = find_nearest_park_shelter(graph_detailed_nodes,source_coordinates,'park')
  elif choice_of_destination == 2: # shelter
    dest_node = find_nearest_park_shelter(graph_detailed_nodes,source_coordinates,'shelter')
  else:
    # find nearest node to the address in the detailed graph
    dest_node, dest_dist = ox.get_nearest_node(graph_detailed,destination_coordinates,return_dist=True)
  # find the nearest node in the abstract graph
  dest_abs_graph_node = find_close_node_in_abstract_graph(graph_abstract_nodes,dest_node,graph_detailed_nodes)
  #print('destination node in abstract graph',dest_abs_graph_node) 
  # find the middle path in the abstract path
  shortest_path_abs = nx.astar_path(G = graph_abstract, source=source_abs_graph_node, target=dest_abs_graph_node, heuristic = None, weight='combined_risk_per_length')
  #print("MIDDLE ROUTE - Source to Destination Node in Abstract Graph",shortest_path_abs)
  # find the shortest path betwen destination node in detailed graph to the destination node in the abstract graph
  shortest_path_dest_to_dest_abs = nx.astar_path(G =graph_detailed,source=dest_abs_graph_node, target=dest_node, heuristic = None, weight='combined_risk_per_length')
  #print("SECOND HALF - Destination Address to Destination Node in Abstract Graph",shortest_path_dest_to_dest_abs)
  if len(shortest_path_abs) == 1: # if source and destination are in same cluster
    shortest_path_source_to_source_abs = shortest_path_source_to_source_abs[1:-1]
  # Join the Shortest Route together
  complete_path  = shortest_path_source_to_source_abs  + shortest_path_abs[1:-1] + shortest_path_dest_to_dest_abs
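  # note: shortest_path_abs is sliced with [1:-1] because its first and last nodes (the abstract
  # entry/exit nodes) already appear at the end of the first leg and the start of the last leg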
  #print('COMPLETE ROUTE -',complete_path)
  A_time = (time.time() - start_time)
  print("TOTAL TIME",A_time)
  # Plot route
  plot_route(complete_path,graph_abstract,graph_detailed)
  # length and risk
  total_length = sum(nx.Graph(graph_detailed)[u][v].get('length') for u, v in zip(shortest_path_source_to_source_abs[:-1], shortest_path_source_to_source_abs[1:]))
  total_risk = sum(nx.Graph(graph_detailed)[u][v].get('combined_risk_per_length') for u, v in zip(shortest_path_source_to_source_abs[:-1], shortest_path_source_to_source_abs[1:]))
  total_length = total_length + sum(nx.Graph(graph_abstract)[u][v].get('length') for u, v in zip(shortest_path_abs[:-1], shortest_path_abs[1:]))
  total_risk = total_risk + sum(nx.Graph(graph_abstract)[u][v].get('combined_risk_per_length') for u, v in zip(shortest_path_abs[:-1], shortest_path_abs[1:]))
  total_length = total_length + sum(nx.Graph(graph_detailed)[u][v].get('length') for u, v in zip(shortest_path_dest_to_dest_abs[:-1], shortest_path_dest_to_dest_abs[1:]))
  total_risk = total_risk + sum(nx.Graph(graph_detailed)[u][v].get('combined_risk_per_length') for u, v in zip(shortest_path_dest_to_dest_abs[:-1], shortest_path_dest_to_dest_abs[1:]))
  # total length and risk
  print("LENGTH OF THE ROUTE",total_length)
  print("RISK OF THE ROUTE",total_risk)<jupyter_output><empty_output><jupyter_text>DEMO 1 - 
FIND PATH FOR CUSTOM DESTINATION<jupyter_code>###### MAIN FUNCTION - CUSTOM DESTINATION #####
# source_address = input('Enter your Current Address.')
# choice_of_destination = int(input('Where to you want to go? \n 1.Nearest Park \n 2.Nearest Shelter \n 3.Custom Destination \n Type 1, 2 or 3 \n'))
# if choice_of_destination not in (1,2,3):
#   print("Wrong Choice")
# if choice_of_destination == 3:
#   destination_address = input('Please give the Custom Destination Address.')
# else:
#   destination_address = 'NA'
choice_of_destination = 3
source_address = "19130 Tulsa St, Northridge, CA 91326"
source_coordinates = convertAddressToGeoCoordinates(source_address)
destination_address = "7741 Hayvenhurst Ave, Van Nuys, CA 91406"
destination_coordinates = convertAddressToGeoCoordinates(destination_address)
  
# find shortest route and plot it
find_route(source_coordinates,destination_coordinates,choice_of_destination,graph_detailed,graph_abstract,abstract_intra_nodes,nodes_nr_all)
# Demo purposes - option 3
# Source Address = 19130 Tulsa St, Northridge, CA 91326
# Destination Address = '7741 Hayvenhurst Ave, Van Nuys, CA 91406'
###### MAIN FUNCTION - CUSTOM DESTINATION #####
# source_address = input('Enter your Current Address ')
# choice_of_destination = int(input('Where to you want to go? \n 1.Nearest Park \n 2.Nearest Shelter \n 3.Custom Destination \n Type 1, 2 or 3 \n'))
# if choice_of_destination not in (1,2,3):
#   print("Wrong Choice")
# if choice_of_destination == 3:
#   destination_address = input('Please give the Custom Destination Address.')
# else:
#   destination_address = 'NA'
choice_of_destination = 3
source_address = "22801 Santa Susana Pass Rd, Chatsworth, CA 91311"
source_coordinates = convertAddressToGeoCoordinates(source_address)
destination_address = "8418 Noble Ave, North Hills, CA 91326"
destination_coordinates = convertAddressToGeoCoordinates(destination_address)
# find shortest route and plot it
find_route(source_coordinates,destination_coordinates,choice_of_destination,graph_detailed,graph_abstract,abstract_intra_nodes,nodes_nr_all)
# Demo purposes - option 3
# Source Address = 22801 Santa Susana Pass Rd, Chatsworth, CA 91311
# Destination Address = 8418 Noble Ave, North Hills, CA 91326
###### MAIN FUNCTION - CUSTOM DESTINATION #####
# source_address = input('Enter your Current Address ')
# choice_of_destination = int(input('Where to you want to go? \n 1.Nearest Park \n 2.Nearest Shelter \n 3.Custom Destination \n Type 1, 2 or 3 \n'))
# if choice_of_destination not in (1,2,3):
#   print("Wrong Choice")
# if choice_of_destination == 3:
#   destination_address = input('Please give the Custom Destination Address.')
# else:
#   destination_address = 'NA'
choice_of_destination = 3
source_address = "22801 Santa Susana Pass Rd, Chatsworth, CA 91311"
source_coordinates = convertAddressToGeoCoordinates(source_address)
destination_address = "7741 Hayvenhurst Ave, Van Nuys, CA 91406"
destination_coordinates = convertAddressToGeoCoordinates(destination_address)
# find shortest route and plot it
find_route(source_coordinates,destination_coordinates,choice_of_destination,graph_detailed,graph_abstract,abstract_intra_nodes,nodes_nr_all)
# Demo purposes - option 3
# Source Address = 22801 Santa Susana Pass Rd, Chatsworth, CA 91311
# Destination Address = 7741 Hayvenhurst Ave, Van Nuys, CA 91406<jupyter_output>TOTAL TIME 0.26170802116394043
LENGTH OF THE ROUTE 21524.645999999993
RISK OF THE ROUTE 31122.676508011344
<jupyter_text>DEMO 2 - 
FIND PATH FOR NEAREST PARK
<jupyter_code>###### MAIN FUNCTION - NEAREST PARK #####
# source_address = input('Enter your Current Address ')
# choice_of_destination = int(input('Where to you want to go? \n 1.Nearest Park \n 2.Nearest Shelter \n 3.Custom Destination \n Type 1, 2 or 3 \n'))
# if choice_of_destination not in (1,2,3):
#   print("Wrong Choice")
# if choice_of_destination == 3:
#   destination_address = input('Please give the Custom Destination Address.')
# else:
#   destination_address = 'NA'
choice_of_destination = 1
source_address = "19130 Tulsa St, Northridge, CA 91326"
source_coordinates = convertAddressToGeoCoordinates(source_address)
destination_coordinates = ''
  
# find shortest route and plot it
find_route(source_coordinates,destination_coordinates,choice_of_destination,graph_detailed,graph_abstract,abstract_intra_nodes,nodes_nr_all)
# 19130 Tulsa St, Northridge, CA 91326<jupyter_output>TOTAL TIME 0.026717662811279297
LENGTH OF THE ROUTE 1684.3980000000001
RISK OF THE ROUTE 3775.2255948245306
<jupyter_text>DEMO 3 - 
FIND PATH FOR NEAREST SHELTER<jupyter_code>###### MAIN FUNCTION - NEAREST SHELTER #####
# source_address = input('Enter your Current Address ')
# choice_of_destination = int(input('Where to you want to go? \n 1.Nearest Park \n 2.Nearest Shelter \n 3.Custom Destination \n Type 1, 2 or 3 \n'))
# if choice_of_destination not in (1,2,3):
#   print("Wrong Choice")
# if choice_of_destination == 3:
#   destination_address = input('Please give the Custom Destination Address.')
# else:
#  destination_address = 'NA'
choice_of_destination = 2
source_address = "19130 Tulsa St, Northridge, CA 91326"
source_coordinates = convertAddressToGeoCoordinates(source_address)
destination_coordinates = ''
  
# find shortest route and plot it
find_route(source_coordinates,destination_coordinates,choice_of_destination,graph_detailed,graph_abstract,abstract_intra_nodes,nodes_nr_all)<jupyter_output>TOTAL TIME 0.021811246871948242
LENGTH OF THE ROUTE 4040.5259999999994
RISK OF THE ROUTE 6649.337162974123
 | 
	no_license | 
	/3_2_hierarchical_pathfinding_part_2.ipynb | 
	santule/oomdena_earthquake | 5 | 
| 
	<jupyter_start><jupyter_text>Still need to do
- figure out how to take out the random little low points for when stations were downloaded
- expand the code to loop across all 7 stations, (wooh!) 
- Also run with both baros, and have a nice little toggle switch somewhere to use 1 or 2
- Eventually use those graphs to code in an algorithm to give some minimum or 0 flow to the timeperiods where logger was dry. <jupyter_code># make the screen bigger!
from IPython.display import display, HTML
display(HTML(data="""
<style>
    div#notebook-container    { width: 95%; }
    div#menubar-container     { width: 85%; }
    div#maintoolbar-container { width: 99%; }
</style>
"""))
# import all our libraries (if this cell throws an error you may need to reinstall a library)
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import shutil   # use to move PDF out of the directory
import re
from datetime import date, datetime, timedelta
from itertools import chain
import statistics as st
import copy
%matplotlib notebook
#change this home directory to whatever the project home directroy is on any other computer 
homedir = ("C:\\Users\\cshuler\\Desktop\\AmericanSamoaDocs\\Data_Analysis\\Weather_Stations_and_stream_gages\\Python")
os.chdir(homedir)
# paths for files to come from or go to do not change as long as relative paths are kept consistent
path_raw_stream_data = ("raw_data/Stream/StreamGage_data")
path_raw_baro_data = ("raw_data/Stream/Baro_data")
path_flow_data = ("raw_data/Stream/Flow_data")
path_stream_data =("raw_data/Stream")
# this cell is the basic code block that takes the stream files from the folder and turns them into a Python data dictionary and then a master file 
os.chdir(homedir)
os.chdir(path_raw_stream_data)
files = os.listdir()                                   # make a list of all the files in the directory
columns = ['Date and Time', 'time', 'psi', 'temp']
float_cols = ['psi', 'temp']
All_data = {} 
for i in files: 
    x = pd.read_csv(i)                                 # read each file
    x = x.iloc[:,0:4]
    x.columns = columns
    x['Date_tmp'] = pd.to_datetime(x['Date and Time'] + ' ' + x['time'])
    x['Date and Time'] = x['Date_tmp']
    del x['Date_tmp']
    del x['time']
    
#some data cleaning    
# make sure each entry is on a 15 min rounded interval: note rounds down to the last 15 min interval
    x['Date and Time'] = x['Date and Time'].apply(lambda dt: datetime(dt.year, dt.month, dt.day, dt.hour,15*(dt.minute // 15)))
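    # (added note) an equivalent, vectorised alternative - assuming a reasonably recent pandas - would be:
    # x['Date and Time'] = x['Date and Time'].dt.floor('15min')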
 # and also make sure all the data is in float form
    for h in float_cols:
        x[h]=x[h].astype(float)  
        
# now rename each dataframe as site, date and time
    site = i.split('-')[0]
    start = str(x['Date and Time'][0]).split(' ')[0]
    end = str(x['Date and Time'].iloc[-1]).split(' ')[0] 
    key_name = site +'_'+ start +'_'+ end
    All_data[key_name] = x    
# eventually should make this loopable to create 2 different baro files
#compile the baro values
os.chdir(homedir)
os.chdir(path_raw_baro_data)
files = os.listdir()                                   # make a list of all the files in the directory
All_Baro_data = {}  
columns = ['Date and Time', 'time', 'psi', 'temp']
float_cols = ['psi', 'temp']
Bdata = {} 
for i in files: 
    x = pd.read_csv(i)                                 # read each file
    x = x.iloc[:,0:4]
    x.columns = columns
    x['Date_tmp'] = pd.to_datetime(x['Date and Time'] + ' ' + x['time'])
    x['Date and Time'] = x['Date_tmp']
    del x['Date_tmp']
    del x['time']
      
#some data cleaning    
# and also make sure all the data is in float form
    for h in float_cols:
        x[h]=x[h].astype(float)  
        
# now rename each dataframe as site, date and time
    site = i.split('-')[0]
    start = str(x['Date and Time'][0]).split(' ')[0]
    end = str(x['Date and Time'].iloc[-1]).split(' ')[0] 
    key_name = site +'_'+ start +'_'+ end
    Bdata[key_name] = x
    
BaroMaster = pd.concat(Bdata.values()).drop_duplicates()  # stick all the files together into one, and drop any overlap
BaroMaster = BaroMaster.sort_values('Date and Time') # This now sorts in date order
baro_cols = ['Date and Time', 'baro_psi', 'baro_temp']
BaroMaster.columns = baro_cols   # now we have a master file of all the concatenated baro files
# this will interpolate the values into 1 minute intervals in case the baro is at a different interval than the stream loggers
bm_indexed = BaroMaster.set_index('Date and Time', inplace=False)    # the method needs the dates to be the index
bm_resampled  = bm_indexed.resample('min')                           # this resamples the data at whetver interval you want
bm_interpolated = bm_resampled.interpolate(method='quadratic')       # this interpolates data points between the actual values, Quadratic is nice, 'linear' works too 
bm_interpolated['Date and Time'] = bm_interpolated.index             # pull the index back onto a column for matching up 
bm_interpolated = bm_interpolated.dropna()                                             # for some reason the last rows have NaN values, drop these
<jupyter_output><empty_output><jupyter_text>This cell does compilation, plotting, and baro correction.
- First it compiles all the files with the same station name into one master file.
- Then it plots all the individual files and the master file so you can look for errors
- finally it will take the baro file(s) and baro correct a column so we can get a direct measurement of actual meters of water head above the transducer<jupyter_code>All_masters ={}                            # empty dictionary that will be the dictionary of all the master files
stations = ['Fagaalu', 'Leone', 'Fagasa', 'Afono', 'Nuuuli', 'Vaipito','Malota', 'Fagaitua']            # Station list, must be manually paramaterized                
listos = list(All_data.keys())             # This is the list of every single file in the folder
plt.close("all")  # close previous figures to clear memory
# this code sorts out each file name for each station and stores it in a temporary list
for q in stations: 
    Temp_Dic ={}
    r = re.compile(q)                       # find things that have this substring in them
    file_list = list(filter(r.match, listos))
# and just some plotting stuff that will happen for each station
    fig = plt.figure(figsize=(18, 5))
    ax1 = plt.axes()
    ax1.set_ylabel('psi', color='k')
    ax1.set_title(q)
    
#Now we iterate over each file name in the temporary list for each station    
    for i in file_list:                         
        Temp_Dic[i] = All_data[i]                                          # put all the data into a temorary dictionary (note this is the only place all the station specific data is in a dictionary)
        All_masters[q] = pd.concat(Temp_Dic.values()).drop_duplicates()    # stick them all together into a station specific master file
        All_masters[q] = All_masters[q].sort_values('Date and Time')       # sort by date
        #All_masters[q] = All_masters[q].fillna(10)
        All_masters[q] = All_masters[q].dropna(axis=0, how='all')
        
# more plotting stuff, this plots each file as a different color        
        plt.plot(All_data[i]['Date and Time'], All_data[i]['psi'], alpha = .9, label=i, marker='.')
        #plt.legend()   # just in case you want to know which file is which color, annoying so its commented out
# now get a graphical representation of how the individual files went into the master file. if there were issues with how the files overlpped it would be seen here
    plt.plot(All_masters[q]['Date and Time'], All_masters[q]['psi'], alpha = .5, color='k')    
    
    
# this is the baro correction routine     
    merger = All_masters[q].merge(bm_interpolated, how='inner', on='Date and Time') # this sticks the baro data onto the station master data and likes up the dates
    merger['corrected_H2O_level'] = (merger['psi'] -  merger['baro_psi'])*0.703070   # The correction is simple,  0.703070 is the conversion between PSI and m of water head
    All_masters[q] = merger
    All_masters[q] = All_masters[q].dropna(subset = ['corrected_H2O_level'], axis=0, how='any') # somehow the merge added duplicate dates remove entries with double dates take the    
#And a plot of the correction for good measure, just since it is soooo cool
plt.close("all")  # close previous figures to clear memory
for k in stations:
    fig = plt.figure(figsize=(18, 6))
    ax1 = plt.axes()
    plt.plot(All_masters[q]['Date and Time'], (All_masters[q]['psi'] -  All_masters[q]['baro_psi']), alpha = .7, color='k', label="Baro-corrected")   # note this plot is in 
    plt.plot(All_masters[q]['Date and Time'], All_masters[q]['psi']-All_masters[q]['baro_psi'].mean(), alpha = .6, color='y', label="raw")   # Master['psi']-merger['psi'].mean() is the average baro psi value for comparison
    plt.axhline(y=0, color='r', linestyle='-')                                                                              # just plotting the 0 point, if negative the logger was obviously pretty close to no water
    ax1.set_ylabel('corrected/shifted psi (not water height)', color='k')
    plt.legend()
    ax1.set_title(k)
<jupyter_output><empty_output><jupyter_text>Direct comparison between baro data and gage data.
This is specifically useful for assessing times when the logger dried out.
A dry logger can be recognized when the gage data matches up with the baro data.
When the gage data is above the baro, the station had water.
Eventually use this to code in an algorithm to give some minimum or 0 flow to the timeperiods where logger was dry. <jupyter_code># Now here is an interesting graphical represenation of how the actual baro data compares to the stream gauge data. Looks like Fagaalu was out of the water for a lot of the time ooops.
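# --- added sketch (not part of the original workflow): one way the dry-logger periods described above
# --- could be flagged automatically, assuming an arbitrary small psi tolerance between gage and baro
dry_tolerance_psi = 0.05   # assumed threshold; would need tuning per station
for q in stations:
    likely_dry = (All_masters[q]['psi'] - All_masters[q]['baro_psi']).abs() < dry_tolerance_psi
    All_masters[q]['likely_dry'] = likely_dry   # later steps could assign zero/minimum flow where True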
plt.close("all")  # close previous figures to clear memory
for q in stations:
    fig = plt.figure(figsize=(19, 5))
   
    for i in Bdata.keys():    
        ax1 = plt.axes()
        ax1.set_ylabel('psi', color='k')
        plt.plot(Bdata[i]['Date and Time'], Bdata[i]['psi'], alpha = .9, label=i)
        ax1.set_title(q)
    plt.plot(All_masters[q]['Date and Time'], All_masters[q]['psi'], alpha = .4, color='k', label='streamdata')
   
#Bad data point removal algorithm (flags and removes download artifacts)
plt.close("all")  # close previous figures to clear memory
All_masters_clean = {}    #make a new dataframe for the data cleaned of download outliers
for q in stations: 
# this generates a baseline value, defined as a low percentile of the medians of 100 evenly spaced sample bins of the data set
    bin_size = round(len(All_masters[q]['corrected_H2O_level'])/100)
    da_meeds = []
    ind = 0
    for i in range(1,101): 
        win = All_masters[q]['corrected_H2O_level'][ind: i*bin_size]
        ind = ind + bin_size
        m = st.median(win)
        da_meeds.append(m)
    baseline = np.percentile(da_meeds, 1) # the 1st percentile; change this number to dial it in
    
# and it doesn't work if the baseline is below zero. in this case just set the baseline to a small nunmber  
    if baseline > 0:
        All_masters[q]['baseline_val'] = baseline
    else:
        All_masters[q]['baseline_val'] = .001
            
# the algorithm will look for values that are below the baseline, and also show up as spikes in a standard deviation plot. 
    All_masters[q]['run_std'] = All_masters[q]['corrected_H2O_level'].rolling(window=3, center=True).std()  # here is our rolling 3 value std plot
    
    bad = (All_masters[q]['corrected_H2O_level']-All_masters[q]['baseline_val'])/All_masters[q]['baseline_val'] < -.5   # now we flag values that are a a standardized amount below the baseline value
    bad_selected = All_masters[q][bad]
    
    also_bad = All_masters[q]['run_std']/All_masters[q]['run_std'].mean() > 10      # and from that set select values that also correspond with sdt sppikes, meaning they are anomolous 
    final_bad = bad_selected[also_bad]                                              # here is the data frame of just bad values (they are not gotten rid of yet only identified) 
       
#can plot the bad values on the data to see what the algorithm selected; comment this block out if not needed
    fig = plt.figure(figsize=(18, 6))
    ax1 = plt.axes()
    plt.plot(All_masters[q]['Date and Time'], All_masters[q]['corrected_H2O_level'] , alpha = .7, color='y', label="data",  marker = '.')
    ax1.plot(All_masters[q]['Date and Time'], All_masters[q]['baseline_val'], linestyle='-', color='purple', alpha=.8)
    plt.plot(final_bad['Date and Time'], final_bad['corrected_H2O_level'], color='r', linestyle='None', marker='o')   # note this plot is in 
    plt.title(q)
    plt.legend()
# if it seems like the computer selected the right values lets get rid of them
    drops = final_bad['Date and Time'].tolist()          #this is the list of bad dates to drop
    print(drops)
    All_masters_clean[q] =  All_masters[q][~All_masters[q]['Date and Time'].isin(drops)]  # the little ~ means the opposite of, so this will KEEP all the points that are not in the drops list (also looks like the code just deletes the whole row, does not make it NAN or anything)
# again can plot to see if its all good.   
    plt.plot(All_masters_clean[q]['Date and Time'], All_masters_clean[q]['corrected_H2O_level'] , alpha = .7, color='b', label="Clean_data")
os.chdir(homedir)
os.chdir(path_stream_data)
datums = pd.read_csv('Stream_gage_datums.csv') 
#basic cleaning 
datums['Date and Time'] = pd.to_datetime(datums['Date and Time'])   #date to dates
float_cols = ['WL_below_datum']
 # and also make sure all the data is in float form
for h in float_cols:
    datums[h]=datums[h].astype(float)
datums['Site'] = datums['Site'].apply(lambda x: x.strip()) # delete any eronious spaces before or after names
# this takes the times and rounds them DOWN to the last 15 min interval point.  
datums['Date and Time'] = datums['Date and Time'].apply(lambda dt: datetime(dt.year, dt.month, dt.day, dt.hour,15*(dt.minute // 15)))
Datum_cors = pd.DataFrame(columns=['Date and Time', 'psi', 'temp', 'baro_psi', 'baro_temp', 'corrected_H2O_level', 'baseline_val', 'run_std', 'Site', 'WL_below_datum'])
for k in stations: 
    d_pt_bool = datums['Site'] == k
    d_pt = datums[d_pt_bool]
    row_select = All_masters_clean[k].merge(d_pt, how='inner', on='Date and Time')
    
# since one of the datums (afono) landed on a point that was removed cuz of a download this code block will sub in a time, and thus PT stage from an hour ago to match the datum measurement  
    if row_select.empty:
        d_pt['Date and Time'] = d_pt['Date and Time'] - timedelta(minutes=60)
        row_select = All_masters_clean[k].merge(d_pt, how='inner', on='Date and Time')
    
    Datum_cors = pd.concat([Datum_cors, row_select]) 
Datum_cors['sensor_level_below_datum'] = Datum_cors['WL_below_datum'] + Datum_cors['corrected_H2O_level']
# now the All_masters_referenced set has the data based on the height below the datum line to keep it all standardized  (this is where I will change when sensor height is moved...)
All_masters_referenced = copy.deepcopy(All_masters_clean)      
for m in stations: 
    c_bool = Datum_cors['Site'] == m
    c = Datum_cors[c_bool]['sensor_level_below_datum'][0]
    All_masters_referenced[m]['WL_below_datum_[m]'] = All_masters_referenced[m]['corrected_H2O_level'] - c   
    
# this cell takes the manual flow measurements and sticks them on to the master files as a separate column
os.chdir(homedir)
os.chdir(path_flow_data)
flo = pd.read_csv('Flow_measurements.csv')   #read the flow measurements file Change name if it is different
plt.close("all")  # close previous figures to clear memory
#basic cleaning 
flo['Date and Time'] = pd.to_datetime(flo['Date and Time'])   #date to dates
float_cols = ['Q_m3ps', 'Staff_reading']
 # and also make sure all the data is in float form
for h in float_cols:
    flo[h]=flo[h].astype(float)
flo['Site'] = flo['Site'].apply(lambda x: x.strip()) # delete any eronious spaces before or after names
# this takes the times and rounds them DOWN to the last 15 min interval point.  
flo['Date and Time'] = flo['Date and Time'].apply(lambda dt: datetime(dt.year, dt.month, dt.day, dt.hour,15*(dt.minute // 15)))
# this block turns the consolodated measurement dataframe into a dict of each sites measurements
flo_sites = {}
for y in stations:
    current_sta = flo['Site'] == y
    isolated_dat = flo[current_sta] 
    flo_sites[y] = isolated_dat
# merge in the measurement data to the master files for each station
All_masters_flow = {}
for p in stations: 
    All_masters_flow[p] = All_masters_referenced[p].merge(flo_sites[p], how='left', on='Date and Time')
    
   #can plot where the values line up with dates and relative magnitudes, need to still develop the curve. 
    fig = plt.figure(figsize=(18, 6))
    ax1 = plt.axes()
    plt.plot(All_masters_flow[p]['Date and Time'], All_masters_flow[p]['WL_below_datum_[m]'] , alpha = .7, color='y',  marker = '.')
    #ax1.plot(All_masters_flow[p]['Date and Time'], All_masters_flow[p]['baseline_val'], linestyle='-', color='purple', alpha=.8)
    
    ax2 = ax1.twinx()
    ax2.plot(All_masters_flow[p]['Date and Time'], All_masters_flow[p]['Q_m3ps'], color='r', linestyle='None', marker='o')   # note this plot is in 
    plt.title(p)
    ax2.legend() 
    ax1.legend(loc=2)
# this is starting to work on graphing up the stage discharge relationship
R_curves = {}
fig = plt.figure(figsize=(10, 10))
p = 0   # subplot iterator
for l in stations:
    boo =  All_masters_flow[l]['Q_m3ps'].notnull()
    curve_data = All_masters_flow[l][boo]
    R_curves[l] = curve_data
    p = p+1 
    ax1 = fig.add_subplot(3,3,p)
    ax1.set_title(l, color='darkblue')
    xval =   R_curves[l]['WL_below_datum_[m]']              # just values for easy plug and play
    yval = R_curves[l]['Q_m3ps'] 
    
    ax1.set_ylabel('Q [m3ps]', color='k')     
    ax1.set_xlabel('WL_below_datum [m]', color='k')
    plt.scatter(xval, yval, label='', marker='o')
    plt.tight_layout()
    
    
    coefs = np.polyfit(xval, yval, 2 )
    ary = (np.linspace(xval.min(), xval.max(), num=30))
    x = pd.DataFrame(ary, dtype='float')
    y = coefs[0]*x**2 + coefs[1]*x +coefs[2]
    ax1.plot(x, y)
    
    
    
    
os.chdir(homedir)
os.chdir(path_stream_data)
num_interp_dots = 1000
Stages_count = 1000
CS_pre = pd.read_csv('CS_Test.csv')    # here is the cross section data file. WIll need individual one for each station
#basic cleaning 
float_cols = ['Distance', 'Depth']   #  make sure all the data is in float form
for h in float_cols:
    CS_pre[h]=CS_pre[h].astype(float)
# Interpolate values for dist and depth = distance from datum to channel bottom.
original_dots = np.linspace(0, 1, len(CS_pre['Distance']))    # the 0 to 1 thing must be just a kind of float type of index so that the acrual values will be properly spaced
 # this integer here is what controlls the density of total interpolated points that will exist  
interp_dots = np.linspace(0, 1, num_interp_dots)
# One-dimensional linear interpolation.
x_data = np.interp(interp_dots, original_dots, CS_pre['Distance'])     
y_data = np.interp(interp_dots, original_dots, CS_pre['Depth'])
CS = pd.DataFrame({'Distance':x_data, 'Depth':y_data})   # now define CS as our cross section dataframe with extra interp dots
    
CS['height'] = CS['Depth'] - CS['Depth'].min()            # this is to reset the 0 point to the bottom so I can think straight
CS['midpoints'] =   (CS['Distance'].shift(-1)- CS['Distance'])*.5  + CS['Distance']    # the midpoints of each box
CS['midheights'] =   (CS['height'].shift(-1)- CS['height'])*.5  + CS['height']         # the full height of each box from channel bottom to datum line 
CS['Box_width'] =   (CS['Distance'].shift(-1)- CS['Distance'])                       # the width of each box in m 
CS['hypotonuse_length'] = ((CS['Distance'].shift(-1)- CS['Distance'])**2 + (CS['height'].shift(-1)- CS['height'])**2)**.5   # the length of the bottom section of each box, to be used for wetted perimiter
# this parameter controls how many water-level points are sampled to calculate Manning's Q at. Should be lower than the number of interp points, I would think
spacing = Stages_count                                                
level_list = np.linspace(0, CS['height'].max(), spacing)      # here we define a spacing and a list of different levels to calculate
#plot the profile and interp dots
fig = plt.figure(figsize=(4, 4))
ax1 = plt.axes()
plt.plot(CS['Distance'], CS['height'], marker='.')
plt.plot(CS['midpoints'], CS['midheights'], marker='.', linestyle='none')
#Make the dataframe of the paramaters for Mannings calculations 
Stage = [] 
Area = []
Wet_perim =[]
for i in level_list:
    
    ab = CS['midheights'] < i                   # select which boxes are active
    active_boxes =  copy.deepcopy(CS[ab])        # only boxes with bottoms below the water level are considered
    
    Water_level = i 
    active_boxes['Box_height'] = Water_level - active_boxes['midheights']            #the height of the boxes from bottom of box to height of water level
    active_boxes['Box_area'] = active_boxes['Box_width']*active_boxes['Box_height']  # cross sectional area of the box
 
    A = active_boxes['Box_area'].sum()                                              # cross sectional area of the challel
    WP = active_boxes['hypotonuse_length'].sum()                                    # the langth of the bottom of the channel for the boxes summed = wet perimiter
    Stage.append(i)
    Area.append(A)
    Wet_perim.append(WP)                   #take each value for each stage and append to a list
mannings = pd.DataFrame({'Stage':Stage, 'Area':Area, 'Wet_perim':Wet_perim})      # this is our mannings stage dataframe
mannings['hydraulic_radius'] = mannings['Area']/mannings['Wet_perim']
mannings
#plot the profile and interp dots
fig = plt.figure(figsize=(17, 10))
ax1 = plt.axes()
plt.plot(mannings['Stage'], mannings['Wet_perim'], 'g', label='Wet_perim')
plt.plot(mannings['Stage'], mannings['hydraulic_radius'], "b", label='hydraulic_radius')
plt.plot(mannings['Stage'], mannings['Area'], 'r', label='Area')
ax1.set_xlabel('Stage [m]', color='k')
plt.legend()
#parameters to adjust for each 
Slope = 0.2
Mannings_n = 0.2
mannings['mannings_Q'] = (1/Mannings_n)*(mannings['Area']*mannings['hydraulic_radius']**(2/3))*Slope**0.5
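# (added note) this line is Manning's equation, Q = (1/n) * A * R^(2/3) * sqrt(S), with A the flow area,
# R the hydraulic radius and S the channel slope; in SI units (A in m^2, R in m) it yields Q in m^3/s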
# this is starting to work on graphing up the stage discharge relationship
fig = plt.figure(figsize=(7, 5))
ax1 = plt.axes()
plt.plot(R_curves['Leone']['WL_below_datum_[m]']+2.1, R_curves['Leone']['Q_m3ps'], 'g', label='Leone_data', marker= 'o', linestyle='none')
plt.plot(mannings['Stage'][0:300], mannings['mannings_Q'][0:300], "b", label='hydraulic_radius')
ax1.set_xlabel('Stage [m]', color='k')
ax1.set_ylabel('Q [m3ps]', color='k')
ax1.set_title('leone', color='darkblue')
R_curves.keys()
# Top width, area of lfow, hydraulic radius
from math import *
from __future__ import division
# Given
b = 3                            # base of the channel
z = 0.5                          # slope of the channel
y = 2                            # depth of the channel
# Solution
T = b + 2*z*y
print ("Top width =",round(T,5),"m")
A = (b+z*y)*y
print("Area of flow =",round(A,0),"m**2")
P = b + 2*y*sqrt(1+z**2)
print("Wetted perimeter =",round(P,3),"m")
R = A/P
print( "Hydraulic radius =",round(R,2),"m")
D = A/T
print ("Hydraulic depth =",round(D,2),"m")
Z = A*sqrt(D)
print ("Secton Factor =",round(Z,2),"m**2")
Slope**0.5<jupyter_output><empty_output> | 
	no_license | 
	/Python/Scripts/.ipynb_checkpoints/exploratory_stream_script_ver3-checkpoint.ipynb | 
	cshuler/Samoa_ASPA_UH_Stream_data_process | 3 | 
| 
	<jupyter_start><jupyter_text>### Get stations data<jupyter_code>stations = pd.read_json('./bkk-stations.json')
bkk_lat, bkk_lng = stations['lat'].mean(),  stations['lng'].mean()
import folium
map_bkk = folium.Map(location=[bkk_lat, bkk_lng], zoom_start=12)
for lat, lng, name in zip(stations['lat'], stations['lng'], stations['desc']):
    label = folium.Popup(name, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color='blue',
        fill=True,
        fill_color='#3186cc',
        fill_opacity=0.7
    ).add_to(map_bkk)  
    
map_bkk<jupyter_output><empty_output><jupyter_text>### Get all categories reference<jupyter_code>import requests
CLIENT_ID = '' # your Foursquare ID
CLIENT_SECRET = '' # your Foursquare Secret
VERSION = '20180605' # Foursquare API version
categories_url = 'https://api.foursquare.com/v2/venues/categories?v={}&client_id={}&client_secret={}'.format(VERSION,CLIENT_ID,CLIENT_SECRET)
raw_categories = requests.get(categories_url).json()['response']['categories']
def flatten_categories(category, main_category):
    output = [[category['id'], category['shortName'], main_category['id']]]
    if len(category['categories'])>0:
        for c in category['categories']:
            output += flatten_categories(c, main_category) 
    return output
all_categories = np.concatenate([flatten_categories(c, c) for c in raw_categories], axis=0)
all_categories = pd.DataFrame(all_categories,columns=['id','desc','main_id']).set_index('id')
main_categories = all_categories.loc[all_categories['main_id'].unique()][['desc']]
selected_categories = main_categories[main_categories['desc']!='Food']<jupyter_output><empty_output><jupyter_text>### Get venues of each stations<jupyter_code>def searchNearbyVenues(ref, lat, lng, categoryIds, radius=650, LIMIT=100,):
    venues_list=[]
    print(ref)
        
    # create the API request URL
    url = 'https://api.foursquare.com/v2/venues/search?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}&intent=browse&categoryId={}'.format(
        CLIENT_ID, 
        CLIENT_SECRET, 
        VERSION, 
        lat, 
        lng, 
        radius, 
        LIMIT,
        categoryIds)
        
    # make the GET request
    results = requests.get(url).json()["response"]['venues']
    
    # return only relevant information for each nearby venue
    venues_list += [(
        ref, 
        lat, 
        lng, 
        v['name'], 
        v['location']['lat'], 
        v['location']['lng'],  
        v['categories'][0]['id']) for v in results]
    
    nearby_venues = pd.DataFrame(venues_list,
                                 columns = ['reference', 
                                            'Neighborhood Latitude', 
                                            'Neighborhood Longitude', 
                                            'Venue', 
                                            'Venue Latitude', 
                                            'Venue Longitude', 
                                            'Venue Category ID'])
    
    return(nearby_venues)
venues = [searchNearbyVenues(x.desc, x.lat, x.lng, ','.join(selected_categories.index.values)) for index, x in stations.iterrows()]
venues = pd.concat(venues,axis=0).reset_index(drop=True)
venues['Venue Category ID'] = venues['Venue Category ID'].map(all_categories['main_id'])
venues = venues[venues['Venue Category ID']!=main_categories[main_categories['desc']=='Food'].index[0]]
palette = ["hsl({}, 100%, 50%)".format(360*x/3) for x in range(0,9)]
map_venues = folium.Map(location=[bkk_lat, bkk_lng], zoom_start=12)
for lat, lng, cate in zip(venues['Venue Latitude'], venues['Venue Longitude'], venues['Venue Category ID']):
    color = palette[selected_categories.index.get_loc(cate)]
    folium.Circle(
        [lat, lng],
        radius=5,
        opacity=0.7,
        color=color,
    ).add_to(map_venues)  
    
map_venues
venues['Category'] = venues['Venue Category ID'].map(selected_categories['desc'])
stations_x_categories = venues[['reference','Category','Venue']].groupby(['reference','Category']).count().unstack('Category')['Venue'].fillna(0)<jupyter_output><empty_output><jupyter_text>The category "Event" seems anomaly.<jupyter_code>stations_x_categories.sum()<jupyter_output><empty_output><jupyter_text>It is unlikely a permanent venue anyway, so, remove "Event" category.<jupyter_code>stations_x_categories.drop(columns=['Event'],inplace=True)
selected_categories = selected_categories[selected_categories['desc']!='Event']<jupyter_output><empty_output><jupyter_text>normalize features<jupyter_code>stations_x_categories = stations_x_categories.divide(stations_x_categories.max(),axis=1)
stations_x_categories
from sklearn.cluster import KMeans
kclusters = 4
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(stations_x_categories)
prediction = pd.DataFrame({'reference':stations_x_categories.index,'cluster':kmeans.labels_}).set_index('reference')
stations_x_prediction = stations.set_index('desc').join(prediction).reset_index()
map_predicted = folium.Map(location=[bkk_lat, bkk_lng], zoom_start=12)
palette = ["hsl({}, 100%, 50%)".format(360*x/kclusters) for x in range(0,kclusters)]
for lat, lng, name, cluster in zip(stations_x_prediction['lat'], stations_x_prediction['lng'], stations_x_prediction['desc'], stations_x_prediction['cluster']):
    label = folium.Popup(name, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color=palette[cluster],
        fill=True,
        fill_color=palette[cluster],
        fill_opacity=0.7
    ).add_to(map_predicted)  
    
map_predicted<jupyter_output><empty_output><jupyter_text>### Describe meaning of each cluster<jupyter_code>from scipy import stats
stations_x_categories_prediction = stations_x_categories.join(prediction)
distinction_categories = []
for k in range(0,kclusters):
    for c in selected_categories['desc']:
        # ANOVA
        f_val, p_val = stats.f_oneway(stations_x_categories_prediction[stations_x_categories_prediction['cluster']==k][c], stations_x_categories_prediction[stations_x_categories_prediction['cluster']!=k][c])
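        # (added note) the one-way ANOVA compares category c's normalised counts inside cluster k
        # against all other stations; a large F with p < 0.05 suggests c helps distinguish cluster k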
        distinction_categories += [(k,c,f_val,p_val)]
summary = pd.DataFrame(distinction_categories,columns=['cluster','category','F','P'])
summary = summary[summary['P']<0.05]
summary.sort_values(by=['cluster','F'],ascending=False)
pd.set_option('display.max_colwidth', 80)
prediction.reset_index().groupby('cluster').aggregate(','.join).sort_values(by=['cluster'],ascending=False)<jupyter_output><empty_output> | 
	no_license | 
	/bangkok.ipynb | 
	Woracheth/Coursera_Capstone | 7 | 
| 
	<jupyter_start><jupyter_text># Polynomial Regression  
Simple linear regression, covered earlier, is an algorithm that describes the relationship between two variables as a straight line.
Using a polynomial function, a more complex, curved regression line can be expressed.<jupyter_code>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression      # linear regression
from sklearn.preprocessing import PolynomialFeatures   # polynomial feature transformation
df = pd.read_csv('../002/auto-mpg.csv', header=None)
df.columns = ['mpg','cylinders','displacement','horsepower','weight',
              'acceleration','model year','origin','name'] 
df['horsepower'].replace('?', np.nan, inplace=True)
df.dropna(subset=['horsepower'], axis=0, inplace=True)
df['horsepower'] = df['horsepower'].astype('float')
features = df[['mpg', 'cylinders', 'horsepower', 'weight']]
X=df[['weight']]  # independent variable X
y=df['mpg']       # dependent variable y<jupyter_output><empty_output><jupyter_text>### Split into train and test data (80:20 ratio)<jupyter_code>X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
print('훈련 데이터: ', X_train.shape)
print('검증 데이터: ', X_test.shape)   <jupyter_output>훈련 데이터:  (313, 1)
검증 데이터:  (79, 1)
<jupyter_text>## Polynomial transformation
Polynomial regression uses a polynomial of degree two or higher to describe the relationship between the two variables.
For example, a quadratic can be written as y = aX^2 + bX + c, and training finds the three coefficients a, b and c to complete the model.#### Apply the polynomial terms (degree 3 is used here)<jupyter_code>poly = PolynomialFeatures(degree=3)<jupyter_output><empty_output><jupyter_text>#### Transform the X_train data into polynomial features<jupyter_code>X_train = poly.fit_transform(X_train)<jupyter_output><empty_output><jupyter_text>#### Transform the X_test data into polynomial features<jupyter_code>X_test = poly.fit_transform(X_test)
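# (added note) with degree=3, PolynomialFeatures maps a single column [x] to [1, x, x**2, x**3],
# so the transformed data gains bias, squared and cubed weight terms as extra columns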
print('기존 데이터: ', X_train.shape)
print('2차항 변환 데이터: ', X_train.shape)
X_train
X_train<jupyter_output><empty_output><jupyter_text>### Train the model with the training data<jupyter_code>pr = LinearRegression()
pr.fit(X_train, y_train)
r_square = pr.score(X_test,y_test)
print(r_square)<jupyter_output>0.71376243950938
<jupyter_text>#### Plot the training-data scatter together with the regression line predicted on the test data <jupyter_code>y_hat_test = pr.predict(X_test)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
ax.plot(X_train, y_train, 'o', label='Train Data')  # data distribution
ax.plot(X_test, y_hat_test, 'r+', label='Predicted Value') # regression line learned by the model
ax.legend(loc='best')
plt.xlabel('weight')
plt.ylabel('mpg')
plt.show()<jupyter_output><empty_output><jupyter_text>#### Feed the full X data into the model and compare the predicted y_hat with the actual y <jupyter_code>X_poly = poly.fit_transform(X)
y_hat = pr.predict(X_poly)
plt.figure(figsize=(10, 5))
ax1 = sns.distplot(y, hist=False, label="y")
ax2 = sns.distplot(y_hat, hist=False, label="y_hat", ax=ax1)
plt.show()<jupyter_output><empty_output> | 
	permissive | 
	/AI_Class/003/Polynomial_regression.ipynb | 
	CodmingOut/AI_Mentoring | 8 | 
| 
	<jupyter_start><jupyter_text>## Deliverable 3. Create a Travel Itinerary Map.<jupyter_code># Dependencies and Setup
import pandas as pd
import requests
import gmaps
import numpy as np
import gmaps.datasets
# Import API key
from config import g_key
# Configure gmaps
gmaps.configure(api_key=g_key)
# 1. Read the WeatherPy_vacation.csv into a DataFrame.
vacation_df = pd.read_csv("../Vacation_Search/WeatherPy_vacation.csv")
vacation_df.head()
# 2. Using the template add the city name, the country code, the weather description and maximum temperature for the city.
info_box_template = """
<dl>
<dt>Hotel Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
<dt>Current Weather</dt><dd>{Current Description} and {Max Temp} °F</dd>
</dl>
"""
# 3a. Get the data from each row and add it to the formatting template and store the data in a list.
hotel_info = [info_box_template.format(**row) for index, row in vacation_df.iterrows()]
# 3b. Get the latitude and longitude from each row and store in a new DataFrame.
locations = vacation_df[["Lat", "Lng"]]
# 4a. Add a marker layer for each city to the map.
max_temp = vacation_df["Max Temp"]
fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5)
heat_layer = gmaps.heatmap_layer(locations, weights=max_temp,dissipating=False,
             max_intensity=300, point_radius=4)
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(heat_layer)
fig.add_layer(marker_layer)
# 4b. Display the figure
fig
# From the map above pick 4 cities and create a vacation itinerary route to travel between the four cities. 
# 5. Create DataFrames for each city by filtering the 'vacation_df' using the loc method. 
# Hint: The starting and ending city should be the same city.
vacation_start = vacation_df.loc[(vacation_df["Country"] == "BR") &\
                                  (vacation_df["City"] == "Guaruja")]
vacation_end = vacation_df.loc[(vacation_df["Country"] == "BR") &\
                                  (vacation_df["City"] == "Guaruja")]
vacation_stop1 = vacation_df.loc[(vacation_df["Country"] == "BR") &\
                                  (vacation_df["City"] == "Sao Pedro")]
vacation_stop2 = vacation_df.loc[(vacation_df["Country"] == "BR") &\
                                  (vacation_df["City"] == "Esmeraldas")]
vacation_stop3 = vacation_df.loc[(vacation_df["Country"] == "BR") &\
                                  (vacation_df["City"] == "Arraial Do Cabo")]
# 6. Get the latitude-longitude pairs as tuples from each city DataFrame using the to_numpy function and list indexing.
start = vacation_start["Lat"].to_numpy()[0],vacation_start["Lng"].to_numpy()[0]
end = vacation_end["Lat"].to_numpy()[0],vacation_end["Lng"].to_numpy()[0]
stop1 = vacation_stop1["Lat"].to_numpy()[0],vacation_stop1["Lng"].to_numpy()[0]
stop2 = vacation_stop2["Lat"].to_numpy()[0],vacation_stop2["Lng"].to_numpy()[0]
stop3 = vacation_stop3["Lat"].to_numpy()[0],vacation_stop3["Lng"].to_numpy()[0]
# 7. Create a direction layer map using the start and end latitude-longitude pairs,
# and stop1, stop2, and stop3 as the waypoints. The travel_mode should be "DRIVING", "BICYCLING", or "WALKING".
fig = gmaps.figure()
start2end_via_cities = gmaps.directions_layer(
        start, end, waypoints=[stop1,stop2,stop3],
        travel_mode ="WALKING")
fig.add_layer(start2end_via_cities)
fig
# 8. To create a marker layer map between the four cities.
#  Combine the four city DataFrames into one DataFrame using the concat() function.
itinerary_df = pd.concat([vacation_start,vacation_stop1,vacation_stop2,vacation_stop3],ignore_index=True)
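# (added note) the end city equals the start city (Guaruja), so only the start and the three stops are
# concatenated here; including vacation_end as well would just duplicate that marker on the map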
itinerary_df
# 9 Using the template add city name, the country code, the weather description and maximum temperature for the city. 
info_box_template = """
<dl>
<dt>Hotel Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
<dt>Current Weather</dt><dd>{Current Description} and {Max Temp} °F</dd>
</dl>
"""
# 10a Get the data from each row and add it to the formatting template and store the data in a list.
hotel_info = [info_box_template.format(**row) for index, row in itinerary_df.iterrows()]
# 10b. Get the latitude and longitude from each row and store in a new DataFrame.
locations = itinerary_df[["Lat", "Lng"]]
# 11a. Add a marker layer for each city to the map.
max_temp = itinerary_df["Max Temp"]
fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5)
heat_layer = gmaps.heatmap_layer(locations, weights=max_temp,dissipating=False,
             max_intensity=300, point_radius=4)
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(heat_layer)
fig.add_layer(marker_layer)
# 11b. Display the figure
fig<jupyter_output><empty_output> | 
	no_license | 
	/Vacation_Itinerary/Vacation_Itinerary.ipynb | 
	rtippana1/World_Weather_Analysis | 1 | 
| 
	<jupyter_start><jupyter_text>## Python Machine Learning
# Logistic Regression- Despite its name, logistic regression is a **classification** algorithm.
- Logistic regression separates the classes with a **straight line** or a **plane**.<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
iris = load_iris()<jupyter_output><empty_output><jupyter_text>### A first taste of logistic regression<jupyter_code>X = iris.data
y = iris.target
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X, y)
model.score(X, y)
pred_y = model.predict(X)
(pred_y==y).mean()<jupyter_output><empty_output><jupyter_text>### Restrict to two features<jupyter_code># sepal length and sepal width
plt.scatter(iris.data[:,0], iris.data[:,1], c=iris.target)
X = iris.data[:,:2]
y = iris.target
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X, y)
model.score(X, y)
pred_y = model.predict(X)
(pred_y==y).mean()
import mglearn
plt.figure(figsize=[8,6])
mglearn.plots.plot_2d_classification(model, X, alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)<jupyter_output><empty_output><jupyter_text>### Restrict to two features and two classes<jupyter_code>### sepal length/petal length, versicolor/virginica
X = iris.data[50:,[0,2]]
X.shape
y = iris.target[50:]
y = np.where(y==1, 0, 1) # y = np.array([0]*50 + [1]*50)
y
plt.scatter(X[:,0], X[:,1], c=y, alpha=0.5)
plt.colorbar(shrink=0.7)
model = LogisticRegression()
model.fit(X, y)
model.score(X, y)
pred_y = model.predict(X)
(pred_y==y).mean()
import mglearn
plt.figure(figsize=[8,6])
mglearn.plots.plot_2d_classification(model, X, alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)
help(LogisticRegression)<jupyter_output>Help on class LogisticRegression in module sklearn.linear_model.logistic:
class LogisticRegression(sklearn.base.BaseEstimator, sklearn.linear_model.base.LinearClassifierMixin, sklearn.linear_model.base.SparseCoefMixin)
 |  Logistic Regression (aka logit, MaxEnt) classifier.
 |  
 |  In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
 |  scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
 |  entropy loss if the 'multi_class' option is set to 'multinomial'.
 |  (Currently the 'multinomial' option is supported only by the 'lbfgs',
 |  'sag' and 'newton-cg' solvers.)
 |  
 |  This class implements regularized logistic regression using the
 |  'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
 |  both dense and sparse input. Use C-ordered arrays or CSR matrices
 |  containing 64-bit floats for optimal performance; any other input format
 |  will be converted (and copied).
 |  
 |  The 'newton-cg', 'sag', and 'lbfgs' solve[...]<jupyter_text>### The C option
- The larger C is, the more sharply it cuts (the boundary fits the training data tightly)
- The smaller C is, the more roughly it cuts<jupyter_code>model = LogisticRegression(C=100)
model.fit(X, y)
model.score(X, y)
pred_y = model.predict(X)
(pred_y==y).mean()
plt.figure(figsize=[8,6])
mglearn.plots.plot_2d_classification(model, X, alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)<jupyter_output><empty_output><jupyter_text>### How logistic regression works (a neural-network view)
- Apply the sigmoid (logistic) function to the output of a linear regression
- The sigmoid function plays the role of a step function
- The final output lies between 0 and 1 (and is usually close to 0 or 1)
(Source: https://sebastianraschka.com/faq/docs/logisticregr-neuralnet.html)<jupyter_code>def sigmoid(x):
    return 1/(1+np.exp(-x))
sigmoid(-5)
W = np.array([1,1]) # w1, w2
b = 0
pred_y = sigmoid(X@W + b) # predicted values
pred_y
X@W+b
cost = ((pred_y - y)**2).mean() # compute the cost with MSE
cost
W = np.array([1, 1]) # w1, w2
b = -0.1
pred_y = sigmoid(X@W + b)
cost = ((pred_y - y)**2).mean() # MSE
cost
w1=1
w2=1
b=0
pred_y = w1*X[:,0] + w2*X[:,1] + b
pred_y = sigmoid(pred_y)
cost = ((pred_y - y)**2).mean() # MSE
cost<jupyter_output><empty_output><jupyter_text>### Analyzing the sigmoid function<jupyter_code>def sigmoid(x):
    return 1/(1+np.exp(-x))
x = np.arange(-10,10,0.1)
plt.plot(x, sigmoid(x))
x = np.arange(-10,10,0.1)
plt.hlines([0,1], -10,10, linestyles=':')
plt.plot(x, sigmoid(x))
plt.scatter([0],[0.5], s=200)<jupyter_output><empty_output><jupyter_text>- If the final output is 0.3, the sample is classified as label 0, using 0.5 as the threshold
- An output of 0.3 can also be read as a 0.3 probability of label 1 and a 0.7 probability of label 0
- In other words, the sigmoid output can be interpreted as a **probability**### Analyzing the weights<jupyter_code>### sepal length/petal length, versicolor/virginica
X = iris.data[50:,[0,2]]
X.shape
y = iris.target[50:]
y = np.where(y==1, 0, 1) # y = np.array([0]*50 + [1]*50)
y
model = LogisticRegression()
model.fit(X, y)
model.coef_, model.intercept_
w1, w2 = model.coef_[0]
b = model.intercept_[0]
w1, w2, b
plt.figure(figsize=[8,6])
mglearn.plots.plot_2d_classification(model, X, alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)<jupyter_output><empty_output><jupyter_text>
(Source: https://florianhartl.com/logistic-regression-geometric-intuition.html)
(Source: https://livebook.manning.com/book/math-for-programmers/chapter-15/v-11/131)- Equation of a plane
> z = ax + by + c<jupyter_code>rng = np.arange(-5, 5, 0.1)
plt.plot(rng, sigmoid(rng))
plt.plot(rng, sigmoid(2*rng), 'g--')
plt.plot(rng, sigmoid(0.5*rng), 'r--')
plt.legend(['sig(x)', 'sig(2x)', 'sig(x/2)'])
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=[10,8])
ax = Axes3D(fig)
a = np.arange(-4,12,0.2)
b = np.arange(-4,12,0.2)
xx, yy = np.meshgrid(a,b)
ax.plot_surface(xx, yy, model.coef_[0,0]*xx + model.coef_[0,1]*yy + model.intercept_[0],
                shade=True, alpha=0.1, color='b')
ax.plot_wireframe(xx, yy, model.coef_[0,0]*xx + model.coef_[0,1]*yy + model.intercept_[0],
                  rstride=2, cstride=2, color='0.5')
ax.scatter(X[:,0], X[:,1], y, c=y, s=60)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('target')
#ax.view_init(60, 70)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=[10,8])
ax = Axes3D(fig)
a = np.arange(-4,12,0.2)
b = np.arange(-4,12,0.2)
xx, yy = np.meshgrid(a,b)
ax.plot_surface(xx, yy, sigmoid(model.coef_[0,0]*xx + model.coef_[0,1]*yy + model.intercept_[0]),
                shade=True, alpha=0.3, color='b')
ax.plot_wireframe(xx, yy, sigmoid(model.coef_[0,0]*xx + model.coef_[0,1]*yy + model.intercept_[0]),
                  rstride=2, cstride=2, color='0.5')
ax.scatter(X[:,0], X[:,1], y, c=y, s=60)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('target')
ax.view_init(20, -80)
X = iris.data[50:, [0,1]]
y = iris.target[50:]
y = np.where(y==1, 0, 1)
model = LogisticRegression()
model.fit(X, y)
pred_y = model.predict(X)
pred_y
model.coef_, model.intercept_
W = model.coef_[0]
b = model.intercept_[0]
X@W + b # positive means class 1, negative means class 0
sigmoid(X@W + b)
pred_y = np.array(sigmoid(X@W + b)>0.5, dtype=int)
pred_y
pred_y = np.array((X@W + b)>0, dtype=int)
pred_y<jupyter_output><empty_output><jupyter_text>### The predict_proba() function
- model.predict_proba() gives the probability that each sample belongs to each class.
- Each row sums to 1 (100%).<jupyter_code>model.predict_proba(X)
display(model.predict_proba(X)[:10], pred_y[:10], y[:10])
model.predict_proba(X).argmax(axis=1)<jupyter_output><empty_output><jupyter_text>- Computing each sample's probability (its sigmoid value)
> $$ \text{plane height} = w_1 x_1 + w_2 x_2 + b $$
> $$ \text{sigmoid of plane height} = \frac {1} {1 + e^{-(w_1 x_1 + w_2 x_2 + b)}} $$<jupyter_code>n = 1
p1 = sigmoid(model.coef_[0,0]*X[n,0] + model.coef_[0,1]*X[n,1] + model.intercept_[0])
p0 = 1-p1
p0, p1<jupyter_output><empty_output><jupyter_text>### decision_function()
- The distance from each sample to the decision plane given by the linear part of the model<jupyter_code>model.decision_function(X) # X@W + b<jupyter_output><empty_output><jupyter_text>### Multiclass classification
- The case of three or more classes<jupyter_code>X = iris.data[:, [0,1]]
y = iris.target
model = LogisticRegression()
model.fit(X, y)
model.coef_
model.intercept_
sigmoid(X@model.coef_[2] + model.intercept_[2])
sigmoid(X@model.coef_.T + model.intercept_)<jupyter_output><empty_output><jupyter_text>```
z = ax+by+c => ax+by+c=0 => y = -ax/b - c/b
x2 = -w1*x1/w2 - b/w2
```### The case of three classes<jupyter_code>from sklearn.datasets import make_blobs
X, y = make_blobs(300, 2, [[0,0],[-10,10],[10,10]], [2,3,5])
plt.scatter(X[:,0], X[:,1], c=y, alpha=0.5)
plt.colorbar()
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X, y)
score = model.score(X, y)
score
display(model.coef_, model.intercept_)
import mglearn
plt.figure(figsize=[10,8])
mglearn.plots.plot_2d_classification(model, X, alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)<jupyter_output><empty_output><jupyter_text>- A boundary line is drawn between each class and the rest of the classes (one-vs-rest)
- The resulting boundary lines are combined to make the final decision<jupyter_code>plt.figure(figsize=[10,8])
mglearn.plots.plot_2d_classification(model, X, cm='Reds', alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)
w = model.coef_
b = model.intercept_
rng = np.array([X[:,0].min(), X[:,0].max()])
for i in range(3):
    plt.plot(rng, -(w[i,0]*rng + b[i])/w[i,1], ':', lw=4, label='line'+str(i))
plt.legend()
pred_y = model.predict(X)
display(np.round(model.predict_proba(X)[:10],2), pred_y[:10], y[:10])
model.decision_function(X)[:10]<jupyter_output><empty_output><jupyter_text>### Computing the probabilities by hand<jupyter_code>help(model.predict_proba)
help(LogisticRegression)
result = sigmoid(X@(w.T)+b)
result.shape
result[:10]
prob = result/((result.sum(axis=1)).reshape(-1,1))
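# Note: this per-row normalization of the one-vs-rest sigmoid scores mirrors what
# predict_proba returns for the default multi_class='ovr' setting used here; the
# softmax ('multinomial') version is computed in the next section.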
prob<jupyter_output><empty_output><jupyter_text>### Applying the softmax function
- multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
- If the multi_class option is 'ovr', one-vs-rest is used; if it is 'multinomial', the softmax function is applied
> $ softmax(n) = exp(p_n) / \sum_i exp(p_i) $ 
> $ p_i = \sum_j (w_j*x_j) + b $<jupyter_code>result = X@(w.T)+b
result.shape
prob = np.exp(result)/((np.exp(result).sum(axis=1)).reshape(-1,1))
np.round(prob[:10], 2)<jupyter_output><empty_output><jupyter_text>### The important option C!- The key option of logistic regression is C.
- The larger C is, the harder the model tries to fit the training data, i.e., the more it overfits.<jupyter_code>model = LogisticRegression(C=10000)
model.fit(X, y)
score = model.score(X, y)
print(score)
plt.figure(figsize=[10,8])
mglearn.plots.plot_2d_classification(model, X, cm='Reds', alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)
w = model.coef_
b = model.intercept_
rng = np.array([X[:,0].min(), X[:,0].max()])
for i in range(3):
    plt.plot(rng, -(w[i,0]*rng + b[i])/w[i,1], ':', lw=4)<jupyter_output>0.9866666666666667
<jupyter_text>- The term $ \frac{1}{C} \cdot \sum_i w_i^2 $ is added to the cost function
> The reciprocal of C is $ \alpha $ (giving $ \alpha \cdot \sum_i w_i^2 $)
> With penalty='l1', the added term is $ \frac{1}{C} \cdot \sum_i |w_i| $ instead<jupyter_code>help(LogisticRegression)<jupyter_output>Help on class LogisticRegression in module sklearn.linear_model.logistic:
class LogisticRegression(sklearn.base.BaseEstimator, sklearn.linear_model.base.LinearClassifierMixin, sklearn.linear_model.base.SparseCoefMixin)
 |  LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, warm_start=False, n_jobs=1)
 |  
 |  Logistic Regression (aka logit, MaxEnt) classifier.
 |  
 |  In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
 |  scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
 |  entropy loss if the 'multi_class' option is set to 'multinomial'.
 |  (Currently the 'multinomial' option is supported only by the 'lbfgs',
 |  'sag' and 'newton-cg' solvers.)
 |  
 |  This class implements regularized logistic regression using the
 |  'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It [...]<jupyter_text>### The Iris data - limited to two features<jupyter_code>from sklearn.datasets import load_iris
iris = load_iris()
col1 = 1
col2 = 3
X = iris.data[:, [col1,col2]]
y = iris.target
X.shape, y.shape
plt.scatter(X[:,0], X[:,1], c=y, s=60)
plt.colorbar()
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(C=1)
model.fit(X, y)
score = model.score(X, y)
score
display(model.coef_, model.intercept_)
import mglearn
plt.figure(figsize=[10,8])
mglearn.plots.plot_2d_classification(model, X, cm='Reds', alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)
plt.figure(figsize=[10,8])
mglearn.plots.plot_2d_classification(model, X, cm='Reds', alpha=0.3)
mglearn.discrete_scatter(X[:,0], X[:,1], y)
rng = np.array([X[:,0].min(), X[:,0].max()])
for i in range(3):
    plt.plot(rng, -(model.coef_[i,0]*rng + model.intercept_[i])/model.coef_[i,1], ':', lw=4)<jupyter_output><empty_output><jupyter_text>### The Iris data - using all features<jupyter_code>from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
X_train.shape, X_test.shape
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
score_train = model.score(X_train, y_train)
score_test = model.score(X_test, y_test)
print(score_train, score_test)
display(model.coef_, model.intercept_)
np.round(model.predict_proba(X_test), 3)[:5], y_test[:5]
pred_y = model.predict(X_test)
y_test[y_test!=pred_y]
np.round(model.predict_proba(X_test)[y_test==2], 3)<jupyter_output><empty_output><jupyter_text>### Varying the C option and checking for overfitting<jupyter_code>from sklearn.model_selection import train_test_split
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
s1 = []
s2 = []
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
for c in Cs:
    model = LogisticRegression(C=c)
    model.fit(X_train, y_train)
    score_train = model.score(X_train, y_train)
    score_test = model.score(X_test, y_test)
    s1.append(score_train)
    s2.append(score_test)
    
plt.plot(s1,'bo:')
plt.plot(s2,'rs-')
plt.legend(['train','test'])
plt.xticks(range(len(Cs)),Cs)
plt.ylim(0,1)
plt.xlabel('C')
plt.ylabel('score')<jupyter_output><empty_output><jupyter_text>### Applying the sigmoid function<jupyter_code>plt.figure(figsize=[12,8])
for col in range(4):
    plt.subplot(2,2,col+1)
    plt.scatter(iris.data[:,col], iris.target + np.random.normal(0,0.03,size=len(y)), c=iris.target, s=30, alpha=0.3)
    plt.yticks([0,1,2], ['Setosa', 'Versicolor', 'Virginica'], rotation=90)
    plt.title(iris.feature_names[col], fontsize=15)
X = iris.data[:,[2]]
y = iris.target.copy()
y[y==2] = 1
plt.scatter(X[:,0], y, c=y, s=30)
plt.colorbar()
def sigmoid(t):
    return 1/(1+np.exp(-t))
rng = np.arange(-5,5,0.1)
plt.plot(rng, sigmoid(rng))
plt.hlines([0,0.5,1],-5,5,linestyles='dotted')
plt.vlines([0],0,1,linestyles='dotted')
plt.title('Sigmoid')
plt.scatter(X[:,0], y, c=y, s=30)
plt.colorbar()
rng = np.arange(1,7,0.1)
plt.plot(rng, sigmoid(2*(rng-2.5)), 'r--')
from sklearn.linear_model import LogisticRegression
C = 1
model = LogisticRegression(C=C)
model.fit(X, y)
display(model.score(X, y), model.coef_, model.intercept_)
plt.scatter(X[:,0], y, c=y, s=30)
plt.colorbar()
rng = np.arange(1,7,0.1)
plt.plot(rng, model.coef_[0]*rng + model.intercept_)
경계값 = -model.intercept_/model.coef_
경계값
plt.figure(figsize=[10,8])
plt.scatter(X[:,0], y, c=y, s=30)
plt.colorbar()
rng = np.arange(1,7,0.1)
plt.plot(rng, model.coef_[0]*rng + model.intercept_)
plt.plot(rng, sigmoid(model.coef_[0,0]*rng+model.intercept_[0]), 'r--')
plt.vlines([-model.intercept_[0]/model.coef_[0,0]],0,1,linestyles='dotted')
plt.hlines([0],1,7,linestyles='dashed', alpha=0.3)
plt.text(3, 0.5, 'boundary = %.3f' % (-model.intercept_[0]/model.coef_[0,0]))
plt.title('LogisticRegression (C=%f)' % C)
plt.axis('scaled')
plt.ylim(-0.5, 1.5)<jupyter_output><empty_output><jupyter_text>### Related techniques
- Formulas
$$ sigmoid(t) = \frac{1}{1 + e^{-t}} $$
$$ t = w \cdot x + b $$
$$ sigmoid(t) = \frac{1}{1 + e^{-(w \cdot x + b)}} $$
$$ t = w_1 \cdot x_1 + w_2 \cdot x_2 + ... + b $$
$$ t = w_0 \cdot x_0 + w_1 \cdot x_1 + w_2 \cdot x_2 + ... , (x_0=1)$$
- The softmax function
- Cross entropy### Cross entropy
- Used as the cost function of logistic regression#### Two-class case
- cross_entropy = (-y * np.log(pred_y) - (1-y) * np.log(1-pred_y)).sum()#### Three or more classes
- cross_entropy = (-y * np.log(pred_y)).sum()
- Here, pred_y is the output of the softmax function
- And y must be one-hot encoded, e.g. [1,0,0], [0,1,0], [0,0,1] (a small sketch of this case appears in the code below)<jupyter_code>pred_y = [0.7, 0.2, 0.5, 0.3, 0.9]
y = [1, 0, 1, 1, 0]
mse = ((0.3)**2 + (0.2)**2 + (0.5)**2 + (0.7)**2 + (0.9)**2)/5
mse
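# A minimal sketch of the 3+ class cross entropy described above; probs_mc and y_onehot
# are made-up illustrative values (softmax outputs and one-hot encoded labels).
probs_mc = np.array([[0.7, 0.2, 0.1],
                     [0.1, 0.6, 0.3]])          # softmax outputs, each row sums to 1
y_onehot = np.array([[1, 0, 0],
                     [0, 1, 0]])                # one-hot encoded true labels
ce_multi = (-y_onehot * np.log(probs_mc)).sum() # multiclass cross entropy
ce_multi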
cross_entropy = -np.log(0.7) - np.log(1-0.2) - np.log(0.5) - \
    np.log(0.3) - np.log(1-0.9)
cross_entropy<jupyter_output><empty_output> | 
	no_license | 
	/머신러닝/02_지도학습_03_로지스틱회귀.ipynb | 
	gubosd/lecture13 | 24 | 
| 
	<jupyter_start><jupyter_text># Population Segmentation with SageMaker
In this notebook, you'll employ two, unsupervised learning algorithms to do **population segmentation**. Population segmentation aims to find natural groupings in population data that reveal some feature-level similarities between different regions in the US.
Using **principal component analysis** (PCA) you will reduce the dimensionality of the original census data. Then, you'll use **k-means clustering** to assign each US county to a particular cluster based on where a county lies in component space. How each cluster is arranged in component space can tell you which US counties are most similar and what demographic traits define that similarity; this information is most often used to inform targeted, marketing campaigns that want to appeal to a specific group of people. This cluster information is also useful for learning more about a population by revealing patterns between regions that you otherwise may not have noticed.
### US Census Data
You'll be using data collected by the [US Census](https://en.wikipedia.org/wiki/United_States_Census), which aims to count the US population, recording demographic traits about labor, age, population, and so on, for each county in the US. The bulk of this notebook was taken from an existing SageMaker example notebook and [blog post](https://aws.amazon.com/blogs/machine-learning/analyze-us-census-data-for-population-segmentation-using-amazon-sagemaker/), and I've broken it down further into demonstrations and exercises for you to complete.
### Machine Learning Workflow
To implement population segmentation, you'll go through a number of steps:
* Data loading and exploration
* Data cleaning and pre-processing 
* Dimensionality reduction with PCA
* Feature engineering and data transformation
* Clustering transformed data with k-means
* Extracting trained model attributes and visualizing k clusters
These tasks make up a complete, machine learning workflow from data loading and cleaning to model deployment. Each exercise is designed to give you practice with part of the machine learning workflow, and to demonstrate how to use SageMaker tools, such as built-in data management with S3 and built-in algorithms.
---First, import the relevant libraries into this SageMaker notebook. <jupyter_code># data managing and display libs
import pandas as pd
import numpy as np
import os
import io
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline 
# sagemaker libraries
import boto3
import sagemaker<jupyter_output><empty_output><jupyter_text>## Loading the Data from Amazon S3
This particular dataset is already in an Amazon S3 bucket; you can load the data by pointing to this bucket and getting a data file by name. 
> You can interact with S3 using a `boto3` client.<jupyter_code># boto3 client to get S3 data
s3_client = boto3.client('s3')
bucket_name='aws-ml-blog-sagemaker-census-segmentation'<jupyter_output><empty_output><jupyter_text>Take a look at the contents of this bucket; get a list of objects that are contained within the bucket and print out the names of the objects. You should see that there is one file, 'Census_Data_for_SageMaker.csv'.<jupyter_code># get a list of objects in the bucket
obj_list=s3_client.list_objects(Bucket=bucket_name)
# print object(s)in S3 bucket
files=[]
for contents in obj_list['Contents']:
    files.append(contents['Key'])
    
print(files)
# there is one file --> one key
file_name=files[0]
print(file_name)<jupyter_output>Census_Data_for_SageMaker.csv
<jupyter_text>Retrieve the data file from the bucket with a call to `client.get_object()`.<jupyter_code># get an S3 object by passing in the bucket and file name
data_object = s3_client.get_object(Bucket=bucket_name, Key=file_name)
# what info does the object contain?
display(data_object)
# information is in the "Body" of the object
data_body = data_object["Body"].read()
print('Data type: ', type(data_body))<jupyter_output>Data type:  <class 'bytes'>
<jupyter_text>This is a `bytes` datatype, which you can read in using [io.BytesIO(file)](https://docs.python.org/3/library/io.html#binary-i-o).<jupyter_code># read in bytes data
data_stream = io.BytesIO(data_body)
# create a dataframe
counties_df = pd.read_csv(data_stream, header=0, delimiter=",") 
counties_df.head()<jupyter_output><empty_output><jupyter_text>## Exploratory Data Analysis (EDA)
Now that you've loaded in the data, it is time to clean it up, explore it, and pre-process it. Data exploration is one of the most important parts of the machine learning workflow because it allows you to notice any initial patterns in data distribution and features that may inform how you proceed with modeling and clustering the data.
### EXERCISE: Explore data & drop any incomplete rows of data
When you first explore the data, it is good to know what you are working with. How many data points and features are you starting with, and what kind of information can you get at a first glance? In this notebook, you're required to use complete data points to train a model. So, your first exercise will be to investigate the shape of this data and implement a simple, data cleaning step: dropping any incomplete rows of data.
You should be able to answer the **question**: How many data points and features are in the original, provided dataset? (And how many points are left after dropping any incomplete rows?)There are 3218 non-null data points remaining, out of the original 3320 data points.<jupyter_code>counties_df.shape
counties_df.head()
counties_df.tail()
counties_df.info()
counties_df.describe()
# print out stats about data
# drop any incomplete rows of data, and create a new df
clean_counties_df = counties_df.dropna()
<jupyter_output><empty_output><jupyter_text>### EXERCISE: Create a new DataFrame, indexed by 'State-County'
Eventually, you'll want to feed these features into a machine learning model. Machine learning models need numerical data to learn from and not categorical data like strings (State, County). So, you'll reformat this data such that it is indexed by region and you'll also drop any features that are not useful for clustering.
To complete this task, perform the following steps, using your *clean* DataFrame, generated above:
1. Combine the descriptive columns, 'State' and 'County', into one, new categorical column, 'State-County'. 
2. Index the data by this unique State-County name.
3. After doing this, drop the old State and County columns and the CensusId column, which does not give us any meaningful demographic information.
After completing this task, you should have a DataFrame with 'State-County' as the index, and 34 columns of numerical data for each county. You should get a resultant DataFrame that looks like the following (truncated for display purposes):
```
                TotalPop	 Men	  Women	Hispanic	...
                
Alabama-Autauga	55221	 26745	28476	2.6         ...
Alabama-Baldwin	195121	95314	99807	4.5         ...
Alabama-Barbour	26932	 14497	12435	4.6         ...
...
```<jupyter_code># index data by 'State-County'
clean_counties_df.index = clean_counties_df['State'] + "-" + clean_counties_df['County']
# drop the old State and County columns, and the CensusId column
# clean df should be modified or created anew
clean_counties_df = clean_counties_df.drop(['State','County','CensusId'],axis=1)
clean_counties_df.head()<jupyter_output><empty_output><jupyter_text>Now, what features do you have to work with?<jupyter_code># features
features_list = clean_counties_df.columns.values
print('Features: \n', features_list)<jupyter_output>Features: 
 ['TotalPop' 'Men' 'Women' 'Hispanic' 'White' 'Black' 'Native' 'Asian'
 'Pacific' 'Citizen' 'Income' 'IncomeErr' 'IncomePerCap' 'IncomePerCapErr'
 'Poverty' 'ChildPoverty' 'Professional' 'Service' 'Office' 'Construction'
 'Production' 'Drive' 'Carpool' 'Transit' 'Walk' 'OtherTransp'
 'WorkAtHome' 'MeanCommute' 'Employed' 'PrivateWork' 'PublicWork'
 'SelfEmployed' 'FamilyWork' 'Unemployment']
<jupyter_text>## Visualizing the Data
In general, you can see that features come in a variety of ranges, mostly percentages from 0-100, and counts that are integer values in a large range. Let's visualize the data in some of our feature columns and see what the distribution, over all counties, looks like.
The below cell displays **histograms**, which show the distribution of data points over discrete feature ranges. The x-axis represents the different bins; each bin is defined by a specific range of values that a feature can take, say between the values 0-5 and 5-10, and so on. The y-axis is the frequency of occurrence or the number of county data points that fall into each bin. I find it helpful to use the y-axis values for relative comparisons between different features.
Below, I'm plotting a histogram comparing methods of commuting to work over all of the counties. I just copied these feature names from the list of column names, printed above. I also know that all of these features are represented as percentages (%) in the original data, so the x-axes of these plots will be comparable.<jupyter_code># transportation (to work)
transport_list = ['Drive', 'Carpool', 'Transit', 'Walk', 'OtherTransp']
n_bins = 30 # can decrease to get a wider bin (or vice versa)
for column_name in transport_list:
    ax=plt.subplots(figsize=(6,3))
    # get data by column_name and display a histogram
    ax = plt.hist(clean_counties_df[column_name], bins=n_bins)
    title="Histogram of " + column_name
    plt.title(title, fontsize=12)
    plt.show()<jupyter_output><empty_output><jupyter_text>### EXERCISE: Create histograms of your own
Commute transportation method is just one category of features. If you take a look at the 34 features, you can see data on profession, race, income, and more. Display a set of histograms that interest you!
<jupyter_code># create a list of features that you want to compare or examine
my_list = ['Hispanic','White','Black','Native','Asian','Pacific']
n_bins = 50 # define n_bins
# histogram creation code is similar to above
for column_name in my_list:
    ax=plt.subplots(figsize=(6,3))
    # get data by column_name and display a histogram
    ax = plt.hist(clean_counties_df[column_name], bins=n_bins)
    title="Histogram of " + column_name
    plt.title(title, fontsize=12)
    plt.show()<jupyter_output><empty_output><jupyter_text>### EXERCISE: Normalize the data
You need to standardize the scale of the numerical columns in order to consistently compare the values of different features. You can use a [MinMaxScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) to transform the numerical values so that they all fall between 0 and 1.<jupyter_code># scale numerical features into a normalized range, 0-1
# store them in this dataframe
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
counties_scaled = pd.DataFrame(scaler.fit_transform(clean_counties_df))
counties_scaled.columns = clean_counties_df.columns
counties_scaled.index = clean_counties_df.index
counties_scaled.head()<jupyter_output><empty_output><jupyter_text>---
# Data Modeling
Now, the data is ready to be fed into a machine learning model!
Each data point has 34 features, which means the data is 34-dimensional. Clustering algorithms rely on finding clusters in n-dimensional feature space. For higher dimensions, an algorithm like k-means has a difficult time figuring out which features are most important, and the result is, often, noisier clusters.
Some dimensions are not as important as others. For example, if every county in our dataset has the same rate of unemployment, then that particular feature doesn’t give us any distinguishing information; it will not help to separate counties into different groups because its value doesn’t *vary* between counties.
> Instead, we really want to find the features that help to separate and group data. We want to find features that cause the **most variance** in the dataset!
So, before I cluster this data, I’ll want to take a dimensionality reduction step. My aim will be to form a smaller set of features that will better help to separate our data. The technique I’ll use is called PCA or **principal component analysis**
## Dimensionality Reduction
PCA attempts to reduce the number of features within a dataset while retaining the “principal components”, which are defined as *weighted*, linear combinations of existing features that are designed to be linearly independent and account for the largest possible variability in the data! You can think of this method as taking many features and combining similar or redundant features together to form a new, smaller feature set.
We can reduce dimensionality with the built-in SageMaker model for PCA.### Roles and Buckets
> To create a model, you'll first need to specify an IAM role, and to save the model attributes, you'll need to store them in an S3 bucket.
The `get_execution_role` function retrieves the IAM role you created at the time you created your notebook instance. Roles are essentially used to manage permissions and you can read more about that [in this documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). For now, know that we have a FullAccess notebook, which allowed us to access and download the census data stored in S3.
You must specify a bucket name for an S3 bucket in your account where you want SageMaker model parameters to be stored. Note that the bucket must be in the same region as this notebook. You can get a default S3 bucket, which automatically creates a bucket for you and in your region, by storing the current SageMaker session and calling `session.default_bucket()`.<jupyter_code>from sagemaker import get_execution_role
session = sagemaker.Session() # store the current SageMaker session
# get IAM role
role = get_execution_role()
print(role)
# get default bucket
bucket_name = session.default_bucket()
print(bucket_name)
print()<jupyter_output>sagemaker-us-east-2-475103112026
<jupyter_text>## Define a PCA Model
To create a PCA model, I'll use the built-in SageMaker resource. A SageMaker estimator requires a number of parameters to be specified; these define the type of training instance to use and the model hyperparameters. A PCA model requires the following constructor arguments:
* role: The IAM role, which was specified, above.
* train_instance_count: The number of training instances (typically, 1).
* train_instance_type: The type of SageMaker instance for training.
* num_components: An integer that defines the number of PCA components to produce.
* sagemaker_session: The session used to train on SageMaker.
Documentation on the PCA model can be found [here](http://sagemaker.readthedocs.io/en/latest/pca.html).
Below, I first specify where to save the model training data, the `output_path`.<jupyter_code># define location to store model artifacts
prefix = 'counties'
output_path='s3://{}/{}/'.format(bucket_name, prefix)
print('Training artifacts will be uploaded to: {}'.format(output_path))
# define a PCA model
from sagemaker import PCA
# this is current features - 1
# you'll select only a portion of these to use, later
N_COMPONENTS=33
pca_SM = PCA(role=role,
             train_instance_count=1,
             train_instance_type='ml.c4.xlarge',
             output_path=output_path, # specified, above
             num_components=N_COMPONENTS, 
             sagemaker_session=session)
<jupyter_output><empty_output><jupyter_text>### Convert data into a RecordSet format
Next, prepare the data for a built-in model by converting the DataFrame to a numpy array of float values.
The *record_set* function in the SageMaker PCA model converts a numpy array into a **RecordSet** format that is the required format for the training input data. This is a requirement for _all_ of SageMaker's built-in models. The use of this data type is one of the reasons that allows training of models within Amazon SageMaker to perform faster, especially for large datasets.<jupyter_code># convert df to np array
train_data_np = counties_scaled.values.astype('float32')
# convert to RecordSet format
formatted_train_data = pca_SM.record_set(train_data_np)<jupyter_output><empty_output><jupyter_text>## Train the model
Call the fit function on the PCA model, passing in our formatted, training data. This spins up a training instance to perform the training job.
Note that it takes the longest to launch the specified training instance; the fitting itself doesn't take much time.<jupyter_code>%%time
# train the PCA mode on the formatted data
pca_SM.fit(formatted_train_data)<jupyter_output>'get_image_uri' method will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.
's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.
'get_image_uri' method will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.
<jupyter_text>## Accessing the PCA Model Attributes
After the model is trained, we can access the underlying model parameters.
### Unzip the Model Details
Now that the training job is complete, you can find the job under **Jobs** in the **Training**  subsection  in the Amazon SageMaker console. You can find the job name listed in the training jobs. Use that job name in the following code to specify which model to examine.
Model artifacts are stored in S3 as a TAR file; a compressed file in the output path we specified + 'output/model.tar.gz'. The artifacts stored here can be used to deploy a trained model.<jupyter_code># Get the name of the training job, it's suggested that you copy-paste
# from the notebook or from a specific job in the AWS console
training_job_name='pca-2020-09-05-02-46-14-785'
# where the model is saved, by default
model_key = os.path.join(prefix, training_job_name, 'output/model.tar.gz')
print(model_key)
# download and unzip model
boto3.resource('s3').Bucket(bucket_name).download_file(model_key, 'model.tar.gz')
# unzipping as model_algo-1
os.system('tar -zxvf model.tar.gz')
os.system('unzip model_algo-1')<jupyter_output>counties/pca-2020-09-05-02-46-14-785/output/model.tar.gz
<jupyter_text>### MXNet Array
Many of the Amazon SageMaker algorithms use MXNet for computational speed, including PCA, and so the model artifacts are stored as an array. After the model is unzipped and decompressed, we can load the array using MXNet.
You can take a look at the MXNet [documentation, here](https://aws.amazon.com/mxnet/).<jupyter_code>import mxnet as mx
# loading the unzipped artifacts
pca_model_params = mx.ndarray.load('model_algo-1')
# what are the params
print(pca_model_params)<jupyter_output>{'s': 
[1.7896362e-02 3.0864021e-02 3.2130770e-02 3.5486195e-02 9.4831578e-02
 1.2699370e-01 4.0288666e-01 1.4084760e+00 1.5100485e+00 1.5957943e+00
 1.7783760e+00 2.1662524e+00 2.2966361e+00 2.3856051e+00 2.6954880e+00
 2.8067985e+00 3.0175958e+00 3.3952675e+00 3.5731301e+00 3.6966958e+00
 4.1890211e+00 4.3457499e+00 4.5410376e+00 5.0189657e+00 5.5786467e+00
 5.9809699e+00 6.3925138e+00 7.6952214e+00 7.9913125e+00 1.0180052e+01
 1.1718245e+01 1.3035975e+01 1.9592180e+01]
<NDArray 33 @cpu(0)>, 'v': 
[[ 2.46869749e-03  2.56468095e-02  2.50773830e-03 ... -7.63925165e-02
   1.59879066e-02  5.04589686e-03]
 [-2.80601848e-02 -6.86634064e-01 -1.96283013e-02 ... -7.59587288e-02
   1.57304872e-02  4.95312130e-03]
 [ 3.25766727e-02  7.17300594e-01  2.40726061e-02 ... -7.68136829e-02
   1.62378680e-02  5.13597298e-03]
 ...
 [ 1.12151138e-01 -1.17030945e-02 -2.88011521e-01 ...  1.39890045e-01
  -3.09406728e-01 -6.34506866e-02]
 [ 2.99992133e-02 -3.13433539e-03 -7.63589665e-02 ...  4.17341813e-02
[...]<jupyter_text>## PCA Model Attributes
Three types of model attributes are contained within the PCA model.
* **mean**: The mean that was subtracted from a component in order to center it.
* **v**: The makeup of the principal components; (same as ‘components_’ in an sklearn PCA model).
* **s**: The singular values of the components for the PCA transformation. This does not exactly give the % variance from the original feature space, but can give the % variance from the projected feature space.
    
We are only interested in v and s. 
From s, we can get an approximation of the data variance that is covered in the first `n` principal components. The approximate explained variance is given by the formula: the sum of squared s values for the top n components over the sum of squared s values for _all_ components:
\begin{equation*}
\frac{\sum_{i=1}^{n} s_i^2}{\sum_{i} s_i^2}
\end{equation*}
From v, we can learn more about the combinations of original features that make up each principal component.
<jupyter_code># get selected params
s=pd.DataFrame(pca_model_params['s'].asnumpy())
v=pd.DataFrame(pca_model_params['v'].asnumpy())<jupyter_output><empty_output><jupyter_text>## Data Variance
Our current PCA model creates 33 principal components, but when we create new dimensionality-reduced training data, we'll only select a few, top n components to use. To decide how many top components to include, it's helpful to look at how much **data variance** the components capture. For our original, high-dimensional data, 34 features captured 100% of our data variance. If we discard some of these higher dimensions, we will lower the amount of variance we can capture.
### Tradeoff: dimensionality vs. data variance
As an illustrative example, say we have original data in three dimensions. So, three dimensions capture 100% of our data variance; these dimensions cover the entire spread of our data. The below images are taken from the PhD thesis,  [“Approaches to analyse and interpret biological profile data”](https://publishup.uni-potsdam.de/opus4-ubp/frontdoor/index/index/docId/696) by Matthias Scholz, (2006, University of Potsdam, Germany).
Now, you may also note that most of this data seems related; it falls close to a 2D plane, and just by looking at the spread of the data, we  can visualize that the original, three dimensions have some correlation. So, we can instead choose to create two new dimensions, made up of linear combinations of the original, three dimensions. These dimensions are represented by the two axes/lines, centered in the data. 
If we project this in a new, 2D space, we can see that we still capture most of the original data variance using *just* two dimensions. There is a tradeoff between the amount of variance we can capture and the number of component-dimensions we use to represent our data.
When we select the top n components to use in a new data model, we'll typically want to include enough components to capture about 80-90% of the original data variance. In this project, we are looking at generalizing over a lot of data and we'll aim for about 80% coverage.**Note**: The _top_ principal components, with the largest s values, are actually at the end of the s DataFrame. Let's print out the s values for the top n, principal components.<jupyter_code># looking at top 5 components
n_principal_components = 5
start_idx = N_COMPONENTS - n_principal_components  # 33-n
# print a selection of s
print(s.iloc[start_idx:, :])<jupyter_output>            0
28   7.991313
29  10.180052
30  11.718245
31  13.035975
32  19.592180
<jupyter_text>### EXERCISE: Calculate the explained variance
In creating new training data, you'll want to choose the top n principal components that account for at least 80% data variance. 
Complete a function, `explained_variance` that takes in the entire array `s` and a number of top principal components to consider. Then return the approximate, explained variance for those top n components. 
For example, to calculate the explained variance for the top 5 components, calculate s squared for *each* of the top 5 components, add those up and normalize by the sum of *all* squared s values, according to this formula:
\begin{equation*}
\frac{\sum_{i=1}^{5} s_i^2}{\sum_{i} s_i^2}
\end{equation*}
> Using this function, you should be able to answer the **question**: What is the smallest number of principal components that captures at least 80% of the total variance in the dataset?<jupyter_code># Calculate the explained variance for the top n principal components
# you may assume you have access to the global var N_COMPONENTS
def explained_variance(s, n_top_components):
    '''Calculates the approx. data variance that n_top_components captures.
       :param s: A dataframe of singular values for top components; 
           the top value is in the last row.
       :param n_top_components: An integer, the number of top components to use.
       :return: The expected data variance covered by the n_top_components.'''
    
    start_idx = N_COMPONENTS - n_top_components  # the top components sit at the end of s
    variance = np.square(s.iloc[start_idx:, :]).sum()/np.square(s).sum()
    return variance<jupyter_output><empty_output><jupyter_text>### Test Cell
Test out your own code by seeing how it responds to different inputs; does it return a reasonable value for the single, top component? What about for the top 5 components?<jupyter_code># test cell
n_top_components = 1 # select a value for the number of top components
# calculate the explained variance
exp_variance = explained_variance(s, n_top_components)
print('Explained variance: ', exp_variance)<jupyter_output>Explained variance:  0    0.717983
dtype: float32
<jupyter_text>As an example, you should see that the top principal component accounts for about 32% of our data variance! Next, you may be wondering what makes up this (and other components); what linear combination of features make these components so influential in describing the spread of our data?
Below, let's take a look at our original features and use that as a reference.<jupyter_code># features
features_list = counties_scaled.columns.values
print('Features: \n', features_list)<jupyter_output>Features: 
 ['TotalPop' 'Men' 'Women' 'Hispanic' 'White' 'Black' 'Native' 'Asian'
 'Pacific' 'Citizen' 'Income' 'IncomeErr' 'IncomePerCap' 'IncomePerCapErr'
 'Poverty' 'ChildPoverty' 'Professional' 'Service' 'Office' 'Construction'
 'Production' 'Drive' 'Carpool' 'Transit' 'Walk' 'OtherTransp'
 'WorkAtHome' 'MeanCommute' 'Employed' 'PrivateWork' 'PublicWork'
 'SelfEmployed' 'FamilyWork' 'Unemployment']
<jupyter_text>## Component Makeup
We can now examine the makeup of each PCA component based on **the weightings of the original features that are included in the component**. The following code shows the feature-level makeup of the first component.
Note that the components are again ordered from smallest to largest and so I am getting the correct rows by calling N_COMPONENTS-1 to get the top, 1, component.<jupyter_code>import seaborn as sns
def display_component(v, features_list, component_num, n_weights=10):
    
    # get index of component (last row - component_num)
    row_idx = N_COMPONENTS-component_num
    # get the list of weights from a row in v, dataframe
    v_1_row = v.iloc[:, row_idx]
    v_1 = np.squeeze(v_1_row.values)
    # match weights to features in counties_scaled dataframe, using list comprehension
    comps = pd.DataFrame(list(zip(v_1, features_list)), 
                         columns=['weights', 'features'])
    # we'll want to sort by the largest n_weights
    # weights can be neg/pos and we'll sort by magnitude
    comps['abs_weights']=comps['weights'].apply(lambda x: np.abs(x))
    sorted_weight_data = comps.sort_values('abs_weights', ascending=False).head(n_weights)
    # display using seaborn
    ax=plt.subplots(figsize=(10,6))
    ax=sns.barplot(data=sorted_weight_data, 
                   x="weights", 
                   y="features", 
                   palette="Blues_d")
    ax.set_title("PCA Component Makeup, Component #" + str(component_num))
    plt.show()
# display makeup of first component
num=1
display_component(v, counties_scaled.columns.values, component_num=num, n_weights=10)<jupyter_output><empty_output><jupyter_text># Deploying the PCA Model
We can now deploy this model and use it to make "predictions". Instead of seeing what happens with some test data, we'll actually want to pass our training data into the deployed endpoint to create principal components for each data point. 
Run the cell below to deploy/host this model on an instance_type that we specify.<jupyter_code>%%time
# this takes a little while, around 7mins
pca_predictor = pca_SM.deploy(initial_instance_count=1, 
                              instance_type='ml.t2.medium')<jupyter_output>Parameter image will be renamed to image_uri in SageMaker Python SDK v2.
<jupyter_text>We can pass the original, numpy dataset to the model and transform the data using the model we created. Then we can take the largest n components to reduce the dimensionality of our data.<jupyter_code># pass np train data to the PCA model
train_pca = pca_predictor.predict(train_data_np)
# check out the first item in the produced training features
data_idx = 0
print(train_pca[data_idx])<jupyter_output>label {
  key: "projection"
  value {
    float32_tensor {
      values: 0.0002009272575378418
      values: 0.0002455431967973709
      values: -0.0005782842636108398
      values: -0.0007815659046173096
      values: -0.00041911262087523937
      values: -0.0005133943632245064
      values: -0.0011316537857055664
      values: 0.0017268601804971695
      values: -0.005361668765544891
      values: -0.009066537022590637
      values: -0.008141040802001953
      values: -0.004735097289085388
      values: -0.00716288760304451
      values: 0.0003725700080394745
      values: -0.01208949089050293
      values: 0.02134685218334198
      values: 0.0009293854236602783
      values: 0.002417147159576416
      values: -0.0034637749195098877
      values: 0.01794189214706421
      values: -0.01639425754547119
      values: 0.06260128319263458
      values: 0.06637358665466309
      values: 0.002479255199432373
      values: 0.10011336207389832
      values: -0.1136140376329422
      values: 0[...]<jupyter_text>### EXERCISE: Create a transformed DataFrame
For each of our data points, get the top n component values from the list of component data points, returned by our predictor above, and put those into a new DataFrame.
You should end up with a DataFrame that looks something like the following:
```
                     c_1	     c_2	       c_3	       c_4	      c_5	   ...
Alabama-Autauga	-0.060274	0.160527	-0.088356	 0.120480	-0.010824	...
Alabama-Baldwin	-0.149684	0.185969	-0.145743	-0.023092	-0.068677	...
Alabama-Barbour	0.506202	 0.296662	 0.146258	 0.297829	0.093111	...
...
```<jupyter_code># create dimensionality-reduced data
def create_transformed_df(train_pca, counties_scaled, n_top_components):
    ''' Return a dataframe of data points with component features. 
        The dataframe should be indexed by State-County and contain component values.
        :param train_pca: A list of pca training data, returned by a PCA model.
        :param counties_scaled: A dataframe of normalized, original features.
        :param n_top_components: An integer, the number of top components to use.
        :return: A dataframe, indexed by State-County, with n_top_component values as columns.        
     '''
    reduced_df = pd.DataFrame([data.label.get('projection').float32_tensor.values for data in train_pca])
    reduced_df.index=counties_scaled.index
    start_idx = N_COMPONENTS - n_top_components
    reduced_df = reduced_df.iloc[:,start_idx:]
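    # the PCA model stores components from smallest to largest singular value, so the
    # slice above keeps the last n columns; the reversal below puts the top component
    # first, matching the c_1, c_2, ... column names assigned later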
        
    return reduced_df.iloc[:, ::-1]<jupyter_output><empty_output><jupyter_text>Now we can create a dataset where each county is described by the top n principal components that we analyzed earlier. Each of these components is a linear combination of the original feature space. We can interpret each of these components by analyzing the makeup of the component, shown previously.
### Define the `top_n` components to use in this transformed data
Your code should return data, indexed by 'State-County' and with as many columns as `top_n` components.
You can also choose to add descriptive column names for this data; names that correspond to the component number or feature-level makeup.<jupyter_code>## Specify top n
top_n = 7
# call your function and create a new dataframe
counties_transformed = create_transformed_df(train_pca, counties_scaled, n_top_components=top_n)
counties_transformed
## TODO: Add descriptive column names
counties_transformed.columns = [f"c_{i}" for i in range(1,top_n+1)]
# print result
counties_transformed.head()<jupyter_output><empty_output><jupyter_text>### Delete the Endpoint!
Now that we've deployed the model and created our new, transformed training data, we no longer need the PCA endpoint.
As a clean up step, you should always delete your endpoints after you are done using them (and if you do not plan to deploy them to a website, for example).<jupyter_code># delete predictor endpoint
session.delete_endpoint(pca_predictor.endpoint)<jupyter_output><empty_output><jupyter_text>---
# Population Segmentation 
Now, you’ll use the unsupervised clustering algorithm, k-means, to segment counties using their PCA attributes, which are in the transformed DataFrame we just created. K-means is a clustering algorithm that identifies clusters of similar data points based on their component makeup. Since we have ~3000 counties and 34 attributes in the original dataset, the large feature space may have made it difficult to cluster the counties effectively. Instead, we have reduced the feature space to 7 PCA components, and we’ll cluster on this transformed dataset.### EXERCISE: Define a k-means model
Your task will be to instantiate a k-means model. A `KMeans` estimator requires a number of parameters to be instantiated, which allow us to specify the type of training instance to use, and the model hyperparameters. 
You can read about the required parameters, in the [`KMeans` documentation](https://sagemaker.readthedocs.io/en/stable/kmeans.html); note that not all of the possible parameters are required.
### Choosing a "Good" K
One method for choosing a "good" k, is to choose based on empirical data. A bad k would be one so *high* that only one or two very close data points are near it, and another bad k would be one so *low* that data points are really far away from the centers.
You want to select a k such that data points in a single cluster are close together but that there are enough clusters to effectively separate the data. You can approximate this separation by measuring how close your data points are to each cluster center; the average centroid distance between cluster points and a centroid. After trying several values for k, the centroid distance typically reaches some "elbow"; it stops decreasing at a sharp rate and this indicates a good value of k. The graph below indicates the average centroid distance for value of k between 5 and 12.
A distance elbow can be seen around k=8, where the average centroid distance stops decreasing at a sharp rate. This indicates that there is enough separation to distinguish the data points in each cluster, but also that you included enough clusters so that the data points aren’t *extremely* far away from each cluster. (A rough, local version of this check is sketched inside the next cell.)<jupyter_code># define a KMeans estimator
from sagemaker import KMeans
k = 8
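# A rough, local sketch of the elbow check described above (an illustrative aside, not the
# SageMaker workflow): sklearn's KMeans inertia divided by the number of counties gives the
# mean squared distance to the closest centroid, a stand-in for the average centroid
# distance described above.
from sklearn.cluster import KMeans as SkKMeans
k_range = range(5, 13)
avg_sq_dist = []
for k_try in k_range:
    sk_model = SkKMeans(n_clusters=k_try, random_state=0).fit(counties_transformed.values)
    avg_sq_dist.append(sk_model.inertia_ / len(counties_transformed))
plt.plot(list(k_range), avg_sq_dist, 'o-')
plt.xlabel('k')
plt.ylabel('mean squared distance to closest centroid')
plt.show()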
kmeans = KMeans(role=role,
             train_instance_count=1,
             train_instance_type='ml.c4.xlarge',
             output_path=output_path,
             k=k)<jupyter_output><empty_output><jupyter_text>### EXERCISE: Create formatted, k-means training data
Just as before, you should convert the `counties_transformed` df into a numpy array and then into a RecordSet. This is the required format for passing training data into a `KMeans` model.<jupyter_code># convert the transformed dataframe into record_set data
kmeans_train_data = counties_transformed.values.astype('float32')
kmeans_rs_data = kmeans.record_set(kmeans_train_data)
kmeans_rs_data<jupyter_output><empty_output><jupyter_text>### EXERCISE: Train the k-means model
Pass in the formatted training data and train the k-means model.<jupyter_code>%%time
# train kmeans
kmeans.fit(kmeans_rs_data)<jupyter_output>'get_image_uri' method will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.
's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.
'get_image_uri' method will be deprecated in favor of 'ImageURIProvider' class in SageMaker Python SDK v2.
<jupyter_text>### EXERCISE: Deploy the k-means model
Deploy the trained model to create a `kmeans_predictor`.
<jupyter_code>%%time
# deploy the model to create a predictor
kmeans_predictor = kmeans.deploy(initial_instance_count=1, 
                                 instance_type='ml.t2.medium')<jupyter_output>Parameter image will be renamed to image_uri in SageMaker Python SDK v2.
<jupyter_text>### EXERCISE: Pass in the training data and assign predicted cluster labels
After deploying the model, you can pass in the k-means training data, as a numpy array, and get resultant, predicted cluster labels for each data point.<jupyter_code># get the predicted clusters for all the kmeans training data
cluster_info = kmeans_predictor.predict(kmeans_train_data)<jupyter_output><empty_output><jupyter_text>## Exploring the resultant clusters
The resulting predictions should give you information about the cluster that each data point belongs to.
You should be able to answer the **question**: which cluster does a given data point belong to?<jupyter_code># print cluster info for first data point
data_idx = 0
print('County is: ', counties_transformed.index[data_idx])
print()
print(cluster_info[data_idx])<jupyter_output>County is:  Alabama-Autauga
label {
  key: "closest_cluster"
  value {
    float32_tensor {
      values: 4.0
    }
  }
}
label {
  key: "distance_to_cluster"
  value {
    float32_tensor {
      values: 0.292652428150177
    }
  }
}
<jupyter_text>### Visualize the distribution of data over clusters
Get the cluster labels for each of our data points (counties) and visualize the distribution of points over each cluster.<jupyter_code># get all cluster labels
cluster_labels = [c.label['closest_cluster'].float32_tensor.values[0] for c in cluster_info]
# count up the points in each cluster
cluster_df = pd.DataFrame(cluster_labels)[0].value_counts()
print(cluster_df)<jupyter_output>4.0    1017
1.0     740
3.0     405
0.0     348
6.0     316
2.0     246
5.0      88
7.0      58
Name: 0, dtype: int64
<jupyter_text>Now, you may be wondering, what do each of these clusters tell us about these data points? To improve explainability, we need to access the underlying model to get the cluster centers. These centers will help describe which features characterize each cluster.### Delete the Endpoint!
Now that you've deployed the k-means model and extracted the cluster labels for each data point, you no longer need the k-means endpoint.<jupyter_code># delete kmeans endpoint
session.delete_endpoint(kmeans_predictor.endpoint)<jupyter_output><empty_output><jupyter_text>---
# Model Attributes & Explainability
Explaining the result of the modeling is an important step in making use of our analysis. By combining PCA and k-means, and the information contained in the model attributes within a SageMaker trained model, you can learn about a population and remark on some patterns you've found, based on the data.### EXERCISE: Access the k-means model attributes
Extract the k-means model attributes from where they are saved as a TAR file in an S3 bucket.
You'll need to access the model by the k-means training job name, and then unzip the file into `model_algo-1`. Then you can load that file using MXNet, as before.<jupyter_code>kmeans.latest_training_job.job_name
# download and unzip the kmeans model file
# use the name model_algo-1
training_job_name = 'kmeans-2020-09-05-03-17-56-018'
model_key = os.path.join(prefix, training_job_name, 'output/model.tar.gz')
boto3.resource('s3').Bucket(bucket_name).download_file(model_key, 'model.tar.gz')
os.system('tar -zxvf model.tar.gz')
os.system('unzip model_algo-1')
# get the trained kmeans params using mxnet
kmeans_model_params = mx.ndarray.load('model_algo-1')
print(kmeans_model_params)<jupyter_output>[
[[-1.3107985e-01  3.3773825e-02 -3.9478469e-01  9.1041394e-02
  -2.7093090e-02  7.1059391e-02 -6.4738458e-03]
 [-2.4891388e-01  1.9016270e-02 -3.6323592e-02 -4.6322465e-02
  -4.5324493e-02 -4.2182978e-02 -3.9403420e-04]
 [ 3.4540069e-01 -2.2590320e-01 -9.2412315e-02 -1.4390261e-01
   8.7686941e-02 -8.0439001e-02 -6.2484115e-02]
 [ 3.4308207e-01  2.3579836e-01  6.0148031e-02  2.5438562e-01
   9.5744088e-02 -5.0610404e-02  4.4720769e-02]
 [-6.4765245e-02  8.9860350e-02  1.1614791e-01 -6.9009513e-02
  -2.3470856e-02  3.2169998e-02 -2.5825333e-02]
 [ 1.3612065e+00 -2.2541526e-01 -1.4059803e-01 -4.3050602e-01
  -1.4698723e-01  1.3038024e-01  1.7214298e-01]
 [-2.6872978e-01 -3.9669171e-01  9.1291942e-02  9.1403872e-02
   6.3021392e-02  1.0890700e-02  1.1242294e-01]
 [ 3.6904910e-01 -6.2983692e-01  9.7744316e-02  3.4077433e-01
  -1.3024789e-01 -4.5226868e-03 -2.4969791e-01]]
<NDArray 8x7 @cpu(0)>]
<jupyter_text>There is only 1 set of model parameters contained within the k-means model: the cluster centroid locations in PCA-transformed, component space.
* **centroids**: The location of the centers of each cluster in component space, identified by the k-means algorithm. 
<jupyter_code># get all the centroids
cluster_centroids=pd.DataFrame(kmeans_model_params[0].asnumpy())
cluster_centroids.columns=counties_transformed.columns
display(cluster_centroids)<jupyter_output><empty_output><jupyter_text>### Visualizing Centroids in Component Space
You can't visualize 7-dimensional centroids in space, but you can plot a heatmap of the centroids and their location in the transformed feature space. 
This gives you insight into what characteristics define each cluster. Often with unsupervised learning, results are hard to interpret. This is one way to make use of the results of PCA + clustering techniques, together. Since you were able to examine the makeup of each PCA component, you can understand what each centroid represents in terms of the PCA components.<jupyter_code># generate a heatmap in component space, using the seaborn library
plt.figure(figsize = (12,9))
ax = sns.heatmap(cluster_centroids.T, cmap = 'YlGnBu')
ax.set_xlabel("Cluster")
plt.yticks(fontsize = 16)
plt.xticks(fontsize = 16)
ax.set_title("Attribute Value by Centroid")
plt.show()<jupyter_output><empty_output><jupyter_text>If you've forgotten what each component corresponds to at an original-feature-level, that's okay! You can use the previously defined `display_component` function to see the feature-level makeup.<jupyter_code># what do each of these components mean again?
# let's use the display function, from above
component_num=7
display_component(v, counties_scaled.columns.values, component_num=component_num)<jupyter_output><empty_output><jupyter_text>### Natural Groupings
You can also map the cluster labels back to each individual county and examine which counties are naturally grouped together.<jupyter_code># add a 'labels' column to the dataframe
counties_transformed['labels']=list(map(int, cluster_labels))
# sort by cluster label 0-6
sorted_counties = counties_transformed.sort_values('labels', ascending=True)
# view some pts in cluster 0
sorted_counties.head(20)<jupyter_output><empty_output><jupyter_text>You can also examine one of the clusters in more detail, like cluster 1, for example. A quick glance at the location of the centroid in component space (the heatmap) tells us that it has the highest value for the `comp_6` attribute. You can now see which counties fit that description.<jupyter_code># get all counties with label == 1
cluster=counties_transformed[counties_transformed['labels']==1]
cluster.head()<jupyter_output><empty_output> | 
	permissive | 
	/Population_Segmentation/Pop_Segmentation_Exercise.ipynb | 
	xwilchen/ML_SageMaker_Studies | 42 | 
| 
	<jupyter_start><jupyter_text>Michael Siripongpibul 
CAP4630<jupyter_code>import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
%tensorflow_version 2.x
from tensorflow.keras import models
from tensorflow.keras import layers
import tensorflow as tf
<jupyter_output><empty_output><jupyter_text># Question 1 Generate and Display Random Data
 The get_random_data function takes five parameters: w, b, mu, sigma, and m, where mu is the mean and sigma is the standard deviation. The function returns the array data of shape (m, 2) and the array labels of shape (m, 1). <jupyter_code>def get_random_data(w,b,mu,sigma,m):
  # np.random.seed(1)
  labels = []
  data = []
  
  for x in range(0, m):
    labels.append(np.random.choice([0,1]))
  for x in range(0,m):
    c = labels[x]
    n = np.random.default_rng().normal(mu, sigma, 1)
    x_1 = random.uniform(0,1)
    filler = w * x_1 + b + (-1)**c * n
    x_2 = filler[0]
    data.append((x_1,x_2))
  return data, labels
<jupyter_output><empty_output><jupyter_text>display_random_data takes the arrays data and labels and creates a scatter plot of the points in data, along with the lines passed in.<jupyter_code>def display_random_data(data, labels, x_new, y_new, old_x, old_y, x1_w, x2_w):
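  # Scatter the 2D points in `data`, coloured red (label 1) / blue (label 0), then overlay the
  # predicted line (green), the actual line (yellow) and a line built from x1_w/x2_w (black).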
  fig2 = plt.figure()
  ax = plt #.axes(projection='3d')
  xs = [x[0] for x in data]
  ys = [x[1] for x in data]
  # ax.scatter3D(xs, ys)
  for x in range(0,len(labels)):
    if labels[x] == 1:
      ax.scatter(xs[x],ys[x], color = 'r')
    else:
      ax.scatter(xs[x],ys[x], color = 'b')
  ax.plot(x_new, y_new, color='green', label ="Predicted")
  ax.plot(old_x, old_y, color='yellow', label ="Actual")
  x_h = np.linspace(0,1,3)
  plt.plot(x_h, x_h*x1_w + x2_w, color = 'black')
  ax.legend()
  ax.plot()
data, labels = get_random_data(3,3,1,1,1000)
display_random_data(data, labels, 0, 0, 0, 0, 0 , 0)<jupyter_output><empty_output><jupyter_text># Question 2 Logistic Regression with Keras. Splits the data generated in Question 1 and uses Keras to implement logistic regression with the two included features, then plots 2 lines separating the blue and red dots: one is the actual line and the other is the one predicted by logistic regression.<jupyter_code>#Splitting the data into a training set and a validation set
# training set
train_data = data[:800]
train_labels = labels[:800]
train_data = np.array(train_data)
train_labels = np.array(train_labels)
# test set
test_data = data[800:]
test_labels = labels[800:]
test_data = np.array(test_data)
test_labels = np.array(test_labels)
#Creating the model
network = models.Sequential()
network.add(layers.Dense(1, activation = 'sigmoid', input_shape=(2,)))
#Training
network.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.01), loss='binary_crossentropy', metrics=['accuracy'])
network.fit(train_data, train_labels, epochs=100, validation_data=(test_data, test_labels))<jupyter_output>Epoch 1/100
25/25 [==============================] - 0s 8ms/step - loss: 0.6061 - accuracy: 0.6762 - val_loss: 0.6019 - val_accuracy: 0.6400
Epoch 2/100
25/25 [==============================] - 0s 2ms/step - loss: 0.5832 - accuracy: 0.6888 - val_loss: 0.5769 - val_accuracy: 0.7150
Epoch 3/100
25/25 [==============================] - 0s 3ms/step - loss: 0.5661 - accuracy: 0.7325 - val_loss: 0.5618 - val_accuracy: 0.7600
Epoch 4/100
25/25 [==============================] - 0s 2ms/step - loss: 0.5500 - accuracy: 0.7663 - val_loss: 0.5466 - val_accuracy: 0.7600
Epoch 5/100
25/25 [==============================] - 0s 2ms/step - loss: 0.5339 - accuracy: 0.7862 - val_loss: 0.5330 - val_accuracy: 0.7650
Epoch 6/100
25/25 [==============================] - 0s 3ms/step - loss: 0.5210 - accuracy: 0.7987 - val_loss: 0.5211 - val_accuracy: 0.7700
Epoch 7/100
25/25 [==============================] - 0s 3ms/step - loss: 0.5084 - accuracy: 0.8175 - val_loss: 0.5113 - val_accuracy: 0.7700
Epoch 8/100
2[...]<jupyter_text># Model Accuracy<jupyter_code>test_loss, test_acc = network.evaluate(test_data, test_labels)
test_acc
test_loss
<jupyter_output><empty_output><jupyter_text>Showing predicted Line and the actual line
Actual line:
$Y = w*x+b$
Predicted Line:
$x_2 = -\frac{w_1}{w_2}\,x_1 - \frac{b}{w_2}$<jupyter_code>#obtain the separating line determined by the model by extracting the weights from the dense layer using the function get_weights
x = np.linspace(0, 1, 100)
weights = network.layers[0].get_weights()
#bias
b = weights[1][0]
#weights
x1_w = weights[0][0][0]
x2_w = weights[0][1][0]
#calculations
y = (-b/x2_w) + ((-b/x2_w)/(b/x1_w)) * x
old_x = np.array(range(0, 2))
old_y = 3 * old_x + 3
display_random_data(data, labels, x,y, old_x, old_y,0 ,0)
<jupyter_output><empty_output><jupyter_text>The predicted line is a bit hard to see behind the yellow line.# HeatMap Visualization of model
to visualize $f: \mathbb{R}^2 \to \mathbb{R}$ <jupyter_code>x_h = np.linspace( 0, 1, 100)
y_h = np.linspace( 1, 6, 100)
X,Y = np.meshgrid(x_h,y_h)
# Y = np.meshgrid(y_h)
z = network.predict(np.c_[X.flatten(),Y.flatten()])
z = z.reshape((100,100))
plt.contourf(X,Y,z,50)
plt.colorbar()
plt.xlabel('X_1')
plt.ylabel('X_2')<jupyter_output><empty_output><jupyter_text># Question 3 Using numpy to implement logistic regression
# Unfinished<jupyter_code>#Source https://github.com/schneider128k/machine_learning_course/blob/master/linear_regression_gradient_descent.ipynb
#Source https://github.com/schneider128k/machine_learning_course/blob/master/slides/logistic_regression.pdf<jupyter_output><empty_output><jupyter_text>Sigmoid function: 
$\sigma(z)=\frac{1}{1+e^{-z}}$
Binary cross-entropy loss function:
$L(y,a)=-y \log a - (1-y)\log(1-a)$
<jupyter_code>def sigmoid(x):
  res = 1/(1+np.exp(-x))
  return res
def bce(y, a):
  res = -y*np.log(a)-(1-y)*np.log(1-a)
  return res
epoches= 200
lr = .01
b_size = 800
weights = np.random.rand(3,1)
X_b = np.column_stack((train_data,np.ones(shape = (b_size, 1))))
weight_path_mgd = []
weight_path_mgd.append(weights)
for epoch in range(epoches):
  # shuffled_indices = np.random.permutation()
  # X_b_shuffled = train_data[shuffled_indices]
  # y_shuffled = y[shuffled_indices]
  for i in range(b_size):
    z= np.dot(X_b[i],weights)
    a= sigmoid(z)
        # gradient = 1 / batch_size * xi.T.dot(xi.dot(weight) - yi)
        # weight = weight - lr * gradient
    # gradient = 1/b_size * X_b.T.dot(X_b.dot(weights)-labels)
    gradient = 1/b_size * (X_b[i]*(a-labels[i])).T
    weights = weights-lr*gradient
layer = network.layers[0]
weight3, bw = layer.get_weights()
weight1 = weight3[0]
weight2= weight3[1]
b_1 = bw
#bias
b = weights[1][0]
#weights
x1_w = weights[0]/weights[1]
#calculations
# y = (-b/x2_w) + ((-b/x2_w)/(b/x1_w)) * x
x2_w = -weights[2]/weights[1]
old_x = np.array(range(0, 2))
old_y = 3 * old_x + 3
display_random_data(data, labels, 0,0, old_x, old_y, x1_w, x2_w)
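
# Illustrative sketch (not part of the original assignment): a minimal, self-contained
# logistic regression trained with plain batch gradient descent, for comparison with the
# unfinished loop above. Only numpy is used.
def train_logreg_gd(X, y, lr=0.1, epochs=500):
    # X: (n, d) feature matrix, y: (n,) labels in {0, 1}
    n, d = X.shape
    Xb = np.column_stack((X, np.ones(n)))        # append a bias column
    w = np.zeros(d + 1)
    for _ in range(epochs):
        a = 1.0 / (1.0 + np.exp(-Xb.dot(w)))     # sigmoid of the linear scores
        grad = Xb.T.dot(a - y) / n               # gradient of the mean binary cross-entropy
        w = w - lr * grad
    return w

# Example usage with the arrays from Question 1 (left commented out):
# w_gd = train_logreg_gd(train_data, train_labels)
# separating line: x2 = -(w_gd[0] * x1 + w_gd[2]) / w_gd[1]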
<jupyter_output><empty_output><jupyter_text># HeatMap visual - IncompleteSimilar to the previous heatmap but using Z values from the new numpy regression model<jupyter_code>
train_data2 = np.append(np.ones((train_data.shape[0],1)), train_data, axis = 1)
# print(train_data2)
w = np.zeros((train_data2.shape[1],1))
# print(w)
loss = 0
accuracy = 0
for i in range(800):
  pred = sigmoid(np.dot(train_data2,w))
  gradient = 1/b_size * np.dot(train_data2.transpose(),pred-train_labels)
  w = w - (lr*gradient)
  # loss = bce(train_labels,pred) - divided by zero error
  # loss2 = 1/b_size * sum(loss)
b = w[0]
w = w[1]
x_h = np.linspace( 0, .1, 100)
y_h = np.linspace( 0, 1, 100)
X,Y = np.meshgrid(x_h,y_h)
z = network.predict(np.c_[X.flatten()* w[0],Y.flatten()*w[1]])
z = z.reshape((100,100))
plt.contourf(X,Y,z,50)
plt.colorbar()
plt.xlabel('X_1')
plt.ylabel('X_2')<jupyter_output><empty_output><jupyter_text>Accuracy 
<jupyter_code>for i in np.arange(len(test_data)):
  pred = (weight1*test_data[i][0])+(weight2*test_data[i][1])+bw
  a = (sigmoid(pred))
  b = test_labels[i]
  # compare the thresholded prediction against the true label
  predicted = 1 if a > 0.5 else 0
  loss = loss + bce(b, a)
  if predicted == b:
    accuracy = accuracy + 1
  
accuracy = accuracy/len(test_data)
loss = loss/len(test_data);
accuracy
<jupyter_output><empty_output> | 
	no_license | 
	/HW_3/HW_3.ipynb | 
	Michael-Siri/AI-CAP4630 | 11 | 
| 
	<jupyter_start><jupyter_text># Decision boundaries<jupyter_code>def plot_decision_bundaries(model, x, h=0.1, cmap='BrBG', torch_model=True, target_class=0):
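    # Evaluate `model` on a regular grid spanning the range of `x` (step `h`) and shade the grid
    # by the predicted score/probability for `target_class`, revealing the decision boundaries.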
    x1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    x2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, h),
                           np.arange(x2_min, x2_max, h))
    if torch_model:
        xx = torch.FloatTensor(np.c_[xx1.ravel(), xx2.ravel()])
        Z = model(xx).detach().numpy()[:, target_class]
    else:
        xx = np.c_[xx1.ravel(), xx2.ravel()]
        Z = model.predict_proba(xx)[:, 1]
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.2, cmap=cmap)
    return<jupyter_output><empty_output><jupyter_text>## True decision boundaries<jupyter_code>xnp = x_train.detach().numpy()
ynp = y_train.detach().numpy().ravel()
cmap='BrBG'
plt.figure(figsize=[8, 4])
plt.subplot(121)
plt.title(f'Decision boundaries class 0')
plot_decision_bundaries(model, x_train, h=0.01, cmap=cmap)
plt.scatter(xnp[:, 0], xnp[:, 1], c=ynp, cmap=cmap)
plt.xlim([-0.03, 1.03])
plt.ylim([-0.03, 1.03])
plt.subplot(122)
plt.title(f'Decision boundaries class 1')
plot_decision_bundaries(model, x_train, h=0.01, cmap=cmap, target_class=1)
plt.scatter(xnp[:, 0], xnp[:, 1], c=ynp, cmap=cmap)
plt.xlim([-0.03, 1.03])
plt.ylim([-0.03, 1.03])
plt.show()<jupyter_output><empty_output><jupyter_text>## Local decision boundaries<jupyter_code>for sample_id in range(len(y_train)):
    xin = x_train[sample_id]
    model_reduced = get_reduced_model(model, xin)
    for module in model_reduced.children():
        if isinstance(module, torch.nn.Linear):
            wa = module.weight.detach().numpy()
            break
    output = model_reduced(xin)
    target_class = torch.argmax(output).item()
    explanation = logic.relu_nn.explain_local(model, x_train, y_train, 
                                           xin, concept_names=['f1', 'f2'])
    
    model_class = target_class if y_train[sample_id] == 1 else 1-target_class
    plt.figure(figsize=[16, 4])
    plt.subplot(141)
    plt.title(f'True decision boundary class 0')
    plot_decision_bundaries(model, x_train, h=0.01, cmap=cmap, target_class=target_class)
    plt.scatter(xin[0], xin[1], c='k', marker='x', s=100)
    c = plt.Circle((xin[0], xin[1]), radius=0.2, edgecolor='k', fill=False, linestyle='--')
    plt.gca().add_artist(c)
    plt.scatter(xnp[:, 0], xnp[:, 1], c=ynp, cmap=cmap)
    plt.xlim([-0.5, 1.5])
    plt.ylim([-0.5, 1.5])
    plt.subplot(142)
    plt.title(f'True decision boundary class 1')
    plot_decision_bundaries(model, x_train, h=0.01, cmap=cmap, target_class=1-target_class)
    plt.scatter(xin[0], xin[1], c='k', marker='x', s=100)
    c = plt.Circle((xin[0], xin[1]), radius=0.2, edgecolor='k', fill=False, linestyle='--')
    plt.gca().add_artist(c)
    plt.scatter(xnp[:, 0], xnp[:, 1], c=ynp, cmap=cmap)
    plt.xlim([-0.5, 1.5])
    plt.ylim([-0.5, 1.5])
    plt.subplot(143)
    plt.title(f'IN={xin.detach().numpy()} - OUT={output.detach().numpy()}\nW={wa}\nExplanation: {explanation}')
    plot_decision_bundaries(model_reduced, x_train, target_class=model_class)
    plt.scatter(xin[0], xin[1], c='k', marker='x', s=100)
    c = plt.Circle((xin[0], xin[1]), radius=0.2, edgecolor='k', fill=False, linestyle='--')
    plt.gca().add_artist(c)
    plt.scatter(xnp[:, 0], xnp[:, 1], c=ynp, cmap=cmap)
    plt.xlim([-0.5, 1.5])
    plt.ylim([-0.5, 1.5])
    plt.subplot(144)
    plt.title(f'IN={xin.detach().numpy()} - OUT={output.detach().numpy()}\nW={wa}\nExplanation: {explanation}')
    plot_decision_bundaries(model_reduced, x_train, target_class=1-model_class)
    plt.scatter(xin[0], xin[1], c='k', marker='x', s=100)
    c = plt.Circle((xin[0], xin[1]), radius=0.2, edgecolor='k', fill=False, linestyle='--')
    plt.gca().add_artist(c)
    plt.scatter(xnp[:, 0], xnp[:, 1], c=ynp, cmap=cmap)
    plt.xlim([-0.5, 1.5])
    plt.ylim([-0.5, 1.5])
    plt.show()<jupyter_output><empty_output><jupyter_text># Combine local explanations<jupyter_code>explanation, predictions, counter = logic.relu_nn.combine_local_explanations(model, x_train, y_train,
                                                                         target_class=0, 
                                                                          concept_names=['x1', 'x2'])
explanation
explanation, predictions, counter = logic.relu_nn.combine_local_explanations(model, x_train, y_train,
                                                                         target_class=1, 
                                                                          concept_names=['x1', 'x2'])
explanation<jupyter_output><empty_output> | 
	non_permissive | 
	/examples/api_examples/example_pruning_01_xor.ipynb | 
	pietrobarbiero/logic_explainer_networks | 4 | 
| 
	<jupyter_start><jupyter_text># Data Science Academy - Python Fundamentals - Chapter 2
## Download: http://github.com/dsacademybr## Strings### Creating a String
To create a string in Python you can use single or double quotes. For example:<jupyter_code># A single word
'Oi'
# A sentence
'Criando uma string em Python'
# We can use double quotes
"Podemos usar aspas duplas ou simples para strings em Python"
# You can combine double and single quotes
"Testando strings em 'Python'"<jupyter_output><empty_output><jupyter_text>### Imprimindo uma String<jupyter_code>print ('Testando Strings em Python')
print ('Testando \nStrings \nem \nPython')
print ('\n')<jupyter_output>
<jupyter_text>### Indexing Strings<jupyter_code># Assigning a string
s = 'Data Science Academy'
print(s)
# First element of the string.
s[0]
s[1]
s[2]<jupyter_output><empty_output><jupyter_text>We can use a : to perform slicing that reads everything up to a designated point. For example:<jupyter_code># Returns all elements of the string, starting from that position (remember that Python starts indexing at position 0),
# up to the end of the string.
s[1:]
# The original string remains unchanged
s
# Returns everything up to position 3
s[:3]
s[:]
# We can also use negative indexing and read from back to front.
s[-1]
# Return everything except the last letter
s[:-1]<jupyter_output><empty_output><jupyter_text>We can also use index notation to slice the string into specific pieces (the default step is 1). For example, we can use two colons in a row and then a number that specifies the step at which elements are returned. For example:<jupyter_code>s[::1]
s[::2]
s[::-1]<jupyter_output><empty_output><jupyter_text>### String Properties<jupyter_code>s
# Changing a character
s[0] = 'x'
# Concatenating strings
s + ' é a melhor maneira de estar preparado para o mercado de trabalho em Ciência de Dados!'
s = s + ' é a melhor maneira de estar preparado para o mercado de trabalho em Ciência de Dados!'
print(s)
# We can use the multiplication symbol to create repetition!
letra = 'w'
letra * 3<jupyter_output><empty_output><jupyter_text>### Built-in String Functions<jupyter_code>s
# Upper Case 
s.upper()
# Lower case
s.lower()
# Split a string on whitespace (the default)
s.split()
# Split a string on a specific element
s.split('y')<jupyter_output><empty_output><jupyter_text>### String Functions<jupyter_code>s = 'seja bem vindo ao universo de python'
s.capitalize()
s.count('a')
s.find('p')
s.center(20, 'z')
s.isalnum()
s.isalpha()
s.islower()
s.isspace()
s.endswith('o')
s.partition('!')<jupyter_output><empty_output><jupyter_text>### Comparing Strings<jupyter_code>print("Python" == "R")
print("Python" == "Python")<jupyter_output>True
 | 
	no_license | 
	/Cap02/Notebooks/DSA-Python-Cap02-03-Strings.ipynb | 
	dudolbh/PythonAnaliseDados | 9 | 
| 
	<jupyter_start><jupyter_text># Data Analysis with Pandas
Today we introduce pandas, a package that has recently become very popular for data analysis; its author is Wes McKinney. The fact that Python has become a go-to language for data analysis owes a lot to the appearance of pandas.
Although pandas is powerful, some parts of it are not that intuitive, which sometimes makes people think it is an arcane package. In fact you can roughly think of pandas as "Excel for Python", but more powerful, more flexible, and with many more possibilities.
Below is a video that basically teaches pandas as if it were Excel; it should feel quite familiar.
https://youtu.be/9d5-Ti6onew<jupyter_code>import pandas as pd
import matplotlib.pyplot as plt
import numpy as np<jupyter_output><empty_output><jupyter_text>## 1 開始使用 `pandas`
First let's read in a CSV file. Here we have some "fake" exam scores called `grades.csv` to practice on.<jupyter_code>df = pd.read_csv('C:/Users/Owner/IMLP342/Unit02/data/grades.csv')
df<jupyter_output><empty_output><jupyter_text>用 `df` 是標準的叫法 (雖然這名稱我們隨便取也可以), 意思是 Data Frame, 這是 `pandas` 兩大資料結構之一。我們可以把 Data Frame 想成一張表格 (雖然其實可以是很多張表格)。
Let's look at the first five rows of our `df`.<jupyter_code>type(df)
df.head()<jupyter_output><empty_output><jupyter_text>如果你曾經手動讀入 CSV 檔, 就知道這省了多少事 (雖然我個人還挺喜歡純手動帶進 CSV)。#### Excel 檔也可以快速讀入
Not just CSV files: many data files, such as Excel files, can easily be loaded with `pandas`. The usage is:
    df2 = pd.read_excel('filename.xls', 'sheetname')
    
where sheetname should be the name of the worksheet; if it is in Chinese it is best to rename it in English.## 2 Basic pandas data structures
Pandas has two basic data structures:
* DataFrame: can be thought of as a table.
* Series: a single row or column of the table, basically the list or array we are used to
For a DataFrame we have an `index` (the row names) and `columns` (the column names).
#### DataFrame
#### Series
We just said a Series is roughly a list or an array. More precisely, it is an array that carries an "index".
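For example, a tiny hand-built Series with an explicit index (hypothetical values, not taken from `grades.csv`) looks like this:

    scores = pd.Series([15, 12, 9], index=['Amy', 'Bob', 'Carl'])
    scores['Bob']   # look up a value by its index label -> 12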
DataFrame 的每一行或每一列其實也都是一個 series。我們來看個例子, 例如所有同學的國文成績, 就是一個 series。<jupyter_code>df['國文']<jupyter_output><empty_output><jupyter_text>在 Python 3 中, 我們終於可以和英文同步, 用這種很炫的方式叫出所有國文成績。<jupyter_code>df.國文<jupyter_output><empty_output><jupyter_text>#### 資料畫出來
要畫個圖很容易。<jupyter_code>df.國文.plot()<jupyter_output><empty_output><jupyter_text>當然, 在這個例子中, 其實畫 histogram 圖更有意義一點。<jupyter_code>df.國文.hist(bins=15)<jupyter_output><empty_output><jupyter_text>## 3 一些基本的資料分析算平均。<jupyter_code>df.國文.mean()<jupyter_output><empty_output><jupyter_text>算標準差。<jupyter_code>df.國文.std()<jupyter_output><empty_output><jupyter_text>不如就該算的都幫我們算算...<jupyter_code>df.describe()<jupyter_output><empty_output><jupyter_text>有時我們很愛看的相關係數矩陣。<jupyter_code>df.corr()<jupyter_output><empty_output><jupyter_text>只算兩科間的相關係數當然也可以。<jupyter_code>df.國文.corr(df.數學)<jupyter_output><empty_output><jupyter_text>## 4 增加一行### 【技巧】
Let's add a column holding the total score.<jupyter_code>df['總級分'] = df.sum(axis = 1)
df.head()<jupyter_output><empty_output><jupyter_text>### 【技巧】
A computed column is of course also possible.<jupyter_code>df['加權'] = df.國文+df.英文+df.數學*2
df.head()<jupyter_output><empty_output><jupyter_text>## 5 排序和 index 重設### 【重點】排序的方法
我們依總級分來排序。<jupyter_code>df.sort_values(by='總級分',ascending=False).head(10)<jupyter_output><empty_output><jupyter_text>### 【重點】排序的方法
加權分最高, 同分才看總級分<jupyter_code>df2 = df.sort_values(by=['加權','總級分'],ascending=False)
df2.head(10)<jupyter_output><empty_output><jupyter_text>### 【重點】重設 index<jupyter_code>df2.index = range(1,101)
df2.head()<jupyter_output><empty_output><jupyter_text>## 6 篩出我們要的資料
基本上和 NumPy 的 array 篩法很像。### 【重點】
找出數學滿級分同學。<jupyter_code>df2[df2.數學 == 15]<jupyter_output><empty_output><jupyter_text>### 【重點】
找出數學和英文都滿級分的同學。要注意 `and` 要用 `&`, `or` 要用 `|`。每個條件一定要加弧號。<jupyter_code>df_m = df2['數學'] == 15
df_e = df2['英文'] == 15
df2[df_m & df_e]<jupyter_output><empty_output><jupyter_text>## 7 刪除一行或一列### 【重點】刪掉一行
我們來刪掉總級分的那行。<jupyter_code>df2 = df2.drop('總級分',axis=1)
df2<jupyter_output><empty_output><jupyter_text>### 【重點】改變原有的 DataFrame
You will notice that many `pandas` operations do not modify the original DataFrame; to really change it you have to add
    inplace=True<jupyter_code>df2 = df2.drop('總級分',axis=1,inplace=True)<jupyter_output><empty_output><jupyter_text>### 【重點】刪掉一列
刪掉列就是指定要刪去的 index。<jupyter_code>df2
df2.drop(5).head()<jupyter_output><empty_output><jupyter_text>### 【重點】刪掉一列
通常刪掉符合條件的比較合理 (注意是找到要刪掉的部份, 再找出相對的 index)。<jupyter_code>df2[df2['姓名']=='李士賢']
df2.drop(df2[df2['姓名']=='李士賢'].index)<jupyter_output><empty_output><jupyter_text>## 8 真實股價資料
There is a package split out of `pandas` called `pandas-datareader`. It has been through some turbulence: first the Yahoo! financial data stopped working, then the Google data stopped working, but at least for now Yahoo! appears to be usable again.
Installing `pandas-datareader` is a standard `conda` install:
    conda install pandas-datareader
    
If you installed it before but haven't updated it in a while, use:
    conda update pandas-datareader### 【例子】 分析 Apple 股價<jupyter_code>import pandas as pd
import pandas_datareader as pdr
df = pdr.get_data_yahoo('AAPL')
# In case of network problems, we saved this data as aapl.csv; it can be read back in like this.
# df = pd.read_csv('data/aapl.csv', index_col="Date")
df.head()<jupyter_output><empty_output><jupyter_text>#### 只要最後 300 個交易日!<jupyter_code>df = df[-300:]
df<jupyter_output><empty_output><jupyter_text>#### 20 日的移動平均<jupyter_code>df.Close.plot()
df.Close.rolling(20).mean().plot()<jupyter_output><empty_output><jupyter_text>#### 20 日和 60 日的移動平均<jupyter_code>df.Close.plot(legend=True)
df.Close.rolling(20).mean().plot(label="$MA_{20}$",legend=True)
df.Close.rolling(60).mean().plot(label="$MA_{60}$",legend=True)<jupyter_output><empty_output><jupyter_text>#### 準備做預測
We'll use a very naive model...

The internet says this is linear (really?)!
<jupyter_code>Close = df.Close.values
len(Close)
x = Close[:-1]
len(x)
y = Close[1:]
len(y)
plt.scatter(x,y)<jupyter_output><empty_output><jupyter_text>哦, 真的有點像線性的, 我們之後用線性迴歸試試看。## 9 手工打造一個 DataFrame*
Sometimes building a simple DataFrame by hand helps us understand the whole structure better. It is actually easy; a DataFrame basically consists of two main parts:
* the data itself: usually a 2D array (a matrix)
* the names of the rows and columns
我們來個簡單的小例子。<jupyter_code>mydata = np.random.randn(4,3)
mydata<jupyter_output><empty_output><jupyter_text>把行列的名字放進去, 就成一個 DataFrame。我們列的部份先讓 Python 自己產生。<jupyter_code>df2 = pd.DataFrame(mydata, columns=list("ABC"))
df2<jupyter_output><empty_output><jupyter_text>#### 兩個表格上下貼起來
我們再來生一個 DataFrame, 再「貼」起來。<jupyter_code>df3 = pd.DataFrame(np.random.randn(3,3), columns=list("ABC"))
df3
df4 = pd.concat([df2,df3],axis=0) #[]為欄位的參數
df4<jupyter_output><empty_output><jupyter_text>前面我們弄得亂七八糟的 index 重設一下。<jupyter_code>df4.index = range(7)
df4<jupyter_output><empty_output><jupyter_text>#### 横向的貼<jupyter_code>df5 = pd.concat([df2,df3],axis=1)<jupyter_output><empty_output><jupyter_text>等等, 這大小好像不太對也可以嗎? 答案是可以的!<jupyter_code>df5<jupyter_output><empty_output><jupyter_text>#### 大一點的例子
Let's build the "fake" exam-score data from before. First we need a "fake" list of student names; if you want to generate lots of names you can use this service.
[中文姓名產生器](http://www.richyli.com/name/index.asp)<jupyter_code>df_names = pd.read_csv('C:/Users/Owner/IMLP342/Unit02/data/names.csv',names=['姓名'])
df_names
df_grades = pd.DataFrame(np.random.randint(6,16,(100,5)),
                        columns=['國文','英文','數學','社會','自然'])
df_grades
df_grades.to_csv('data/grades2.csv')  #,index=0
df_grades = pd.read_csv('data/grades2.csv')
df_grades
df_score = pd.concat([df_names,df_grades],axis=1)
df_score<jupyter_output><empty_output> | 
	no_license | 
	/Unit02_02_Pandas數據分析.ipynb | 
	SIOCHEONG/IMLP342 | 35 | 
| 
	<jupyter_start><jupyter_text>### The imported libraries include the rdkit library for cheminformatics, plus several modules from sklearn<jupyter_code>import numpy as np
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit.Chem import PandasTools
from rdkit.Chem.Draw import IPythonConsole
import matplotlib.pyplot as plt
import seaborn as sns
from mol2vec.features import mol2alt_sentence, MolSentence, DfVec, sentences2vec
from mol2vec.helpers import depict_identifier, plot_2D_vectors, IdentifierTable, mol_to_svg
from gensim.models import word2vec
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn import metrics
from sklearn.metrics import precision_recall_curve,recall_score, roc_auc_score
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from rdkit.ML.Descriptors import MoleculeDescriptors
from rdkit.Chem import Descriptors
from rdkit.Chem.EState import Fingerprinter
from rdkit import Chem 
from rdkit.Chem import Draw
from rdkit.Chem import Descriptors
import os
import math
#os.chdir('/admin/Downloads/mol2vec-master')
%cd .
<jupyter_output>C:\Users\admin\Downloads\mol2vec-master
<jupyter_text>### Experiment 1: classify using the features extracted as molecular descriptors, here with a random forest and an MLP<jupyter_code>test_roc1=[]
test_roc12=[]
test_prc1=[]
test_prc12=[]
def get_fps(mol):
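    # Build one feature vector per molecule: the RDKit EState fingerprint concatenated with
    # the full list of RDKit molecular descriptors.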
    calc=MoleculeDescriptors.MolecularDescriptorCalculator([x[0] for x in Descriptors._descList])
    ds = np.asarray(calc.CalcDescriptors(mol))
    arr=Fingerprinter.FingerprintMol(mol)[0]
    
    return np.append(arr,ds)
# random forest on the full set of descriptor features
for i in range(10):
    data =pd.read_csv('fold_'+str(i)+'/train.csv')
    data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    data['Descriptors']=data['mol'].apply(get_fps)
    data['Descriptors'].fillna(data.mean())
    xtrain = np.array(list(data['Descriptors']))
    xtrain=np.nan_to_num(xtrain)
    xtrain[xtrain >= np.finfo(np.float32).max]=np.finfo(np.float32).max
    ytrain= np.array(data['activity'])
   
    test =pd.read_csv('fold_'+str(i)+'/test.csv')
    test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    test['Descriptors']=test['mol'].apply(get_fps)
    xtrain=np.nan_to_num(xtrain)
    
    xtest = np.array(list(test['Descriptors']))
    xtest=np.nan_to_num(xtest)
    xtest[xtest >= np.finfo(np.float32).max] = np.finfo(np.float32).max
    ytest=test['activity']
    xtrain = StandardScaler().fit_transform(xtrain)
    xtest = StandardScaler().fit_transform(xtest)
    rf = RandomForestClassifier(max_features='auto')
    rf2=MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=10)
    rf.fit(xtrain, ytrain)
    rf2.fit(xtrain, ytrain)
    predict_prob_y = rf.predict_proba(xtest)
    predict_prob_y2 = rf2.predict_proba(xtest)
    test_roc1.append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
    test_roc12.append(metrics.roc_auc_score(ytest,predict_prob_y2[:,1]))
    fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
    fpr2, tpr2, thresholds=(precision_recall_curve(ytest,predict_prob_y2[:,1]))
    test_prc1.append(metrics.auc( tpr,fpr))
    test_prc12.append(metrics.auc( tpr2,fpr2))
    del rf
    del rf2
    <jupyter_output>Iteration 1, loss = 0.22632262
Iteration 2, loss = 0.09703120
Iteration 3, loss = 0.06666071
Iteration 4, loss = 0.05318610
Iteration 5, loss = 0.04551671
Iteration 6, loss = 0.03998555
Iteration 7, loss = 0.03484627
Iteration 8, loss = 0.03138832
Iteration 9, loss = 0.02876949
Iteration 10, loss = 0.02590400
Iteration 11, loss = 0.02396547
Iteration 12, loss = 0.02204197
Iteration 13, loss = 0.02038440
Iteration 14, loss = 0.01884410
Iteration 15, loss = 0.01747396
Iteration 16, loss = 0.01635332
Iteration 17, loss = 0.01515711
Iteration 18, loss = 0.01422090
Iteration 19, loss = 0.01326010
Iteration 20, loss = 0.01252532
Iteration 21, loss = 0.01182362
Iteration 22, loss = 0.01107983
Iteration 23, loss = 0.01044050
Iteration 24, loss = 0.00989231
Iteration 25, loss = 0.00932359
Iteration 26, loss = 0.00887018
Iteration 27, loss = 0.00837451
Iteration 28, loss = 0.00798907
Iteration 29, loss = 0.00760758
Iteration 30, loss = 0.00725308
Iteration 31, loss = 0.00693033
Iteration 32, los[...]<jupyter_text>### From the printed results we can see that, with molecular-descriptor features, the random forest achieves ROC and PRC of 0.81 and 0.45, while the MLP achieves 0.79 and 0.44<jupyter_code>print(np.mean(test_roc1))
print(np.mean(test_prc1))
print(np.mean(test_roc12))
print(np.mean(test_prc12))<jupyter_output>0.811045867532132
0.4549249079343823
0.7930097884300603
0.4413498076293891
<jupyter_text>### Print the per-fold ROC and PRC; even on the same features the two methods behave quite differently from fold to fold, as the first fold already shows<jupyter_code>print(test_roc1)
print(test_prc1)
print(test_roc12)
print(test_prc12)<jupyter_output>[0.7204081632653061, 0.9295212765957447, 0.8946700507614214, 0.682449494949495, 0.9166666666666666, 0.9048821548821548, 0.7974452554744526, 0.45169082125603865, 0.9911167512690356, 0.821608040201005]
[0.42172121040550326, 0.6071080976250148, 0.7607418971156705, 0.24633341285661292, 0.8423054699537751, 0.5420005341880342, 0.15884387351778653, 0.0037593984962406013, 0.8333333333333334, 0.13310185185185183]
[0.95, 0.9293313069908815, 0.8375634517766497, 0.6590909090909091, 0.9188034188034189, 0.6616161616161615, 0.738138686131387, 0.5458937198067633, 0.9961928934010151, 0.6934673366834171]
[0.6021910929033963, 0.904919169540045, 0.5301480156148342, 0.2736304147778606, 0.7549646686159845, 0.40302682371210286, 0.025902701140041076, 0.005263157894736842, 0.8839285714285714, 0.029523460666317808]
<jupyter_text>### Experiment 2: the mix of mol2vec and all descriptors, run through random forest and MLP<jupyter_code>from gensim.models import word2vec
test_roc2=[]
test_prc2=[]
test_roc22=[]
test_prc22=[]
# mix of mol2vec and all descriptors
model = word2vec.Word2Vec.load('./mol2vec-master/examples/models/model_300dim.pkl')
for i in range(10):
    data =pd.read_csv('fold_'+str(i)+'/train.csv')
    data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    data['Descriptors']=data['mol'].apply(get_fps)
    data['Descriptors'].fillna(data.mean())
    data['sentence'] = data.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    data['mol2vec'] = [DfVec(x) for x in sentences2vec(data['sentence'], model, unseen='UNK')]
    
    X_mol = np.array([x.vec for x in data['mol2vec']])
    X_mol = pd.DataFrame(X_mol)
    ytrain=data['activity']
    xtrain=data.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
    X_de = np.array([x for x in data['Descriptors']])
    X_de = pd.DataFrame(X_de)
    xtrain = pd.concat((X_de, X_mol), axis=1)
    xtrain=np.nan_to_num(xtrain)
    test =pd.read_csv('fold_'+str(i)+'/test.csv')
    test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    test['Descriptors']=test['mol'].apply(get_fps)
    test['Descriptors'].fillna(test.mean())
    test['sentence'] = test.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    test['mol2vec'] = [DfVec(x) for x in sentences2vec(test['sentence'], model, unseen='UNK')]
    ytest=test['activity']
    xtest=test.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
    X_mol2 = np.array([x.vec for x in test['mol2vec']])
    X_mol2 = pd.DataFrame(X_mol2)
    X_de2 = np.array([x for x in test['Descriptors']])
    X_de2 = pd.DataFrame(X_de2)
    xtest = pd.concat((X_de2, X_mol2), axis=1)
    xtest=np.nan_to_num(xtest)
    X_train = StandardScaler().fit_transform(xtrain)
    X_test = StandardScaler().fit_transform(xtest)
    #lr = BlendEnsemble(verbose=2)
    lr2=MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=1)
    lr = RandomForestClassifier(max_features='auto')
    lr.fit(X_train, ytrain)
    lr2.fit(X_train, ytrain)
    predict_prob_y = lr.predict_proba(X_test)
    predict_prob_y2 = lr2.predict_proba(X_test)
    test_roc2.append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
    test_roc22.append(metrics.roc_auc_score(ytest,predict_prob_y2[:,1]))
    fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
    fpr2, tpr2, thresholds=(precision_recall_curve(ytest,predict_prob_y2[:,1]))
    test_prc2.append(metrics.auc( tpr,fpr))
    test_prc22.append(metrics.auc(tpr2,fpr2))
    
    del lr
print(np.mean(test_roc2))
print(np.mean(test_prc2))
print(np.mean(test_roc22))
print(np.mean(test_prc22))
<jupyter_output>0.8020580292519908
0.3954048849638902
0.7638728763498511
0.4542323642203514
<jupyter_text>### Experiment 3: classification performance of different molecular fingerprints, validated with random forest and MLP<jupyter_code># effectiveness of the various fingerprints
import multiprocessing
from joblib import Parallel, delayed
roc = {}
prc={}     
   
for f in fps:
    roc[f] = {}
    prc[f]={}
    for m in models:
        roc[f][m]=[]
        prc[f][m]=[]
        for i in range(10):
            data =pd.read_csv('fold_'+str(i)+'/train.csv')
            data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
            test =pd.read_csv('fold_'+str(i)+'/test.csv')
            test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
            fps = {"ECFP4": data['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=2, nBits=2048)),
           "ECFP6": data['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=3, nBits=2048)),
           "RDKFP":data['mol'].apply(lambda m: AllChem.RDKFingerprint(m, fpSize=2048))}
            ytrain=np.array(data['activity'])
            fpstest = {"ECFP4": test['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=2, nBits=2048)),
           "ECFP6": test['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=3, nBits=2048)),
           "RDKFP":test['mol'].apply(lambda m: AllChem.RDKFingerprint(m, fpSize=2048))}
            xtrain = np.array(fps[f].tolist())
            xtest = np.array(fpstest[f].tolist())
            # Default models
            models = {"rf": RandomForestClassifier(max_features='auto'),
              "nnet":MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=1) }
            ytest=np.array(test['activity'])
            models[m].fit(xtrain, ytrain)
            #scores[f][m + "_r2_train"] = models[m].score(X_train, y_train)
            predict_prob_y = models[m].predict_proba(xtest)
            roc[f][m].append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
            fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
            prc[f][m].append(metrics.auc( tpr,fpr))
for i in models:
    for j in fps:
        roc[i][j]=np.mean(roc[i][j])
        prc[j][i]=np.mean(prc[j][i])<jupyter_output><empty_output><jupyter_text>### Printed below are the classification results of the random forest and the multilayer perceptron for the three fingerprints<jupyter_code>print(roc)
print(prc)<jupyter_output>             rf      nnet
ECFP4   0.77559  0.694632
ECFP6  0.768347  0.711039
RDKFP  0.774321  0.721036
             rf      nnet
ECFP4  0.415972  0.330636
ECFP6  0.395199  0.315473
RDKFP  0.397939  0.398258
<jupyter_text>### Experiment 4: mix mol2vec, all descriptors, and fingerprints for classification<jupyter_code>from mlens.ensemble import SuperLearner,BlendEnsemble
from sklearn.metrics import roc_auc_score,accuracy_score
test_auc4=[]
test_prc4=[]
# mix of mol2vec, all descriptors, and fingerprints
for i in range(10):
    data =pd.read_csv('fold_'+str(i)+'/train.csv')
    data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    data['Descriptors']=data['mol'].apply(get_fps)
    data['Descriptors'].fillna(data.mean())
    data['sentence'] = data.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    data['mol2vec'] = [DfVec(x) for x in sentences2vec(data['sentence'], model, unseen='UNK')]
    data['fingerprint']=data['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=2, nBits=2048))
    xf=np.array(data['fingerprint'].tolist())
    xf=pd.DataFrame(xf)
    X_mol = np.array([x.vec for x in data['mol2vec']])
    X_mol = pd.DataFrame(X_mol)
    
    ytrain=data['activity']
    xtrain=data.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
    X_de = np.array([x for x in data['Descriptors']])
    X_de = pd.DataFrame(X_de)
    xtrain = pd.concat((X_de, X_mol,xf), axis=1)
    xtrain=np.nan_to_num(xtrain)
    test =pd.read_csv('fold_'+str(i)+'/test.csv')
    test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    test['Descriptors']=test['mol'].apply(get_fps)
    test['Descriptors'].fillna(test.mean())
    test['sentence'] = test.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    test['mol2vec'] = [DfVec(x) for x in sentences2vec(test['sentence'], model, unseen='UNK')]
    test['fingerprint']=test['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=2, nBits=2048))
    xf2=np.array(test['fingerprint'].tolist())
    xf2=pd.DataFrame(xf2)
    ytest=test['activity']
    xtest=test.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
    X_mol2 = np.array([x.vec for x in test['mol2vec']])
    X_mol2 = pd.DataFrame(X_mol2)
    X_de2 = np.array([x for x in test['Descriptors']])
    X_de2 = pd.DataFrame(X_de2)
    xtest = pd.concat((X_de2, X_mol2,xf2), axis=1)
    xtest=np.nan_to_num(xtest)
    X_train = StandardScaler().fit_transform(xtrain)
    X_test = StandardScaler().fit_transform(xtest)
    #lr = BlendEnsemble(verbose=2)
    #lr.add([RandomForestClassifier(max_features='auto'),MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=1)],)
    #lr.add_meta(LogisticRegression(solver='lbfgs', multi_class='auto'))
    #lr=MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=1)
    lr = RandomForestClassifier(max_features='auto')
    lr.fit(X_train, ytrain)
    predict_prob_y = lr.predict_proba(X_test)
    test_auc4.append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
    fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
    test_prc4.append(metrics.auc( tpr,fpr))
    
    del lr<jupyter_output><empty_output><jupyter_text>### We can see that the model mixing all the features is the most accurate, reaching an ROC value of 85%<jupyter_code>print(np.mean(test_auc4))
print(np.mean(test_prc4))<jupyter_output>0.8502830356668817
0.43819971569910193
<jupyter_text>### The fourth experiment: comparison of different sampling strategies with a random forest classifier
<jupyter_code># under-sampling comparison
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import (ClusterCentroids, RandomUnderSampler,
                                     NearMiss,
                                     InstanceHardnessThreshold,
                                     CondensedNearestNeighbour,
                                     EditedNearestNeighbours,
                                     RepeatedEditedNearestNeighbours,
                                     AllKNN,
                                     NeighbourhoodCleaningRule,
                                     OneSidedSelection)
test_auc5={}
test_prc5={}
# balanced sampling of the mol2vec + all-descriptor mix
sampling={}
for i in range(10):
    data =pd.read_csv('fold_'+str(i)+'/train.csv')
    data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    data['Descriptors']=data['mol'].apply(get_fps)
    data['Descriptors'].fillna(data.mean())
    data['sentence'] = data.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    data['mol2vec'] = [DfVec(x) for x in sentences2vec(data['sentence'], model, unseen='UNK')]
    
    X_mol = np.array([x.vec for x in data['mol2vec']])
    X_mol = pd.DataFrame(X_mol)
    ytrain=np.array(data['activity'])
    xtrain=np.zeros([X_mol.shape[0],279])
    for p in range(X_mol.shape[0]):
        xtrain[p,:]=data['Descriptors'][p]
    
    xtrain = np.concatenate((xtrain, X_mol), axis=1)
    xtrain=np.nan_to_num(xtrain)
    xtrain[xtrain >= np.finfo(np.float32).max]=np.finfo(np.float32).max
    xtrain=np.array(xtrain)
    xtrain.shape
    sampling={'ClusterCentroids':ClusterCentroids().fit_resample(xtrain, ytrain),'RandomUnderSampler':RandomUnderSampler().fit_resample(xtrain, ytrain),
             'NearMiss':NearMiss().fit_resample(xtrain, ytrain),'InstanceHardnessThreshold':InstanceHardnessThreshold().fit_resample(xtrain, ytrain),
              'CondensedNearestNeighbour':CondensedNearestNeighbour().fit_resample(xtrain, ytrain),'SMOTE':SMOTE().fit_resample(xtrain, ytrain)
             }
    test_auc5[str(i)]=[]
    test_prc5[str(i)]=[]
    for s in sampling:
        
        X_resampled, Y_resampled = sampling[s]
        test =pd.read_csv('fold_'+str(i)+'/test.csv')
        test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
        test['Descriptors']=test['mol'].apply(get_fps)
        test['Descriptors'].fillna(test.mean())
        test['sentence'] = test.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
        test['mol2vec'] = [DfVec(x) for x in sentences2vec(test['sentence'], model, unseen='UNK')]
        ytest=test['activity']
        xtest=test.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
        X_mol2 = np.array([x.vec for x in test['mol2vec']])
        X_mol2 = pd.DataFrame(X_mol2)
        X_de2 = np.array([x for x in test['Descriptors']])
        X_de2 = pd.DataFrame(X_de2)
        xtest = pd.concat((X_de2, X_mol2), axis=1)
        xtest=np.nan_to_num(xtest)
        X_train = StandardScaler().fit_transform(X_resampled)
        X_test = StandardScaler().fit_transform(xtest)
        lr = RandomForestClassifier(max_features='auto')
        lr.fit(X_train,Y_resampled)
        predict_prob_y = lr.predict_proba(X_test)
        test_auc5[str(i)].append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
        fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
        test_prc5[str(i)].append(metrics.auc( tpr,fpr))
        
test_auc5
avroc=np.zeros(6)
pvroc=np.zeros(6)
for j in range(6):
    for i in range(10):
        avroc[j]+=test_auc5[str(i)][j]
        pvroc[j]+=test_prc5[str(i)][j]
print(avroc*0.1)
print(pvroc*0.1)<jupyter_output>[0.52863248 0.7603706  0.61471868 0.76443984 0.7965451  0.75578389]
[0.04164929 0.32034436 0.09140746 0.09645921 0.40275834 0.35208798]
<jupyter_text>### Experiment 5: different sampling methods with an xgboost classifier
In the sampling experiments above, none of the sampling methods combined with random forest beat the non-sampled result of 0.80, so here I switch to an xgboost classifier and try again<jupyter_code>from xgboost import XGBClassifier
from imblearn.over_sampling import SMOTE,ADASYN
from imblearn.combine import SMOTEENN,SMOTETomek
from imblearn.under_sampling import (NearMiss)
test_auc5={}
test_prc5={}
# balanced sampling of the mol2vec + all-descriptor mix
sampling={}
for i in range(10):
    data =pd.read_csv('fold_'+str(i)+'/train.csv')
    data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    data['Descriptors']=data['mol'].apply(get_fps)
    data['Descriptors'].fillna(data.mean())
    data['sentence'] = data.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    data['mol2vec'] = [DfVec(x) for x in sentences2vec(data['sentence'], model, unseen='UNK')]
    
    X_mol = np.array([x.vec for x in data['mol2vec']])
    X_mol = pd.DataFrame(X_mol)
    ytrain=np.array(data['activity'])
    xtrain=np.zeros([X_mol.shape[0],279])
    for p in range(X_mol.shape[0]):
        xtrain[p,:]=data['Descriptors'][p]
    
    xtrain = np.concatenate((xtrain, X_mol), axis=1)
    xtrain=np.nan_to_num(xtrain)
    xtrain[xtrain >= np.finfo(np.float32).max]=np.finfo(np.float32).max
    xtrain=np.array(xtrain)
    xtrain.shape
    sampling={'SMOTE': SMOTE(), 
              'ADASYN':  ADASYN(random_state=42), 
              'NearMiss':NearMiss(version=3), 'SMOTE+ENN':  SMOTEENN(), 'SMOTE+Tomek':  SMOTETomek()
             }
    test_auc5[str(i)]=[]
    test_prc5[str(i)]=[]
    for s in sampling:
        
        X_resampled, Y_resampled = sampling[s].fit_resample(xtrain, ytrain)
        test =pd.read_csv('fold_'+str(i)+'/test.csv')
        test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
        test['Descriptors']=test['mol'].apply(get_fps)
        test['Descriptors'].fillna(test.mean())
        test['sentence'] = test.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
        test['mol2vec'] = [DfVec(x) for x in sentences2vec(test['sentence'], model, unseen='UNK')]
        ytest=test['activity']
        xtest=test.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
        X_mol2 = np.array([x.vec for x in test['mol2vec']])
        X_mol2 = pd.DataFrame(X_mol2)
        X_de2 = np.array([x for x in test['Descriptors']])
        X_de2 = pd.DataFrame(X_de2)
        xtest = pd.concat((X_de2, X_mol2), axis=1)
        xtest=np.nan_to_num(xtest)
        X_train = StandardScaler().fit_transform(X_resampled)
        X_test = StandardScaler().fit_transform(xtest)
        lr = XGBClassifier(random_state = 42, n_jobs = -1)
        lr.fit(X_train,Y_resampled)
        predict_prob_y = lr.predict_proba(X_test)
        test_auc5[str(i)].append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
        fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
        test_prc5[str(i)].append(metrics.auc( tpr,fpr))
        
test_auc5
avroc=np.zeros(5)
pvroc=np.zeros(5)
for j in range(5):
    for i in range(10):
        avroc[j]+=test_auc5[str(i)][j]
        pvroc[j]+=test_prc5[str(i)][j]
<jupyter_output><empty_output><jupyter_text>### The results show that even with the xgboost classifier the scores still generally drop (xgboost without sampling gives roc=0.81), which puzzles me: neither approach here improved the ROC, yet in our group's graph network sampling did improve the results<jupyter_code>print(avroc*0.1)
print(pvroc*0.1)<jupyter_output>[0.72078436 0.69827815 0.64164957 0.698253   0.71224579]
[0.23505263 0.27909201 0.09452962 0.24142565 0.20777271]
<jupyter_text>### Experiment 6: ensemble voting. Experiment 1 already showed that, even with the same features, per-fold performance differs a lot between classifiers, so we try estimating the result with a voting scheme<jupyter_code>from mlens.ensemble import SuperLearner,BlendEnsemble
from sklearn.metrics import roc_auc_score,accuracy_score
test_auc6=[]
test_prc6=[]
# mix of mol2vec, all descriptors, and fingerprints
for i in range(10):
    data =pd.read_csv('fold_'+str(i)+'/train.csv')
    data['mol'] = data['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    data['Descriptors']=data['mol'].apply(get_fps)
    data['Descriptors'].fillna(data.mean())
    data['sentence'] = data.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    data['mol2vec'] = [DfVec(x) for x in sentences2vec(data['sentence'], model, unseen='UNK')]
    data['fingerprint']=data['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=2, nBits=2048))
    xf=np.array(data['fingerprint'].tolist())
    xf=pd.DataFrame(xf)
    X_mol = np.array([x.vec for x in data['mol2vec']])
    X_mol = pd.DataFrame(X_mol)
    
    ytrain=data['activity']
    xtrain=data.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
    X_de = np.array([x for x in data['Descriptors']])
    X_de = pd.DataFrame(X_de)
    xtrain = pd.concat((X_de, X_mol,xf), axis=1)
    xtrain=np.nan_to_num(xtrain)
    test =pd.read_csv('fold_'+str(i)+'/test.csv')
    test['mol'] = test['smiles'].apply(lambda x: Chem.MolFromSmiles(x)) 
    test['Descriptors']=test['mol'].apply(get_fps)
    test['Descriptors'].fillna(test.mean())
    test['sentence'] = test.apply(lambda x: MolSentence(mol2alt_sentence(x['mol'], 1)), axis=1)
    test['mol2vec'] = [DfVec(x) for x in sentences2vec(test['sentence'], model, unseen='UNK')]
    test['fingerprint']=test['mol'].apply(lambda m: AllChem.GetMorganFingerprintAsBitVect(m, radius=2, nBits=2048))
    xf2=np.array(test['fingerprint'].tolist())
    xf2=pd.DataFrame(xf2)
    ytest=test['activity']
    xtest=test.drop(columns=['smiles', 'activity','mol','sentence','mol2vec'])
    X_mol2 = np.array([x.vec for x in test['mol2vec']])
    X_mol2 = pd.DataFrame(X_mol2)
    X_de2 = np.array([x for x in test['Descriptors']])
    X_de2 = pd.DataFrame(X_de2)
    xtest = pd.concat((X_de2, X_mol2,xf2), axis=1)
    xtest=np.nan_to_num(xtest)
    X_train = StandardScaler().fit_transform(xtrain)
    X_test = StandardScaler().fit_transform(xtest)
    #lr = BlendEnsemble(verbose=2)
    #lr.add([RandomForestClassifier(max_features='auto'),MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=1)],)
    #lr.add_meta(LogisticRegression(solver='lbfgs', multi_class='auto'))
    lr2=MLPClassifier(activation='relu', solver='adam', alpha=0.0001,verbose=1)
    lr = RandomForestClassifier(max_features='auto')
    lr3 = XGBClassifier(random_state = 42, n_jobs = -1)
    lr.fit(X_train, ytrain)
    lr2.fit(X_train, ytrain)
    lr3.fit(X_train, ytrain)
    predict_prob_y = (lr.predict_proba(X_test)+lr2.predict_proba(X_test)+lr3.predict_proba(X_test))/3
    test_auc6.append(metrics.roc_auc_score(ytest,predict_prob_y[:,1]))
    fpr, tpr, thresholds=(precision_recall_curve(ytest,predict_prob_y[:,1]))
    test_prc6.append(metrics.auc( tpr,fpr))
    
    del lr
np.mean(test_prc6)<jupyter_output><empty_output> | 
	no_license | 
	/classicclassifier.ipynb | 
	SamanthaWangdl/MIT_AIcure_open_drug_task | 13 | 
| 
	<jupyter_start><jupyter_text>The main goal of Monte Carlo methods applied to Bayesian statistics is
to draw samples from the posterior probability density.
In the case of Metropolis-Hastings, the exploration is done by taking steps in parameter space;
the landing point of each step (which can be rejected or accepted) is *guessed* without taking
the shape of the prior into account.
The information in the prior is only used to accept or reject the proposed step.
This can be considered a flaw, or at least a choice that is not optimal. It would be desirable
to use all the available information at every moment, including the moment a new place in the chain is proposed.
Hamiltonian Monte Carlo methods seek an improvement by making the proposal of a new
point in the chain come from a **trajectory** in parameter space. This trajectory is "guided" by the prior.
Before diving fully into the formulation of Hamiltonian Monte Carlo we will give
a brief review of the Hamiltonian formulation of mechanics and some associated
numerical methods.# Hamiltonian Mechanics Classical mechanics can be rewritten in a mathematical formalism known as the Hamiltonian formalism.
Besides being useful for classical mechanics, this reformulation has been fundamental for statistical
mechanics and quantum mechanics.
The central mathematical object in this formulation is the **Hamiltonian**, $\mathcal{H}$.
The Hamiltonian is a function of the positions ($q$) and momenta ($p$) that describe the system of interest. Once we have the Hamiltonian as a function of $p$ and $q$ we can find its time evolution using Hamilton's equations
$$
\frac{\partial p}{\partial t} = -\frac{\partial \mathcal{H}}{\partial q}
$$
$$
\frac{\partial q}{\partial t} = +\frac{\partial \mathcal{H}}{\partial p}
$$
In the cases that will interest us the Hamiltonian corresponds to the total energy of the system,
$\mathcal{H}= T+V$, where $T$ is the kinetic energy and $V$ is the potential energy.# Example 7.1
A particle of mass $m$ in a two-dimensional plane is connected by a spring to a fixed point.
To simplify, we take the origin of the coordinate system to be the point where one end of the spring is fixed. The other end of the spring, where our particle of mass $m$ sits, has coordinates $x,y$.
The kinetic energy can be written in Cartesian coordinates as $T = p_x^2/2m + p_y^2/2m$, while the potential energy is $V=kx^2/2 + ky^2/2$. In this case the Hamiltonian of the system is:
$$
\mathcal{H} = \frac{1}{2m}p_x^2 + \frac{1}{2m}p_y^2 + \frac{k}{2}x^2 + \frac{k}{2}y^2.
$$# Integrating Hamilton's equations
The goal now is to solve Hamilton's equations numerically.
This means we are going to find $p$ and $q$ as functions of time.
Since this involves numerically solving coupled first-order differential equations,
it is necessary to give initial conditions for all the $p$ and $q$.
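As a concrete check, for the Hamiltonian of Example 7.1 these equations read
$$
\dot{x} = \frac{\partial \mathcal{H}}{\partial p_x} = \frac{p_x}{m}, \qquad \dot{p}_x = -\frac{\partial \mathcal{H}}{\partial x} = -kx,
$$
and analogously for $y$ and $p_y$; these are exactly the expressions implemented below in `partialH_partialp` and `partialH_partialq`.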
In terms of finite differences we can rewrite Hamilton's equations as
$$
p_{n+1} = p_{n} -\frac{\partial\mathcal{H}}{\partial q}\Delta t,
$$
$$
q_{n+1} = q_{n} + \frac{\partial \mathcal{H}}{\partial p}\Delta t.
$$
The first equation is called the **kick** (it is like a blow that changes the momentum) and the second the **drift** (it is like free motion at constant velocity).## Exercise 7.2
Using the same example as before, we will solve Hamilton's equations computationally.<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def partialH_partialq(p, q, k=1, m=1):
    """Expresion analitica de la derivada del hamiltoniano con respecto a q.
    """
    return k*q
def partialH_partialp(p, q, k=1, m=1):
    """Expresion analitica de la derivada del hamiltoniano con respecto a p.
    """
    return p/m
def kick(p, q, delta_t, k=1, m=1):
    """Cambio en el momentum.
    """
    p_new = p - partialH_partialq(p, q, k=k, m=m) * delta_t
    return p_new
def drift(p, q, delta_t, k=1, m=1):
    """Cambio en la posicion.
    """
    q_new = q + partialH_partialp(p, q, k=k, m=m) * delta_t
    return q_new
def solve(delta_t = 1E-2, n_steps=1000):
    """
    Solve Hamilton's equations using
    a leapfrog method: kick-drift-kick.
    """
    # initial conditions
    x = [1.0]
    y = [2.0]
    px = [5.0]
    py = [-3.0]
    
    # spring constant and mass
    k = 1.0
    m = 1.0
    for i in range(1,n_steps):
        # First a new point with a kick of delta_t/2
        px.append(kick(px[i-1], x[i-1],delta_t/2, k=k, m=m))
        py.append(kick(py[i-1], y[i-1],delta_t/2, k=k, m=m))
        
        # Then a drift of delta_t with the new momentum
        x.append(drift(px[i], x[i-1],delta_t, k=k, m=m))
        y.append(drift(py[i], y[i-1],delta_t, k=k, m=m))
        
        # Finally another kick of delta_t/2 to finish updating the momentum
        px[i] = kick(px[i], x[i],delta_t/2, k=k, m=m)
        py[i] = kick(py[i], y[i],delta_t/2, k=k, m=m)
    return np.array(x), np.array(y), np.array(px), np.array(py)
x, y, px, py = solve()
plt.plot(x)
plt.xlabel('time steps')
plt.ylabel('x')
plt.plot(x,y)
plt.xlabel('x')
plt.ylabel('y')<jupyter_output><empty_output><jupyter_text>In this case the result was as expected: two-dimensional harmonic motion.# Hamiltonian Monte Carlo
What does all this have to do with Hamiltonian Monte Carlo? The main idea is that
the probability distribution to be sampled plays the role of the potential
inside the Hamiltonian. One metaphor is the following: a point of the chain is given a random momentum so that it moves within the probability distribution and generates a new proposal. To decide whether the proposal is accepted, the traditional Metropolis-Hastings rule is followed.
More precisely, if the distribution we want to sample is $F(q)$, then we define the
following Hamiltonian
$$
\mathcal{H} = \frac{p^2}{2m} -  \log (F(q)) 
$$
and at each step of the Markov chain we generate an initial momentum, with $p$ drawn from a Gaussian distribution and $q$ taken from the current value of the chain. We evolve the Hamiltonian solution for $n_{steps}$ with a chosen $\Delta t$, flip the sign of $p$ (why? see page 40 of https://arxiv.org/pdf/1701.02434.pdf), and finally evaluate whether the new place in the chain should be accepted or not by comparing the Hamiltonian (i.e. the energy) at the two points of interest.[Why walk when we can flow](http://elevanth.org/blog/2017/11/28/build-a-better-markov-chain/)
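In other words, a proposal $(q', p')$ obtained by integrating the trajectory is accepted with probability
$$
\alpha = \min\!\left(1,\; e^{-\left(\mathcal{H}(q',p') - \mathcal{H}(q,p)\right)}\right),
$$
which is the Metropolis rule used in the code below (`alpha = min(1.0, np.exp(-(E_new - E_old)))`).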
see the talks
[MCH_1](https://www.youtube.com/watch?v=pHsuIaPbNbY)
[MCH_2](https://www.youtube.com/watch?v=xWQpEAyI5s8)# Exercise 7.2
We are going to build a Markov chain that samples a Gaussian distribution with standard deviation `sigma` using the Hamiltonian method.<jupyter_code>def pdf_to_sample(q, sigma):
    return np.exp(-0.5*q**2/sigma**2)
def log_pdf_to_sample(q, sigma):
    return -0.5*q**2/sigma**2
def gradient_log_pdf_to_sample(q, sigma):
    return -q/sigma**2
def leapfrog(q,p, sigma, delta_t=1E-1, niter=5):
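    # Kick-drift-kick leapfrog: half-step momentum update, full-step position update,
    # then another half-step momentum update, repeated niter times.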
    q_new = q
    p_new = p
    for i in range(niter):
        p_new = p_new + 0.5 * delta_t * gradient_log_pdf_to_sample(q_new, sigma) #kick
        q_new = q_new + delta_t * p_new #drift
        p_new = p_new + 0.5 * delta_t * gradient_log_pdf_to_sample(q_new, sigma) #kick
    return q_new, p_new
def H(q,p, sigma):
    K = 0.5 * p * p
    U = -log_pdf_to_sample(q, sigma)
    return K + U
def MCMC(nsteps):
    q = np.zeros(nsteps)
    p = np.zeros(nsteps)
    p[0] = np.random.normal(0,1)
    q[0] = np.random.normal(0,1)
    sigma = 0.1
    for i in range(1,nsteps):
        p[i] = np.random.normal(0,1)
        q_new, p_new = leapfrog(q[i-1],p[i-1], sigma) # the proposal is generated with leapfrog
        p_new = -p_new # negate p so that the proposal is symmetric.
        E_new = H(q_new, p_new, sigma) # Instead of evaluating the pdf we evaluate the energy.
        E_old = H(q[i-1], p[i-1], sigma)
        alpha = min(1.0,np.exp(-(E_new - E_old))) # The two energies are compared
        beta = np.random.random()
        if beta < alpha:
            q[i] = q_new
        else:
            q[i] = q[i-1]
    return q
q_chain = MCMC(10000)
x = np.linspace(-0.5, 0.5)
sigma = 0.1
y = (1.0/(sigma * np.sqrt(2*np.pi))) * np.exp(-0.5*(x/sigma)**2)
a = plt.hist(q_chain[500:], bins=20, density=True)
plt.plot(x,y)
<jupyter_output><empty_output><jupyter_text># Exercise 7.1
Solve the exercise (quadratic fit) from the previous notebook using Hamiltonian Monte Carlo. In that case it is most advisable to compute the derivatives numerically and also to use a Gaussian prior (so that the posterior is continuous and can be differentiated without problems).<jupyter_code>def model(x,param):
    """Modelo polinomial. `param` contiene los coeficientes.
    """
    n_param = len(param)
    y = 0
    for i in range(n_param):
        y += param[i] * x**i
    return y 
def loglikelihood(x_obs, y_obs, sigma_y_obs, param):
    """Logaritmo natural de la verosimilitud construida con los datos observacionales y los 
        parametros que describen el modelo.
    """
    d = y_obs -  model(x_obs, param)
    d = d/sigma_y_obs
    d = -0.5 * np.sum(d**2)
    return d
def logprior(param):
    """Logaritmo natural de los prior para los parametros.
        Todos corresponden a gaussianas con sigma=10.0.
    """
    d = -0.5 * np.sum(param**2/(10.0)**2)
    return d
def divergence_loglikelihood(x_obs, y_obs, sigma_y_obs, param):
    """Divergencia del logaritmo de la funcion de verosimilitud.
    """
    n_param = len(param)
    div = np.ones(n_param)
    delta = 1E-5
    for i in range(n_param):
        delta_parameter = np.zeros(n_param)
        delta_parameter[i] = delta
        div[i] = loglikelihood(x_obs, y_obs, sigma_y_obs, param + delta_parameter) 
        div[i] = div[i] - loglikelihood(x_obs, y_obs, sigma_y_obs, param - delta_parameter)
        div[i] = div[i]/(2.0 * delta)
    return div
def hamiltonian(x_obs, y_obs, sigma_y_obs, param, param_momentum):
    """Hamiltoniano: energia cinetica + potencial: K+V
    """
    m = 100.0
    K = 0.5 * np.sum(param_momentum**2)/m
    V = -loglikelihood(x_obs, y_obs, sigma_y_obs, param)     
    return K + V
def leapfrog_proposal(x_obs, y_obs, sigma_y_obs, param, param_momentum):
    """Integracion tipo leapfrog. 
        `param` representa las posiciones (i.e. los parametros).
        `param_momemtum` representa el momentum asociado a los parametros.
    """
    N_steps = 5
    delta_t = 1E-2
    m = 100.0
    new_param = param.copy()
    new_param_momentum = param_momentum.copy()
    for i in range(N_steps):
        new_param_momentum = new_param_momentum + divergence_loglikelihood(x_obs, y_obs, sigma_y_obs, new_param) * 0.5 * delta_t # kick: gradient evaluated at the updated position
        new_param = new_param + (new_param_momentum/m) * delta_t # drift
        new_param_momentum = new_param_momentum + divergence_loglikelihood(x_obs, y_obs, sigma_y_obs, new_param) * 0.5 * delta_t # kick
    new_param_momentum = -new_param_momentum
    return new_param, new_param_momentum
def monte_carlo(x_obs, y_obs, sigma_y_obs, N=5000):
    param = [np.random.random(3)]
    param_momentum = [np.random.normal(size=3)]
    for i in range(1,N):
        propuesta_param, propuesta_param_momentum = leapfrog_proposal(x_obs, y_obs, sigma_y_obs, param[i-1], param_momentum[i-1])
        energy_new = hamiltonian(x_obs, y_obs, sigma_y_obs, propuesta_param, propuesta_param_momentum)
        energy_old = hamiltonian(x_obs, y_obs, sigma_y_obs, param[i-1], param_momentum[i-1])
   
        r = min(1,np.exp(-(energy_new - energy_old)))
        alpha = np.random.random()
        if(alpha<r):
            param.append(propuesta_param)
        else:
            param.append(param[i-1])
        param_momentum.append(np.random.normal(size=3))    
    param = np.array(param)
    return param
x_obs = np.array([-2.0,1.3,0.4,5.0,0.1, -4.7, 3.0, -3.5,-1.1])
y_obs = np.array([ -1.931,   2.38,   1.88,  -24.22,   3.31, -21.9,  -5.18, -12.23,   0.822])
sigma_y_obs = ([ 2.63,  6.23, -1.461, 1.376, -4.72,  1.313, -4.886, -1.091,  0.8054])
param_chain = monte_carlo(x_obs, y_obs, sigma_y_obs)
n_param  = len(param_chain[0])
best = []
for i in range(n_param):
    best.append(np.mean(param_chain[:,i]))
x_model = np.linspace(x_obs.min(), x_obs.max(), 100)
y_model = model(x_model, best)
plt.errorbar(x_obs,y_obs, yerr=sigma_y_obs, fmt='o', label='obs')
plt.plot(x_model, y_model, label='model')
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.savefig('best_model.pdf')<jupyter_output><empty_output><jupyter_text># Tests with distributionsWe are going to sample a gamma distribution with parameters $k=9$ and $\lambda=2$
$$ {\displaystyle f(x)=\lambda e^{-\lambda x}{\frac {(\lambda x)^{k-1}}{\Gamma (k)}}}$$
With this, the gamma distribution reduces to:
$$ f(x) = 2 e^{-2 x}{\frac {(2 x)^{8}}{\Gamma (9)}}=\frac{4}{315}e^{-2 x} x^{8}$$
$$\log[f(x)]= \log[4/315] + 8 \log[x] - 2x$$<jupyter_code>def pdf_to_sample(q):
    return 4.0*np.exp(-2.0*q)*q**8/315.0
def log_pdf_to_sample(q):
    return np.log(4./315.) - 2.*q + 8.*np.log(q)
def gradient_log_pdf_to_sample(q):
    return 8.0/q - 2.
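# Added check (not in the original notebook): verify the analytic gradient of the
# log-pdf against a central finite difference at an arbitrary positive point.
q_test, eps = 3.0, 1e-5
numeric_grad = (log_pdf_to_sample(q_test + eps) - log_pdf_to_sample(q_test - eps)) / (2 * eps)
print(numeric_grad, gradient_log_pdf_to_sample(q_test))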
def leapfrog(q,p, delta_t=1E-1, niter=30): # it is worth checking which number of iterations and which delta_t work best
    q_new = q
    p_new = p
    for i in range(niter):
        p_new = p_new + 0.5 * delta_t * gradient_log_pdf_to_sample(q_new) #kick
        q_new = q_new + delta_t * p_new #drift
        p_new = p_new + 0.5 * delta_t * gradient_log_pdf_to_sample(q_new) #kick
    return q_new, p_new
def H(q,p):
    K = 0.5 * p * p
    U = -log_pdf_to_sample(q)
    return K + U
def MCMC(nsteps):
    q = np.zeros(nsteps)
    p = np.zeros(nsteps)
    p[0] = np.random.normal(0,1)
    q[0] = np.abs(np.random.normal(0,1)) # q must be positive: log(q) in the gamma log-pdf is undefined for q <= 0
    for i in range(1,nsteps):
        p[i] = np.random.normal(0,1)
        q_new, p_new = leapfrog(q[i-1],p[i-1]) # the proposal is generated with a leapfrog integration
        p_new = -p_new # flip the sign of p so that the proposal is symmetric
        E_new = H(q_new, p_new) # instead of evaluating the pdf we evaluate the energy
        E_old = H(q[i-1], p[i-1])
        alpha = min(1.0,np.exp(-(E_new - E_old))) # compare the two energies
        beta = np.random.random()
        if beta < alpha:
            q[i] = q_new
        else:
            q[i] = q[i-1]
    return q
q_chain = MCMC(1000)
x = np.linspace(0.0, 15.0,100)
y = pdf_to_sample(x)
a = plt.hist(q_chain[100:], bins=50, density=True)
plt.plot(x,y)<jupyter_output><empty_output><jupyter_text># Fitting data with HMCHere we use the machinery above to fit the following data with Hamiltonian Monte Carlo<jupyter_code>x_obs = np.array([-2.0,1.3,0.4,5.0,0.1, -4.7, 3.0, -3.5,-1.1])
y_obs = np.array([ -8.3,   -5.77,  -1.86, 8.3, -2.35, -16.98,  0.934, -13.89,  -5.49 ])
sigma_y_obs = np.array([-1.2,-1.36, -1.44, 0.92,-1.02, 2.3,-1.0, 0.8, -1.3])
plt.errorbar(x_obs,y_obs, yerr=sigma_y_obs, fmt='o')<jupyter_output><empty_output><jupyter_text>To apply Bayes' theorem we write down the following likelihood
$$
\mathcal{L}({x_i, y_i, \sigma_i}|m, b) = \prod_{i=1}^{N}\exp\left[-\frac{1}{2}\left(\frac{y_i - y_{model}(x_i, m, b)}{\sigma_i}\right)^2\right]
$$Here it is necessary to use a Gaussian prior, so that the posterior is a continuous, differentiable function; otherwise its gradient cannot be computed and the leapfrog method cannot be applied.<jupyter_code>def model(x,m,b):
    return x*m + b
def loglikelihood(x_obs, y_obs, sigma_y_obs, m, b):
    d = y_obs -  model(x_obs, m, b)
    d = d/sigma_y_obs
    d = -0.5 * np.sum(d**2)
    return d
def logprior(m, b):
    """Logaritmo natural de los prior para los parametros.
        Todos corresponden a gaussianas con sigma=10.0.
    """
    d = -0.5 * (m**2/(1.0)**2 + b**2/(1.0)**2)
    return d
def Grad_loglikelihood(x_obs, y_obs, sigma_y_obs, m,b):
    """Gradient of the log-likelihood with respect to (m, b), computed with
        central finite differences, perturbing one parameter at a time.
    """
    n_param = 2
    div = np.ones(n_param)
    delta = 1E-5
    div[0] = loglikelihood(x_obs, y_obs, sigma_y_obs, m + delta, b)
    div[0] = div[0] - loglikelihood(x_obs, y_obs, sigma_y_obs, m - delta, b)
    div[0] = div[0]/(2.0 * delta)
    div[1] = loglikelihood(x_obs, y_obs, sigma_y_obs, m, b + delta)
    div[1] = div[1] - loglikelihood(x_obs, y_obs, sigma_y_obs, m, b - delta)
    div[1] = div[1]/(2.0 * delta)
    return div
def hamiltonian(x_obs, y_obs, sigma_y_obs, m,b , param_momentum):
    """Hamiltoniano: energia cinetica + potencial: K+V
    """
    mass = 1.0 # se le asigna una valor arbitrario a la masa
    K = 0.5 * np.sum(param_momentum**2)/mass
    V = -loglikelihood(x_obs, y_obs, sigma_y_obs, m,b)     
    return K + V
def leapfrog_proposal(x_obs, y_obs, sigma_y_obs,m ,b , param_momentum):
    """Integracion tipo leapfrog. 
        `param` representa las posiciones (i.e. los parametros).
        `param_momemtum` representa el momentum asociado a los parametros.
    """
    N_steps = 5
    delta_t = 1E-2
    mass = 1.0 # this is the arbitrary mass assigned to the parameters
    new_m = m
    new_b = b
    new_param_momentum = param_momentum.copy()
    for i in range(N_steps):
        new_param_momentum = new_param_momentum + Grad_loglikelihood(x_obs, y_obs, sigma_y_obs, new_m, new_b) * 0.5 * delta_t # kick: gradient at the updated position
        new_m += new_param_momentum[0]*delta_t/mass # drift
        new_b += new_param_momentum[1]*delta_t/mass
        new_param_momentum = new_param_momentum + Grad_loglikelihood(x_obs, y_obs, sigma_y_obs, new_m, new_b) * 0.5 * delta_t # kick
    new_param_momentum = -new_param_momentum
    
    return new_m, new_b, new_param_momentum
def monte_carlo(x_obs, y_obs, sigma_y_obs, N=5000):
    m=[np.random.random()]
    b=[np.random.random()]
    param_momentum = [np.random.normal(size=2)]
    logposterior = [loglikelihood(x_obs, y_obs, sigma_y_obs, m[0], b[0]) + logprior(m[0], b[0])]
    for i in range(1,N):
        propuesta_m,propuesta_b, propuesta_param_momentum = leapfrog_proposal(x_obs, y_obs, sigma_y_obs, m[i-1],b[i-1], param_momentum[i-1])
        energy_new = hamiltonian(x_obs, y_obs, sigma_y_obs, propuesta_m,propuesta_b, propuesta_param_momentum)
        energy_old = hamiltonian(x_obs, y_obs, sigma_y_obs, m[i-1],b[i-1], param_momentum[i-1])
        logposterior_viejo = loglikelihood(x_obs, y_obs, sigma_y_obs, m[i-1], b[i-1]) + logprior(m[i-1], b[i-1])
        logposterior_nuevo = loglikelihood(x_obs, y_obs, sigma_y_obs, propuesta_m, propuesta_b) + logprior(propuesta_m, propuesta_b)
        r = min(1,np.exp(-(energy_new - energy_old)))
        alpha = np.random.random()
        if(alpha<r):
            m.append(propuesta_m)
            b.append(propuesta_b)
            logposterior.append(logposterior_nuevo)
            
        else:
            m.append(m[i-1])
            b.append(b[i-1])
            logposterior.append(logposterior_viejo)
        param_momentum.append(np.random.normal(size=2))    
    m = np.array(m)    
    b = np.array(b)
    logposterior = np.array(logposterior)
    return m,b, logposterior
m,b,logposterior = monte_carlo(x_obs, y_obs, sigma_y_obs)
x_model = np.linspace(x_obs.min(), x_obs.max(), 100)
y_model = model(x_model, np.mean(m),np.mean(b))
plt.errorbar(x_obs,y_obs, yerr=sigma_y_obs, fmt='o', label='obs')
plt.plot(x_model, y_model, label='model')
plt.xlabel("x")
plt.ylabel("y")
plt.legend()<jupyter_output><empty_output><jupyter_text># Parameter distributions after HMC<jupyter_code>_=plt.hist(b[500:], bins=40)
_=plt.hist(m[500:], bins=40)
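# Added summary (not in the original notebook): posterior mean and standard deviation
# of the slope and intercept, discarding the first 500 steps as burn-in.
print('m = {:.3f} +/- {:.3f}'.format(np.mean(m[500:]), np.std(m[500:])))
print('b = {:.3f} +/- {:.3f}'.format(np.mean(b[500:]), np.std(b[500:])))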
plt.plot(m, b, alpha=0.5)
plt.scatter(m, b, alpha=0.4, c=np.exp(logposterior))
plt.colorbar()
plt.plot(m, b, alpha=0.5)
plt.scatter(m[500:], b[500:], alpha=0.4, c=np.exp(logposterior[500:]))
plt.colorbar()
_=plt.hist2d(m[500:], b[500:], bins=15)
plt.plot(m[500:], label='slope')
plt.plot(b[500:], label='intercept')
plt.plot(logposterior[500:], label='logposterior')
plt.legend()<jupyter_output><empty_output> | 
	no_license | 
	/Repaso_Ejercicios/Ejercicio_7/Ejercicio_7.ipynb | 
	JoseMontanaC/Metodos_Computacionales | 7 | 
| 
	<jupyter_start><jupyter_text># moving average convergence/divergence (macd) crossover
<jupyter_code>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# load data
# load data
df_sp500.rename(columns={'Adj Close': 'Adj_Close'}, inplace=True)
df_sp500.set_index(pd.DatetimeIndex(df_sp500.loc[:, 'Date'].values), inplace=True)
df_sp500.head()
# visualize data
plt.figure(figsize=(12.5, 4.5))
plt.plot(df_sp500['Close'], label='close')
plt.xticks(rotation=45)
plt.title('S&P 500')
plt.xlabel('date')
plt.ylabel('close price ($)')
plt.show()
# calculate macd line
short_ema = df_sp500.Close.ewm(span=12, adjust=False).mean()
long_ema = df_sp500.Close.ewm(span=26, adjust=False).mean()
macd = short_ema - long_ema
signal = macd.ewm(span=9, adjust=False).mean()
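# Note (added comment): the MACD line is the difference between the 12-period and
# 26-period EMAs of the close price, and the signal line is a 9-period EMA of the MACD.
# The strategy below buys when the MACD crosses above the signal line and sells when
# it crosses below.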
# visualize
plt.figure(figsize=(12.5, 4.5))
plt.plot(df_sp500.index, macd, label='ticker macd', color='red')
plt.plot(df_sp500.index, signal, label='signal line', color='blue', lw=1)
plt.xticks(rotation=45)
plt.legend(loc='upper left')
plt.show()
df_sp500['macd'] = macd
df_sp500['signal_line'] = signal
df_sp500
df_sp500.loc[df_sp500.index[0], 'macd']
# function to signal when to buy/sell
def buy_sell(df):
    buy = []
    sell = []
    flag = -1
    for i in range(len(df)):
        if df.loc[df.index[i], 'macd'] > df.loc[df.index[i], 'signal_line']:
            sell.append(np.nan)
            if flag != 1:
                buy.append(df.loc[df.index[i], 'Close'])
                flag = 1
            else:
                buy.append(np.nan)
        elif df.loc[df.index[i], 'macd'] < df.loc[df.index[i], 'signal_line']:
            buy.append(np.nan)
            if flag != 0:
                sell.append(df.loc[df.index[i], 'Close'])
                flag = 0
            else:
                sell.append(np.nan)
        else:
            buy.append(np.nan)
            sell.append(np.nan)
    return (buy, sell)
        
# add signals
result = buy_sell(df_sp500)
df_sp500['buy_signal_price'] = result[0]
df_sp500['sell_signal_price'] = result[1]
# visualize
beg = 22000
plt.figure(figsize=(12.5, 4.5))
plt.scatter(df_sp500.index[beg:], df_sp500.loc[df_sp500.index[beg]:, 'buy_signal_price'], color='green', label='buy', marker='^', alpha=1)
plt.scatter(df_sp500.index[beg:], df_sp500.loc[df_sp500.index[beg]:, 'sell_signal_price'], color='red', label='sell', marker='v', alpha=1)
plt.plot(df_sp500.loc[df_sp500.index[beg]:, 'Close'], label='close price', alpha=0.35)
plt.xticks(rotation=45)
plt.title('buy/sell signals')
plt.xlabel('date')
plt.ylabel('close price ($)')
plt.legend(loc='upper left')
plt.show()
<jupyter_output><empty_output> | 
	permissive | 
	/moving_average_convergence-divergence_crossover.ipynb | 
	andrewcchoi/moving-average-convergence-divergence-crossover | 1 | 
| 
	<jupyter_start><jupyter_text>
# Plot different SVM classifiers in the iris dataset
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
  regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
  reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
   datasets can help get an intuitive understanding of their respective
   expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.
<jupyter_code>print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in
    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional
    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.
    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C, max_iter=10000),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, gamma='auto', C=C))
models = (clf.fit(X, y) for clf in models)
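# Note (added comment): this is a generator expression, so each classifier is only
# actually fitted when it is consumed by the zip() loop further below.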
# title for the plots
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()<jupyter_output><empty_output> | 
	no_license | 
	/ai/sklearn/plot_iris_svc.ipynb | 
	dudajiang/learnpython | 1 | 
| 
	<jupyter_start><jupyter_text><jupyter_code>!pip install tensorflow-gpu==2.1.0
import os
os.kill(os.getpid(), 9)
from google.colab import drive
from absl import logging
import time
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
    Add,
    Concatenate,
    Conv2D,
    Input,
    Lambda,
    LeakyReLU,
    MaxPool2D,
    UpSampling2D,
    ZeroPadding2D,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
    binary_crossentropy,
    sparse_categorical_crossentropy
)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
print(tf.__version__)
print(tf.__git_version__)
class BatchNormalization(tf.keras.layers.BatchNormalization):
    """
    Make trainable=False freeze BN for real (the og version is sad)
    """
    def call(self, x, training=False):
        if training is None:
            training = tf.constant(False)
        training = tf.logical_and(training, self.trainable)
        return super().call(x, training)
yolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),
                              (81, 82), (135, 169),  (344, 319)],
                             np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
yolo_max_boxes = 100
yolo_iou_threshold = 0.5
yolo_score_threshold = 0.5
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    if strides == 1:
        padding = 'same'
    else:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding
        padding = 'valid'
    x = Conv2D(filters=filters, kernel_size=size,
               strides=strides, padding=padding,
               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
def DarknetTiny(name=None):
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 16, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 32, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 64, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 128, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = x_8 = DarknetConv(x, 256, 3)  # skip connection
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 512, 3)
    x = MaxPool2D(2, 1, 'same')(x)
    x = DarknetConv(x, 1024, 3)
    return tf.keras.Model(inputs, (x_8, x), name=name)
def YoloConvTiny(filters, name=None):
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # concat with skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
            x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
    def yolo_output(x_in):
        x = inputs = Input(x_in.shape[1:])
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
        x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
                                            anchors, classes + 5)))(x)
        return tf.keras.Model(inputs, x, name=name)(x_in)
    return yolo_output
def yolo_boxes(pred, anchors, classes):
    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
    grid_size = tf.shape(pred)[1]
    box_xy, box_wh, objectness, class_probs = tf.split(
        pred, (2, 2, 1, classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)
    objectness = tf.sigmoid(objectness)
    class_probs = tf.sigmoid(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss
    # !!! grid[x][y] == (y, x)
    grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
    box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
        tf.cast(grid_size, tf.float32)
    box_wh = tf.exp(box_wh) * anchors
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
    return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
    # boxes, conf, type
    b, c, t = [], [], []
    for o in outputs:
        b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
        t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
    bbox = tf.concat(b, axis=1)
    confidence = tf.concat(c, axis=1)
    class_probs = tf.concat(t, axis=1)
    scores = confidence * class_probs
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
        scores=tf.reshape(
            scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
        max_output_size_per_class=yolo_max_boxes,
        max_total_size=yolo_max_boxes,
        iou_threshold=yolo_iou_threshold,
        score_threshold=yolo_score_threshold
    )
    return boxes, scores, classes, valid_detections
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
               masks=yolo_tiny_anchor_masks, classes=80, training=False):
    x = inputs = Input([size, size, channels], name='input')
    x_8, x = DarknetTiny(name='yolo_darknet')(x)
    x = YoloConvTiny(256, name='yolo_conv_0')(x)
    output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
    output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
    if training:
        return Model(inputs, (output_0, output_1), name='yolov3')
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
    return Model(inputs, outputs, name='yolov3_tiny')
YOLOV3_LAYER_LIST = [
    'yolo_darknet',
    'yolo_conv_0',
    'yolo_output_0',
    'yolo_conv_1',
    'yolo_output_1',
    'yolo_conv_2',
    'yolo_output_2',
]
YOLOV3_TINY_LAYER_LIST = [
    'yolo_darknet',
    'yolo_conv_0',
    'yolo_output_0',
    'yolo_conv_1',
    'yolo_output_1',
]
def load_darknet_weights(model, weights_file, tiny=False):
    wf = open(weights_file, 'rb')
    major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
    if tiny:
        layers = YOLOV3_TINY_LAYER_LIST
    else:
        layers = YOLOV3_LAYER_LIST
    for layer_name in layers:
        sub_model = model.get_layer(layer_name)
        for i, layer in enumerate(sub_model.layers):
            if not layer.name.startswith('conv2d'):
                continue
            batch_norm = None
            if i + 1 < len(sub_model.layers) and \
                    sub_model.layers[i + 1].name.startswith('batch_norm'):
                batch_norm = sub_model.layers[i + 1]
            print("{}/{} {}".format(
                sub_model.name, layer.name, 'bn' if batch_norm else 'bias'))
            filters = layer.filters
            size = layer.kernel_size[0]
            in_dim = layer.input_shape[-1]
            if batch_norm is None:
                conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
            else:
                # darknet [beta, gamma, mean, variance]
                bn_weights = np.fromfile(
                    wf, dtype=np.float32, count=4 * filters)
                # tf [gamma, beta, mean, variance]
                bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
            # darknet shape (out_dim, in_dim, height, width)
            conv_shape = (filters, in_dim, size, size)
            conv_weights = np.fromfile(
                wf, dtype=np.float32, count=np.product(conv_shape))
            # tf shape (height, width, in_dim, out_dim)
            conv_weights = conv_weights.reshape(
                conv_shape).transpose([2, 3, 1, 0])
            if batch_norm is None:
                layer.set_weights([conv_weights, conv_bias])
            else:
                layer.set_weights([conv_weights])
                batch_norm.set_weights(bn_weights)
    assert len(wf.read()) == 0, 'failed to read all data'
    wf.close()
def draw_outputs(img, outputs, class_names):
    boxes, objectness, classes, nums = outputs
    boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
    wh = np.flip(img.shape[0:2])
    for i in range(nums):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
        img = cv2.putText(img, '{} {:.4f}'.format(
            class_names[int(classes[i])], objectness[i]),
            x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img
def draw_labels(x, y, class_names):
    img = x.numpy()
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    wh = np.flip(img.shape[0:2])
    for i in range(len(boxes)):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
        img = cv2.putText(img, class_names[classes[i]],
                          x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                          1, (0, 0, 255), 2)
    return img
num_classes = 80
yolo = YoloV3Tiny(classes=num_classes)
yolo.summary()
!wget https://pjreddie.com/media/files/yolov3-tiny.weights
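# Added cell (assumption): 'coco.names' is read later but never fetched in this
# notebook; this URL from the darknet repository is assumed to still serve the file.
!wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names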
weights = 'yolov3-tiny.weights'
output_weights = 'yolo-tiny.h5'
load_darknet_weights(yolo, weights, True)
yolo.save_weights(output_weights)
num_classes = 80
weights = 'yolo-tiny.h5'
classes = 'coco.names'
size = 416
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
yolo = YoloV3Tiny(classes = num_classes, size = size)
print('model created')
yolo.summary()
yolo.load_weights(weights)
print('weights loaded')
class_names = [c.strip() for c in open(classes).readlines()]
print('classes loaded')
def transform_images(x_train, size):
    x_train = tf.image.resize(x_train, (size, size))
    x_train = x_train / 255
    return x_train
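# Note (added comment): 'street.jpg' is not downloaded anywhere in this notebook;
# it is assumed to have been uploaded to the working directory beforehand.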
image = 'street.jpg'
size = 416
output_img = 'output.jpg'
img_raw = tf.image.decode_image(open(image, 'rb').read(), channels=3)
img = tf.expand_dims(img_raw, 0)
img = transform_images(img, size)
t1 = time.time()
boxes, scores, classes, nums = yolo(img)
t2 = time.time()
print('time: {}'.format(t2 - t1))
print('detections:')
for i in range(nums[0]):
    print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))
img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
cv2.imwrite(output_img, img)
print('output saved to: {}'.format(output_img))
img=mpimg.imread(output_img)
plt.imshow(img)
plt.show()
converter = tf.lite.TFLiteConverter.from_keras_model(yolo)
converter.allow_custom_ops = True
converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
        tf.lite.OpsSet.SELECT_TF_OPS
    ]
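# Note (added comment): SELECT_TF_OPS lets the converted model fall back to full
# TensorFlow kernels (the Flex delegate) for ops that have no built-in TFLite
# implementation, here most likely the combined non-max-suppression, at the cost
# of a larger runtime.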
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
#interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
# Test the TensorFlow Lite model on random input data.
input_shape = input_details[0]['shape']
print(input_shape)
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
print(input_data.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
tflite_results = interpreter.get_tensor(output_details[0]['index'])
# Test the TensorFlow model on random input data.
tf_results = yolo(tf.constant(input_data))
# Compare the result.
for tf_result, tflite_result in zip(tf_results, tflite_results):
   np.testing.assert_almost_equal(tf_result, tflite_result, decimal=5)
<jupyter_output>[{'name': 'input', 'index': 8, 'shape': array([  1, 416, 416,   3], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0)}]
[{'name': 'Identity', 'index': 4, 'shape': array([], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0)}, {'name': 'Identity_1', 'index': 5, 'shape': array([], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0)}, {'name': 'Identity_2', 'index': 6, 'shape': array([], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0)}, {'name': 'Identity_3', 'index': 7, 'shape': array([], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0)}]
[  1 416 416   3]
(1, 416, 416, 3)
 | 
	permissive | 
	/yolov3_tiny_keras.ipynb | 
	anspire/Notebooks | 1 | 
| 
	<jupyter_start><jupyter_text>End To End Project<jupyter_code># Fetch the data and create the directory
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + "/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): 
    if not os.path.isdir(housing_path):
        os.makedirs(housing_path)
        tgz_path = os.path.join(housing_path, "housing.tgz")
        urllib.request.urlretrieve(housing_url, tgz_path)
        housing_tgz = tarfile.open(tgz_path)
        housing_tgz.extractall(path=housing_path)
        housing_tgz.close()
    
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH): 
    csv_path = os.path.join(housing_path, "housing.csv") 
    return pd.read_csv(csv_path)
# Take a quick load 
fetch_housing_data(HOUSING_URL, HOUSING_PATH)
housing = load_housing_data()
housing.head()
housing.info()
# The variable 'ocean_proximity' seems to be a categorical var, so let's take a look at it
housing["ocean_proximity"].value_counts()
# For the continuous variables we use:
# Note: describe() ignore the null values. 
housing.describe()
# Hist plot
%matplotlib inline 
# only in a Jupyter notebook 
import matplotlib.pyplot as plt 
housing.hist(bins=50, figsize=(20,15)) 
plt.show()
housing.describe()
# Create test set
import numpy as np
def split_train_test(data, test_ratio, seed):
    np.random.seed(seed)
    shuffled_indices = np.random.permutation(len(data)) 
    test_set_size = int(len(data) * test_ratio) 
    test_indices = shuffled_indices[:test_set_size] 
    train_indices = shuffled_indices[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]
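# Example usage (added comment): train_set, test_set = split_train_test(housing, 0.2, seed=42)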
## alternative approach: hash each row's id => the split stays stable when the dataset is updated
import hashlib
def test_set_check(identifier, test_ratio, hash):
    return hash(np.int64(identifier)).digest()[-1] < 256*test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5): 
    ids = data[id_column]
    in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash)) 
    return data.loc[~in_test_set], data.loc[in_test_set]
housing_with_id = housing.reset_index()
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
## stratified shuffle split by a given variable.
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) 
# Define a new categorical var from median_income
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
# Affect 5 to all values which are greater than 5
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
for train_index, test_index in split.split(housing, housing["income_cat"]):
    start_train_set = housing.loc[train_index]
    start_test_set = housing.loc[test_index]
# Visualize the Data
housing = start_train_set.copy()
housing.plot(kind = 'scatter', x= 'longitude', y= 'latitude')
housing.plot(kind = 'scatter', x= 'longitude', y = 'latitude', alpha = 0.1)
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
         s=housing["population"]/100, label="population",
         c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,)
plt.legend()
# Looking for correlation
## Correlation Pearsons
corr_matrix = housing.corr()
## scatter plot for scatter_matrix
from pandas.tools.plotting import scatter_matrix
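# Note (added comment): this notebook uses older library APIs. In current versions,
# scatter_matrix lives in pandas.plotting, and sklearn.preprocessing.Imputer has been
# replaced by sklearn.impute.SimpleImputer.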
attributes = ["median_house_value", "median_income", "total_rooms",
                  "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# Zoom for on variable 
housing.plot(kind="scatter", x="median_income", y="median_house_value",alpha=0.1)
# Prepare the Data for ML
housing = start_train_set.drop("median_house_value", axis = 1)
housing_labels = start_train_set["median_house_value"].copy()
## Data Cleaning: missing data
#housing.dropna(subset=["total_bedrooms"]) # option 1
#housing.drop("total_bedrooms", axis=1)  # option 2
median = housing["total_bedrooms"].median() # option 3
housing["total_bedrooms"].fillna(median)
### imputer handly 
from sklearn.preprocessing import Imputer
imputer = Imputer( strategy = 'median')
housing_num = housing.drop("ocean_proximity", axis = 1)
imputer.fit(housing_num)
imputer.statistics_
housing_num.median().values
X= imputer.transform(housing_num)
housing_tr = pd.DataFrame( X, columns = housing_num.columns)
## Handling Text and Categorical Attributes
### Note: integer encoding implies an ordering, which can be misleading when the categories are not ordinal
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat)
housing_cat_encoded
print(encoder.classes_)
### One hot 
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1)) 
housing_cat_1hot # sparse matrix.
housing_cat_1hot.toarray()
### Feature Scaling
### There are 2 methods: min-max and standardization.
# MinMaxScaler
# StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
        self.add_bedrooms_per_room = add_bedrooms_per_room 
        
    def fit(self, X, y=None):
        return self # nothing else to do 
    
    def transform(self, X, y=None):
        rooms_per_household = X[:, rooms_ix] / X[:, household_ix] 
        population_per_household = X[:, population_ix] / X[:, household_ix] 
        if self.add_bedrooms_per_room:
            bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
            return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
        else:
            return np.c_[X, rooms_per_household, population_per_household]
        
from sklearn.base import BaseEstimator, TransformerMixin
class DataFrameSelector(BaseEstimator, TransformerMixin): 
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names 
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.attribute_names].values
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
## Transformation Pipelines
from sklearn.pipeline import Pipeline                                        
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelBinarizer
num_pipeline = Pipeline([('imputer', Imputer(strategy="median")),('attribs_adder', CombinedAttributesAdder()),('std_scaler', StandardScaler()),])
housing_num_tr = num_pipeline.fit_transform(housing_num)
## Union Pipe Line for cat and num 
from sklearn.pipeline import FeatureUnion 
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
             ('selector', DataFrameSelector(num_attribs)),
             ('imputer', Imputer(strategy="median")),
             ('attribs_adder', CombinedAttributesAdder()),
             ('std_scaler', StandardScaler())])
class MyLabelBinarizer(TransformerMixin):
    def __init__(self):
        self.encoder = LabelBinarizer()
    def fit(self, x, y=0):
        self.encoder.fit(x)
        return self
    def transform(self, x, y=0):
        return self.encoder.transform(x)
cat_pipeline = Pipeline([
             ('selector', DataFrameSelector(cat_attribs)),
             ('label_binarizer',MyLabelBinarizer() )])
full_pipeline = FeatureUnion(transformer_list=[
             ("num_pipeline", num_pipeline),
             ("cat_pipeline", cat_pipeline),])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared, housing_labels
# SELECT AND TRAIN MODEL
## Linear model
from sklearn.linear_model import LinearRegression 
from sklearn.metrics import mean_squared_error
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print("Accuracy for linear regression : %f" %lin_rmse)
## DecisionTree
from sklearn.tree import DecisionTreeRegressor 
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print("Accuracy for tree : %f" %tree_mse)
# Overfitting for tree
# CV model
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels,
                             scoring="neg_mean_squared_error", cv=10)
rmse_scores = np.sqrt(-scores)
    
def display_scores(scores):
    print("Scores:", scores)
    print("Mean:", scores.mean())
    print("Standard deviation:", scores.std())
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,
                                scoring="neg_mean_squared_error", cv=10) 
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
# Ensemble Method
from sklearn.ensemble import RandomForestRegressor 
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, housing_labels)
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
print("RMSE for random forest : %f" % forest_rmse)
# Tuning hyper parameters
## Grid Search
from sklearn.model_selection import GridSearchCV
param_grid = [{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
        {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
                               scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared, housing_labels)
grid_search.best_params_
grid_search.best_estimator_
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
## Importance features
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"] 
#cat_one_hot_attribs = list(encoder.classes_)
#attributes = num_attribs + extra_attribs + cat_one_hot_attribs
#sorted(zip(feature_importances, attributes), reverse=True)
# EVALUATION ON TEST SET
final_model = grid_search.best_estimator_
X_test = start_test_set.drop("median_house_value", axis=1)
y_test = start_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)<jupyter_output><empty_output> | 
	no_license | 
	/Template/Supervised+.ipynb | 
	AlexandreDOMINH/DATA-SCIENCE | 1 | 
| 
	<jupyter_start><jupyter_text>
# Contourf Hatching
Demo filled contour plots with hatched patterns.
<jupyter_code>import matplotlib.pyplot as plt
import numpy as np
# invent some numbers, turning the x and y arrays into simple
# 2d arrays, which make combining them together easier.
x = np.linspace(-3, 5, 150).reshape(1, -1)
y = np.linspace(-3, 5, 120).reshape(-1, 1)
z = np.cos(x) + np.sin(y)
# we no longer need x and y to be 2 dimensional, so flatten them.
x, y = x.flatten(), y.flatten()<jupyter_output><empty_output><jupyter_text>Plot 1: the simplest hatched plot with a colorbar
<jupyter_code>fig1, ax1 = plt.subplots()
cs = ax1.contourf(x, y, z, hatches=['-', '/', '\\', '//'],
                  cmap='gray', extend='both', alpha=0.5)
fig1.colorbar(cs)<jupyter_output><empty_output><jupyter_text>Plot 2: a plot of hatches without color with a legend
<jupyter_code>fig2, ax2 = plt.subplots()
n_levels = 6
ax2.contour(x, y, z, n_levels, colors='black', linestyles='-')
cs = ax2.contourf(x, y, z, n_levels, colors='none',
                  hatches=['.', '/', '\\', None, '\\\\', '*'],
                  extend='lower')
# create a legend for the contour set
artists, labels = cs.legend_elements()
ax2.legend(artists, labels, handleheight=2)
plt.show()<jupyter_output><empty_output><jupyter_text>------------
References
""""""""""
The use of the following functions, methods and classes is shown
in this example:
<jupyter_code>import matplotlib
matplotlib.axes.Axes.contour
matplotlib.pyplot.contour
matplotlib.axes.Axes.contourf
matplotlib.pyplot.contourf
matplotlib.figure.Figure.colorbar
matplotlib.pyplot.colorbar
matplotlib.axes.Axes.legend
matplotlib.pyplot.legend
matplotlib.contour.ContourSet
matplotlib.contour.ContourSet.legend_elements<jupyter_output><empty_output> | 
	no_license | 
	/3.1.1/_downloads/95c7fa66a2f3fff1a18165a1bf108519/contourf_hatching.ipynb | 
	matplotlib/matplotlib.github.com | 4 | 
| 
	<jupyter_start><jupyter_text># Frequencies of words in novels: a Data Science pipeline
Earlier this week, I did a Facebook Live Code along session. In it, we used some basic Natural Language Processing to plot the most frequently occurring words in the novel _Moby Dick_. In doing so, we also see the efficacy of thinking in terms of the following Data Science pipeline with a constant regard for process:
1. State your question;
2. Get your data;
3. Wrangle your data to answer your question;
4. Answer your question;
5. Present your solution so that others can understand it.
In this live post, you'll learn how to build a data science pipeline to plot frequency distributions of words in *Moby Dick*, among many other novels.
We won't give you the novels: you'll learn to scrape them from the website [Project Gutenberg](https://www.gutenberg.org/) (which basically contains a large corpus of books) using the Python package `requests` and how to extract the novels from this web data using `BeautifulSoup`. Then you'll dive in to analyzing the novels using the Natural Language ToolKit (`nltk`). In the process, you'll learn about important aspects of Natural Language Processing (NLP) such as tokenization and stopwords.
You'll come out being able to visualize word frequency distributions of any novel that you can find on Project Gutenberg. The NLP skills you develop, however, will be applicable to much of the data that data scientists encounter as the vast proportion of the world's data is unstructured data and includes a great deal of text.
For example, what would the following word frequency distribution be from?
This post was generated from a Jupyter Notebook; You can find it in [this repository](https://github.com/datacamp/datacamp_facebook_live_nlp/). If you have any thoughts, responses and/or ruminations, feel free to reach out to me on twitter: [@hugobowne](https://twitter.com/hugobowne).## Pre-steps
Follow the instructions in the README.md to get your system set up and ready to go.## 1. State Your Question
What are the most frequent words in the novel _Moby Dick_ and how often do they occur?## 2. Get Your DataYour raw data is the text of Melville's novel _Moby Dick_. How would you go about getting the text of this ~800-page book into Python?
Well, there are several ways to do this but first realize that the text is freely available online at [Project Gutenberg](https://www.gutenberg.org/). Let's head there, try to find _Moby Dick_ and then store the relevant URL in your Python namespace:<jupyter_code># Store url
url = 'https://www.gutenberg.org/files/2701/2701-h/2701-h.htm'<jupyter_output><empty_output><jupyter_text>Now that you have the URL, you need to fetch the HTML of the website. 
**Note** that HTML stands for Hypertext Markup Language and is the standard markup language for the web.
You're going to use [`requests`](http://docs.python-requests.org/en/master/) to do this, one of the [most popular](https://pythontips.com/2013/07/30/20-python-libraries-you-cant-live-without/) and useful Python packages out there.
You can find out more in DataCamp's [Importing Data in Python (Part 2) course](https://www.datacamp.com/courses/importing-data-in-python-part-2). 
According to the `requests` package website:
> Requests allows you to send organic, grass-fed HTTP/1.1 requests, without the need for manual labor.
and the following organizations claim to use `requests` internally:
> Her Majesty's Government, Amazon, Google, Twilio, NPR, Obama for America,  Twitter, Sony, and Federal U.S. Institutions that prefer to be unnamed.
Moreover,
> Requests is one of the most downloaded Python packages of all time, pulling in over 13,000,000 downloads every month. All the cool kids are doing it!
You'll be making a `GET` request from the website, which means you're _getting_ data from it. This is what you're doing through a browser when visiting a webpage using a browser. There are other types of requests, such as `POST` requests, but we won't concern ourselves with them here.
`requests` make this easy with its `get` function. Make the request here and check the object type returned.<jupyter_code># Import `requests`
import requests
# Make the request and check object type
r = requests.get(url)
type(r)<jupyter_output><empty_output><jupyter_text>This is a `Response` object. You can see in the [`requests` kickstart guide](http://docs.python-requests.org/en/master/user/quickstart/) that a `Response` object has an attribute `text` that allows you to get the HTML from it! Let's do this and print the HTML to check it out:<jupyter_code># Extract HTML from Response object and print
html = r.text
print(html)<jupyter_output><?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html
   PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
   "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd" >
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
  <head>
    <title>
      Moby Dick; Or the Whale, by Herman Melville
    </title>
    <style type="text/css" xml:space="preserve">
    body { background:#ffffff; color:black; margin-left:15%; margin-right:15%; text-align:justify }
    P { text-indent: 1em; margin-top: .25em; margin-bottom: .25em; }
    H1,H2,H3,H4,H5,H6 { text-align: center; margin-left: 15%; margin-right: 15%; }
    hr  { width: 50%; text-align: center;}
    .foot { margin-left: 20%; margin-right: 20%; text-align: justify; text-indent: -3em; font-size: 90%; }
    blockquote {font-size: 100%; margin-left: 0%; margin-right: 0%;}
    .mynote    {background-color: #DDE; color: #000; padding: .5em; margin-left: 10%; margin-right: 10%; font-family: sans-serif; font-size: 95%;}
    .toc     [...]<jupyter_text>OK! This HTML is not quite what you want. However, it does _contain_ what you want: the text of _Moby Dick_. What you need to do now is _wrangle_ this HTML to extract the novel. ## 3. Wrangle the Data to Answer the Question### Part 1: Get the Text from the HTML
Here you'll use the package [`BeautifulSoup`](https://www.crummy.com/software/BeautifulSoup/). The package website says:
This looks promising!
Firstly, a word on the name of the package: Beautiful Soup? In web development, the term "tag soup" refers to structurally or syntactically incorrect HTML code written for a web page. What Beautiful Soup does best is to make tag soup beautiful again and to extract information from it with ease! In fact, the main object created and queried when using this package is called `BeautifulSoup`. After creating the soup, we can use its `.get_text()` method to extract the text.<jupyter_code># Import BeautifulSoup from bs4
from bs4 import BeautifulSoup
# Create a BeautifulSoup object from the HTML
soup = BeautifulSoup(html, "html.parser")
type(soup)<jupyter_output><empty_output><jupyter_text>From these soup objects, you can extract all types of interesting information about the website you're scraping, such as title:<jupyter_code># Get soup title
soup.title<jupyter_output><empty_output><jupyter_text>Or the title as a string:<jupyter_code># Get soup title as string
print(soup.title.string)
print(soup.title.get_text())
print(soup.title.text)<jupyter_output>
      Moby Dick; Or the Whale, by Herman Melville
    
      Moby Dick; Or the Whale, by Herman Melville
    
      Moby Dick; Or the Whale, by Herman Melville
    
<jupyter_text>Or all URLs found within a page’s  tags (hyperlinks):<jupyter_code># Get hyperlinks from soup and check out first several
soup.find_all('a')[:8]<jupyter_output><empty_output><jupyter_text>What you want to do is to extract the text from the `soup` and there's a super helpful `.get_text()` method precisely for this. 
Get the text, print it out and have a look at it. Is it what you want?<jupyter_code># Get the text out of the soup and print it
text = soup.get_text()
print(text)<jupyter_output>
      Moby Dick; Or the Whale, by Herman Melville
    
    body { background:#ffffff; color:black; margin-left:15%; margin-right:15%; text-align:justify }
    P { text-indent: 1em; margin-top: .25em; margin-bottom: .25em; }
    H1,H2,H3,H4,H5,H6 { text-align: center; margin-left: 15%; margin-right: 15%; }
    hr  { width: 50%; text-align: center;}
    .foot { margin-left: 20%; margin-right: 20%; text-align: justify; text-indent: -3em; font-size: 90%; }
    blockquote {font-size: 100%; margin-left: 0%; margin-right: 0%;}
    .mynote    {background-color: #DDE; color: #000; padding: .5em; margin-left: 10%; margin-right: 10%; font-family: sans-serif; font-size: 95%;}
    .toc       { margin-left: 10%; margin-bottom: .75em;}
    .toc2      { margin-left: 20%;}
    div.fig    { display:block; margin:0 auto; text-align:center; }
    div.middle { margin-left: 20%; margin-right: 20%; text-align: justify; }
    .figleft   {float: left; margin-left: 0%; margin-right: 1%;[...]<jupyter_text>Notice that this is now nearly what you want. 
It is the text of the novel with some unwanted stuff at the start and some unwanted stuff at the end. You could remove it if you wanted. However, this content is so much smaller in amount than the text of Moby Dick that, to a first approximation, it is fine to leave in and this will be the approach here. To get robust results, I'd suggest removing it.
Now that you have the text of interest, it's time for you to count how many times each word appears and to plot the frequency histogram that you want: Natural Language Processing to the rescue!### Part 2: Extract Words from your Text with NLPYou'll now use `nltk`, the Natural Language Toolkit, to
1. Tokenize the text (fancy term for splitting into tokens, such as words);
2. Remove stopwords (words such as 'a' and 'the' that occur a great deal in nearly all English-language texts).
#### Step 1: Tokenize
You want to tokenize your text, that is, split it into a **list of words**. Essentially, you want to split off the parts of the text that are separated by whitespace.
To do this, you're going to use a powerful tool called _regular expressions_. A regular expression, or _regex_ for short, is a _sequence of characters_ that define a _search pattern_. They are notoriously confusing and best introduced by example.
* You have the string 'peter piper picked a peck of pickled peppers' and you want to extract from it the list of _all_ words that start with a 'p'.
The regular expression that matches all words beginning with 'p' is 'p\w+'. Let's unpack this: 
* the 'p' at the beginning of the regular expression means that you'll only match sequences of characters that start with a 'p';
* the '\w' is a special character that will match any alphanumeric A-z, a-z, 0-9, along with underscores;
* The '+' tells you that the previous character in the regex can appear as many times as you want in strings that you're trying to match. This means that '\w+' will match arbitrary sequences of alphanumeric characters and underscores.
Put this all together and the regular expression 'p\w+' will match all substrings that start with a 'p' and are followed by alphanumeric characters and underscores. In most English language texts that make sense, this will correspond to words beginning with 'p'.
You'll now use the built-in Python package `re` to extract all words beginning with 'p' from the sentence 'peter piper picked a peck of pickled peppers' as a warm-up.<jupyter_code># Import regex package
import re
# Define sentence
sentence = 'peter piper picked a peck of pickled peppers'
# Define regex
ps = 'p\w+'
# Find all words in sentence that match the regex and print a list of them
re.findall(ps, sentence)<jupyter_output><empty_output><jupyter_text>This looks pretty good. Now, if 'p\w+' is the regex that matches words beginning with 'p', what's the regex that matches all words?
It's your job to now do this for our toy Peter Piper sentence above.<jupyter_code># Find all words and print them
re.findall('\w+', sentence)<jupyter_output><empty_output><jupyter_text>Now you can do the same with `text`, the string that contains _Moby Dick_:<jupyter_code># Find all words in Moby Dick and print several
tokens = re.findall('\w+', text)
tokens[:10]<jupyter_output><empty_output><jupyter_text>**Note** that there is also a way to do this with `nltk`, the [Natural Language Toolkit](http://www.nltk.org/):<jupyter_code># Import RegexpTokenizer from nltk.tokenize
from nltk.tokenize import RegexpTokenizer
# Create tokenizer
tokenizer = RegexpTokenizer('\w+')
# Create tokens
tokens = tokenizer.tokenize(text)
tokens[:8]<jupyter_output><empty_output><jupyter_text>OK! You're nearly there. Note, though, that in the above, 'Or' has a capital 'O' and that in other places it may not but both 'Or' and 'or' you will want to count as the same word. For this reason, you will need to build a list of all words in _Moby Dick_ in which all capital letters have been made lower case. You'll find the string method `.lower()` handy:<jupyter_code># Initialize new list
words = []
# Loop through list tokens and make lower case
for word in tokens:
    words.append(word.lower())
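# Equivalent one-liner using a list comprehension (a sketch; it produces the same list as the loop above)
words = [token.lower() for token in tokens]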
# Print several items from list as sanity check
words[:8]<jupyter_output><empty_output><jupyter_text>#### Step 2: Remove stop words
It is common practice to remove words that appear a lot in the English language, such as 'the', 'of' and 'a' (known as stopwords), because they're not so interesting. For more on all of these techniques, check out our [Natural Language Processing Fundamentals in Python course](https://www.datacamp.com/courses/nlp-fundamentals-in-python). 
The package `nltk` has a list of stopwords in English which you'll now store as `sw` and of which you'll print the first several elements.
If you get an error here, run the command `nltk.download('stopwords')` to install the stopwords on your system.<jupyter_code># Import nltk
import nltk
# nltk.download('stopwords')
# Get English stopwords and print some of them
sw = nltk.corpus.stopwords.words('english')
sw[:10]<jupyter_output><empty_output><jupyter_text>You want the list of all words in `words` that are *not* in `sw`. One way to get this list is to loop over all elements of `words` and add them to a new list if they are *not* in `sw`:<jupyter_code># Initialize new list
words_ns = []
# Add to words_ns all words that are in words but not in sw
for word in words:
    if word not in sw:
        words_ns.append(word)
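# Equivalent filtering with a set for faster membership tests (a sketch; it gives the same result as the loop above)
sw_set = set(sw)
words_ns = [word for word in words if word not in sw_set]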
# Print several list items as sanity check
words_ns[:8]<jupyter_output><empty_output><jupyter_text>## 4. Answer your question
Our question was 'What are the most frequent words in the novel Moby Dick and how often do they occur?' 
You can now plot a frequency histogram of words in Moby Dick in two lines of code using `nltk`. To do this,
* You create a frequency distribution object using the function `nltk.FreqDist()`;
* You use the `plot()` method of the resulting object.<jupyter_code>#Import datavis libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Figures inline and set visualization style
%matplotlib inline
sns.set()
# Create freq dist and plot
freqdist1 = nltk.FreqDist(words_ns)
freqdist1.plot(30)<jupyter_output><empty_output><jupyter_text>## 5. Present Your Solution
The cool thing is that, in using `nltk` to answer our question, we actually already presented our solution in a manner that can be communicated to others: a frequency distribution plot! You can read off the most common words, along with their frequency. For example, 'whale' is the most common word in the novel (go figure), excepting stopwords, and it occurs a whopping >1200 times! ___
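If you'd rather read off exact counts than eyeball the plot, note that `nltk.FreqDist` behaves like a `collections.Counter`, so it also has a `most_common()` method; a minimal sketch:

```python
# Print the ten most frequent non-stopword tokens with their counts
print(freqdist1.most_common(10))
```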
## BONUS MATERIAL
As you have seen, there are lots of novels on Project Gutenberg that we could make word frequency histograms of, so it makes sense to write your own function that does all of this:<jupyter_code>def plot_word_freq(url):
    """Takes a url (from Project Gutenberg) and plots a word frequency
    distribution"""
    # Make the request and check object type
    r = requests.get(url)
    # Extract HTML from Response object and print
    html = r.text
    # Create a BeautifulSoup object from the HTML
    soup = BeautifulSoup(html, "html.parser")
    # Get the text out of the soup and print it
    text = soup.get_text()
    # Create tokenizer
    tokenizer = RegexpTokenizer('\w+')
    # Create tokens
    tokens = tokenizer.tokenize(text)
    # Initialize new list
    words = []
    # Loop through list tokens and make lower case
    for word in tokens:
        words.append(word.lower())
    # Get English stopwords and print some of them
    sw = nltk.corpus.stopwords.words('english')
    # Initialize new list
    words_ns = []
    # Add to words_ns all words that are in words but not in sw
    for word in words:
        if word not in sw:
            words_ns.append(word)
    # Create freq dist and plot
    freqdist1 = nltk.FreqDist(words_ns)
    freqdist1.plot(25)<jupyter_output><empty_output><jupyter_text>Now use the function to plot word frequency distributions from other texts on Project Gutenberg:* Pride and Prejudice:<jupyter_code>plot_word_freq('https://www.gutenberg.org/files/42671/42671-h/42671-h.htm')<jupyter_output><empty_output><jupyter_text>* Robinson Crusoe<jupyter_code>plot_word_freq('https://www.gutenberg.org/files/521/521-h/521-h.htm')<jupyter_output><empty_output><jupyter_text>* The King James Bible<jupyter_code>plot_word_freq('https://www.gutenberg.org/files/10/10-h/10-h.htm')<jupyter_output><empty_output> | 
	<jupyter_start><jupyter_text>## Seattle Terry Stops Final Project Submission
* Student name: Rebecca Mih
* Student pace: Part Time Online
* Scheduled project review date/time: 
* Instructor name: James Irving
* Blog post URL: 
* **Data Source:**  https://www.kaggle.com/city-of-seattle/seattle-terry-stops
    * Date of last update to the datasource: April 15, 2020
* **Key references:**
* https://assets.documentcloud.org/documents/6136893/SPDs-2019-Annual-Report-on-Stops-and-Detentions.pdf 
* https://www.seattletimes.com/seattle-news/crime/federal-monitor-finds-seattle-police-are-conducting-proper-stops-and-frisks/  
* https://catboost.ai/docs/concepts/python-reference_catboost_grid_search.html
* https://towardsdatascience.com/catboost-vs-light-gbm-vs-xgboost-5f93620723db
<img src= "Seattle Police Dept.jpg"
           width=200"/>
</div
## Background
https://caselaw.findlaw.com/us-supreme-court/392/1.html
This data represents records of police reported stops under Terry v. Ohio, 392 U.S. 1 (1968). Each row represents a unique stop.
 A Terry stop is a seizure under both state and federal law. A Terry stop is
defined in policy as a brief, minimally intrusive seizure of a subject based upon
**articulable reasonable suspicion (ARS) in order to investigate possible criminal activity.**
The stop can apply to people as well as to vehicles. The subject of a Terry stop is
**not** free to leave.
Section 6.220 of the Seattle Police Department (SPD) Manual defines Reasonable Suspicion as:
Specific, objective, articulable facts which, taken together with rational inferences, would
create a  **well-founded suspicion that there is a substantial possibility that a subject has
engaged, is engaging or is about to engage in criminal conduct.**
- Each record contains perceived demographics of the subject, as reported by the officer making the stop and officer demographics as reported to the Seattle Police Department, for employment purposes.
- Where available, data elements from the associated Computer Aided Dispatch (CAD) event (e.g. Call Type, Initial Call Type, Final Call Type) are included.
## Notes on Concealed Weapons in the State of Washington
WHAT ARE WASHINGTON’S CONCEALED CARRY LAWS?
Open carry of a firearm is lawful without a permit in the state of Washington except, according to the law, “under circumstances, and at a time and place that either manifests an intent to intimidate another or that warrants alarm for the safety of other persons.”
**However, open carry of a loaded handgun in a vehicle is legal only with a concealed pistol license. Open carry of a loaded long gun in a vehicle is illegal.**
The criminal charge of “carrying a concealed firearm” happens in this state when someone carries a concealed firearm **without a concealed pistol license**. It does not matter if the weapon was discovered in the defendant’s home, vehicle, or on his or her person.
## Objectives
### Target:
   * Identify Terry Stops which lead to Arrest or Prosecution (Binary Classification)
    
### Features:
   * Location (Precinct)
   * Day of the Week (Date)
   * Shift (Time)
   * Initial Call Type
   * Final Call Type
   * Stop Resolution
   * Weapon type
   * Officer Squad
   * Age of officer
   * Age of detainee
    
    
### Optional Features:
   * Race of officer
   * Race of detainee
   * Gender of officer
   * Gender of detainee
    
   ## Definition of Features Provided
Column Names and descriptions provided in the SPD dataset  
* **Subject Age Group**	
Subject Age Group (10 year increments) as reported by the officer. 
* **Subject ID**	
Key, generated daily, identifying unique subjects in the dataset using a character to character match of first name and last name. "Null" values indicate an "anonymous" or "unidentified" subject. Subjects of a Terry Stop are not required to present identification.  **Not Used** 
* **GO / SC Num**
General Offense or Street Check number, relating the Terry Stop to the parent report. This field may have a one to many relationship in the data. **Not Used** 
* **Terry Stop ID**
Key identifying unique Terry Stop reports.  **Not Used**
* **Stop Resolution**
Resolution of the stop. **One hot encoding** 
* **Weapon Type**	
Type of weapon, if any, identified during a search or frisk of the subject. Indicates "None" if no weapons was found.  
* **Officer ID**	
Key identifying unique officers in the dataset.
**Not Used** 
* **Officer YOB**	
Year of birth, as reported by the officer.  
* **Officer Gender**	
Gender of the officer, as reported by the officer.
 
* **Officer Race**	
Race of the officer, as reported by the officer. 
* **Subject Perceived Race**	
Perceived race of the subject, as reported by the officer. 
* **Subject Perceived Gender**	
Perceived gender of the subject, as reported by the officer. 
* **Reported Date**	
Date the report was filed in the Records Management System (RMS). Not necessarily the date the stop occurred but generally within 1 day.  
* **Reported Time**	
Time the stop was reported in the Records Management System (RMS). Not the time the stop occurred but generally within 10 hours.  
* **Initial Call Type**	
Initial classification of the call as assigned by 911.  
* **Final Call Type**	
Final classification of the call as assigned by the primary officer closing the event.  
* **Call Type**	
How the call was received by the communication center.
* **Officer Squad**	
Functional squad assignment (not budget) of the officer as reported by the Data Analytics Platform (DAP). 
* **Arrest Flag**	
Indicator of whether a "physical arrest" was made, of the subject, during the Terry Stop. Does not necessarily reflect a report of an arrest in the Records Management System (RMS). 
* **Frisk Flag**	
Indicator of whether a "frisk" was conducted, by the officer, of the subject, during the Terry Stop. 
* **Precinct**	
Precinct of the address associated with the underlying Computer Aided Dispatch (CAD) event. Not necessarily where the Terry Stop occurred. 
* **Sector**	
Sector of the address associated with the underlying Computer Aided Dispatch (CAD) event. Not necessarily where the Terry Stop occurred. 
* **Beat**	
Beat of the address associated with the underlying Computer Aided Dispatch (CAD) event. Not necessarily where the Terry Stop occurred.
## Analysis Workflow (OSEMN)
1. **Obtain and Pre-process**
    - [x] Import data
    - [x] Remove unused columns
    - [x] Check data size, NaNs, and # of non-null values which are not valid data 
    - [x] Clean up missing values by imputing values or dropping
    - [x] Replace ? or other non-valid data by imputing values or dropping data
    - [x] Check for duplicates and remove if appropriate
    - [x] Change datatypes of columns as appropriate 
    - [x] Note which features are continuous and which are categorical
2. **Data Scoping**
     - [x] Use value_counts() to identify dummy categories such as "-", or "?" for later re-mapping
     - [x] Identify most common word data
     - [x] Decide on which columns (features) to keep for further feature engineering
   
3. **Transformation of data (Feature Engineering)**
    - [x] Re-bin categories to reduce noise
    - [x] Re-map categories as needed
    - [x] Engineer text data to extract common word information
    - [x] Transform categoricals using 1-hot encoding or label encoding/
    - [x] Perform log transformations on continuous variables (if applicable)
    - [x] Normalize continuous variables
    - [x] Use re-sampling if needed to balance the dataset  
    
4. **Further Feature Selection**
     - [x] Use .describe() and .hist() histograms
     - [x] Identify outliers (based on auto-scaling of plots) and remove or inpute as needed
     - [x] Perform visualizations on key features to understand  
     - [x] Inspect feature correlations (Pearson correlation) to identify collinear features
5.  **Create a Vanilla Machine Learning Model**
    - [x] Split into train and test data 
    - [x] Run the model
    - [x] Review Quality indicators of the model 
6. **Run more advanced models**
    - [x] Compare the model quality
    - [x] Choose one or more models for grid searching 
    
7. **Revise data inputs if needed to improve quality indicators**
    - [x] By adding created features, and removing collinear features
    - [x] By improving unbalanced datasets through oversampling or undersampling
    - [x] by removing outliers through filters
    - [x] through use of subject matter knowledge 
    
8. **Write the Report**
    - [X] Explain key findings and recommended next steps
## 1. Obtain and Pre-Process the Data
1. **Obtain and Pre-process**
    - [x] Import data
    - [x] Remove unused columns
    - [x] Check data size, NaNs, and # of non-null values which are not valid data 
    - [x] Clean up missing values by imputing values or dropping
    - [x] Replace ? or other non-valid data by imputing values or dropping data
    - [x] Check for duplicates and remove if appropriate
    - [x] Change datatypes of columns as appropriate 
    - [x] Decide the target column, if not already decided
    - [x] Determine if some data is not relevent to the question (drop columns or rows)
    - [x] Note which features which will need to be re-mapped or encoded 
    - [x] Note which features might require feature engineering (example - date, time) <jupyter_code>#!pip install -U fsds_100719
from fsds_100719.imports import *
#import pandas as pd
#import numpy as np
#import matplotlib.pyplot as plt
#import seaborn as sns
import copy
import sklearn
import math
import datetime
#import plotly.express as px
#import plotly.graphy_objects as go
import warnings
warnings.filterwarnings('ignore')
import sklearn.metrics as metrics
pd.options.display.float_format = '{:.2f}'.format
pd.set_option('display.max_columns',0)
pd.set_option('display.max_info_rows',200)
%matplotlib inline
def plot_importance2(tree, top_n=20,figsize=(10,10)):
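    '''Plot the top_n most important features of a fitted tree-based model as a horizontal
    bar chart (relies on the global X_train for feature names) and return the full importance series.'''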
    df_importance = pd.Series(tree.feature_importances_,index=X_train.columns)
    df_importance.sort_values(ascending=True).tail(top_n).plot(kind='barh',figsize=figsize)
    return df_importance
#check = evaluate_model(y_test,y_hat_test, X_test, xgb_rf)
plot_importance2(xgb_rf)
# Write a function which evaluates the model and returns the feature-importance series
def evaluate_model(y_true, y_pred,X_true,clf,cm_kws=dict(cmap="Blues",
                                  normalize='true'),figsize=(10,4),plot_roc_auc=True):
    
    ## Reporting Scores
    print('Accuracy Score :',accuracy_score(y_true, y_pred))
    print(metrics.classification_report(y_true,y_pred, 
                                        target_names = ['Not Arrested', 'Arrested']))
    if plot_roc_auc:
        num_cols=2
    else:
        num_cols=1
        
    fig, ax = plt.subplots(figsize=figsize,ncols=num_cols)
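    # plt.subplots returns a single Axes object (not an array) when ncols == 1,
    # so wrap it in a list below to allow uniform ax[0] / ax[1] indexing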
    
    if not isinstance(ax,np.ndarray):
        ax=[ax]
    metrics.plot_confusion_matrix(clf,X_true,y_true,ax=ax[0],**cm_kws)
    ax[0].set(title='Confusion Matrix')
    
    if plot_roc_auc:
        try:
            y_score = clf.predict_proba(X_true)[:,1]
            fpr,tpr,thresh = metrics.roc_curve(y_true,y_score)
            # print(f"ROC-area-under-the-curve= {}")
            roc_auc = round(metrics.auc(fpr,tpr),3)
            ax[1].plot(fpr,tpr,color='darkorange',label=f'ROC Curve (AUC={roc_auc})')
            ax[1].plot([0,1],[0,1],ls=':')
            ax[1].legend()
            ax[1].grid()
            ax[1].set(ylabel='True Positive Rate',xlabel='False Positive Rate',
                  title='Receiver operating characteristic (ROC) Curve')
            plt.tight_layout()
            plt.show()
        except:
            pass
    try: 
        df_important = plot_importance(clf)
    except:
        df_important = None
    
    return df_important
def plot_importance(tree, top_n=20,figsize=(10,10),expt_name='Model'):
    
    '''Feature Selection tool, which plots the feature importance based on results
    
    Inputs:
      tree: classification learning function utilized
      top_n: top n features contributing to the model, default = 20
      figsize:  size of the plot,  default=(10,10)
      expt_name: Pass in the experiment name, so that the saved feature importance image will be unique
                  default = Model
                  
    Returns: df_importance - series of the model features sorted by importance
    Saves:  Feature importance figure as  "Feature expt_name.png", Default expt_name = "Model" '''
    
    df_importance = pd.Series(tree.feature_importances_,index=X_train.columns)
    df_importance.sort_values(ascending=True).tail(top_n).plot(kind='barh',figsize=figsize)
    
    plt.savefig(("Feature {}.png").format(expt_name))
    #plt.savefig("Feature Importance 2.png", transparent = True)
    return df_importance
#check = evaluate_model(y_test,y_hat_test, X_test, xgb_rf)
plot_importance(xgb_rf)
# Write a function which evaluates the model and returns a dataframe of classification metrics
def evaluate_model(y_true, y_pred,X_true,clf,metrics_df=None,
                    cm_kws=dict(cmap="Greens",normalize='true'),figsize=(10,4),plot_roc_auc=True, 
                    expt_name='Model'):
    
    '''Function which evaluates each model, stores the result and figures
    Inputs: 
        y_true: target output of the model based on test data
        y_pred: target input to the model based on train data
        X_true: result output of the model based on test data
        
        clf:  classification learning function utilized for the model (examples: xgb-rf, Catboost)
        
        metrics_df: dataframe which contains the classification metrics 
                (precision, recall, f1-score, weighted average)
                
        cm_kws: keyword settings for plotting and normalization
                Defaults: cmap="Blues", normalize = "true"
                figsize: size of the plot,  default=(10,10)
        expt_name: Pass in the experiment name, so that the saved feature importance image will be unique
                  default = A
      
        
    Outputs:  df_important -  series of the model features sorted by importance
    Saves:   roc_auc plot - plot of AUC for the model
             Feature importance plot
        
    '''
    
    ## Reporting Scores
    accuracy_result = accuracy_score(y_true, y_pred)
    print('Accuracy Score for {}: {}'.format(expt_name, accuracy_result))
    metrics_report = metrics.classification_report(y_true,y_pred, 
                                            target_names = ['Not Arrested', 'Arrested'],
                                           output_dict=True)
    #print(metrics_report)
    ## Save scores into the results dataframe
    result_df = pd.DataFrame(metrics_report).transpose()
    #display(result_df)
    result_df.drop(labels='macro avg',axis = 0, inplace=True)
    result_df.drop(labels='support', axis = 1, inplace=True)
    #display(result_df)
    # Swap Rows  https://stackoverflow.com/questions/55439469/swapping-two-rows-together-with-index-within-the-same-pandas-dataframe
    result_df = result_df.iloc[np.r_[0:len(result_df) - 2, -1, -2]]  # assign so the row swap actually takes effect
    result_df.rename(index= {'weighted avg':'Weighted Avg', 'accuracy':'Accuracy'}, inplace=True)
    result_df.rename(columns = {'precision': 'Precision', 'recall':'Recall', 
                                'f1-score':'F1 Score'}, inplace=True)
    column_list = result_df.columns
    display(result_df)
    
    
    if plot_roc_auc:
        num_cols=2
    else:
        num_cols=1
        
    fig, ax = plt.subplots(figsize=figsize,ncols=num_cols)
    
    if not isinstance(ax,np.ndarray):
        ax=[ax]
    metrics.plot_confusion_matrix(clf,X_true,y_true,ax=ax[0],**cm_kws)
    ax[0].set(title='Confusion Matrix')
    plt.savefig("Confusion Matrix {}.png").format(expt_name)
    
    if plot_roc_auc:
        try:
            y_score = clf.predict_proba(X_true)[:,1]
            fpr,tpr,thresh = metrics.roc_curve(y_true,y_score)
            roc_auc = round(metrics.auc(fpr,tpr),3)
            
            ax[1].plot(fpr,tpr,color='darkorange',label=f'ROC Curve (AUC={roc_auc})')
            ax[1].plot([0,1],[0,1],ls=':')
            ax[1].legend()
            ax[1].grid()
            ax[1].set(ylabel='True Positive Rate',xlabel='False Positive Rate',
                  title='Receiver operating characteristic (ROC) Curve')
            plt.tight_layout()
            plt.savefig("ROC Curve {}.png".format(expt_name))  # save before show so the saved figure is not blank
            plt.show()
 #           #res = result_df.set_value(len(res), roc_auc, roc_auc, roc_auc)
        except:
            print('ROC-AUC not working')
    try: 
        df_important = plot_importance(clf)
    except:
        df_important = None
        print('importance plotting not working')
    
    return result_df
#def evaluate_model(y_true, y_pred,X_true,clf,metrics_df,
#                    cm_kws=dict(cmap="Greens",normalize='true'),figsize=(10,4),plot_roc_auc=True, 
#                    expt_name='Model'):
''' metrics_report = metrics.classification_report(y_test,y_hat_test, 
                                        target_names = ['Not Arrested', 'Arrested'],
                                       output_dict=True)
#print(metrics_report)
## Save scores into the results dataframe
result_df = pd.DataFrame(metrics_report).transpose()
#display(result_df)
result_df.drop(labels='macro avg',axis = 0, inplace=True)
result_df.drop(labels='support', axis = 1, inplace=True)
#display(result_df)
# Swap Rows  https://stackoverflow.com/questions/55439469/swapping-two-rows-together-with-index-within-the-same-pandas-dataframe
result_df.iloc[np.r_[0:len(result_df) - 2, -1, -2]] 
result_df.rename(index= {'weighted avg':'Weighted Avg', 'accuracy':'Accuracy'}, inplace=True)
result_df.rename(columns = {'precision': 'Precision', 'recall':'Recall', 
                            'f1-score':'F1 Score'}, inplace=True)
column_list = result_df.columns
#result_df = result_df.set_value(len(result_df), 'aoc', 'aoc', 'aoc')
display(result_df) '''
df = pd.read_csv('Terry_Stops.csv',low_memory=False)
df.duplicated().sum()
df.head()<jupyter_output><empty_output><jupyter_text>* Drop Columns which contain IDs, which are not useful features.<jupyter_code>df.drop(columns = ['Subject ID', 'GO / SC Num', 'Terry Stop ID', 'Officer ID'], inplace=True)
df.duplicated().sum()
# After dropping some of the columns, some rows appear to be duplicated.
# However, since the date and time of the incident are NOT exact (i.e. the date could be 24 hours later, and the
# time could be 10 hours later), it's possible to get some that are similar on different consecutive dates.
df.columns
col_names = df.columns
print(col_names)
df.shape
# The rationale for this is to understand how big the dataset is,  how many features are contained in the data
# This helps with planning for function vs lambda functions,  and whether certain kinds of visualizations will be feasible
# for the analysis (with my computer hardware).  With compute limitations, some types of correlation plots cause the kernel to die,
# if there are more than 11 features.<jupyter_output><empty_output><jupyter_text>* df.isna().sum()
isna().sum() determines how many data are missing from a given feature
* df.info() 
df.info() helps you determine if there are missing values or datatypes that need to be modified
* Handy alternate checks if needed **
    - [x] df.isna().any()
    - [x] df.isnull().any()
    - [x] df.shape<jupyter_code>df.isna().sum()
df['Officer Squad'].fillna('Unknown', inplace=True)<jupyter_output><empty_output><jupyter_text>* Findings from isna().sum() *
* Officer Squad has 535 missing data (1.3% of the data)
    * Impute "Unknown"<jupyter_code>df.isna().sum()
df.info()
df.duplicated().sum()
duplicates = df[df.duplicated(keep = False)]
#duplicates.head(118)<jupyter_output><empty_output><jupyter_text>#### Use value_counts() - inspect for dummy variables, and determine next steps for data cleaning
1. Rationale:  This analysis is useful for flushing out missing values in the form of question marks, dashes or other symbols or dummy variables 
2.  It also gives a preliminary view of the number and distribution of categories in each feature, albeit by numbers rather than graphics 
3. For text data, value_counts serves as a preliminary investigation of the common important word data 
<jupyter_code>for col in df.columns:
    print(col, '\n', df[col].value_counts(), '\n')<jupyter_output>Subject Age Group 
 26 - 35         13615
36 - 45          8547
18 - 25          8509
46 - 55          5274
56 and Above     1996
1 - 17           1876
-                1287
Name: Subject Age Group, dtype: int64 
Stop Resolution 
 Field Contact               16287
Offense Report              13976
Arrest                       9957
Referred for Prosecution      728
Citation / Infraction         156
Name: Stop Resolution, dtype: int64 
Weapon Type 
 None                                 32565
-                                     6213
Lethal Cutting Instrument             1482
Knife/Cutting/Stabbing Instrument      308
Handgun                                262
Firearm Other                          100
Club, Blackjack, Brass Knuckles         49
Blunt Object/Striking Implement         37
Firearm                                 18
Firearm (unk type)                      15
Other Firearm                           13
Mace/Pepper Spray                       12
Club                          [...]<jupyter_text>####  Findings from value_counts() and Next Steps:
1. The "-" is used as a substitute for unknown, in many cases.  Perhaps it would be good to build a function to impute "unknown" for the "-" for multiple features
2. Race and gender need re-mapping
3. Call Types, Weapons need re-binning
4. Officer Squad text can be split and provide the precinct, and the watch.
**Next steps:**
- [x] Investigation of the Stop Resolution, to determine whether the target should be "Stop Resolution - Arrests" or "Arrest Flag", and whether "Frisk Flag" is useful for predicting arrests.
- [x] Decide whether time and location information can be extracted from the "Officer Squad" column instead of the columns for time, Precinct, Sector and Beats
 
    
    <jupyter_code># Viewing the data to get a sense of which Stop Resolutions are correlated to the "Arrest Flag"
df.sort_values(by=['Stop Resolution'], ascending=True).head(100)
# Check out what are the differences between a Stop Resolution of "Arrest" and the "Arrest Flag" 
df.loc[(df['Stop Resolution']=='Arrest') & (df['Arrest Flag']=="N")].shape
# This is the number of cases where the final stop resolution as reported by the officer, was "Arrest" and the
# Arrest Flag was N.  This indicates that many arrests are finalized after the actual Terry Stop
df.loc[(df['Stop Resolution']!='Arrest') & (df['Arrest Flag']=="Y")].shape
# Number of times an arrest was not made,  but the arrest flag was yes (an arrest was made during the Terry Stop)
df.loc[(df['Stop Resolution']=='Arrest') & (df['Arrest Flag']=="Y")].shape
# These are the number of arrests DURING the Terry stop,  that had a final resolution of arrest
# Conclusion:  Use the Stop Resolution of Arrest to capture all the arrests made arising from a Terry stop
# The total number of arrests as reported by the officers is 8210 + 1747 or ~ 25% of the total # of Terry stops
# Check to see whether the Frisk Flag has usefulness
df.loc[(df['Stop Resolution']=='Arrest') & (df['Frisk Flag']=="Y")].shape
# Out of 10,000 arrests (and ~ 9000 Frisks), the number of arrest, that were frisked was ~30%
# It would appear that the 'Frisk Flag' is not helpful for predicting arrests.  Drop the 'Frisk Flag'
# Check whether 'Call Type' has usefulness
df.loc[(df['Stop Resolution']=='Arrest') & (df['Call Type']=="911")].shape
# Out of ~10,000 arrests roughly 50% came through 911.  Doesn't appear to be particularly useful for predicting arrests
# Drop the 'Call Type'
df.head()<jupyter_output><empty_output><jupyter_text>## 2. Data Scoping
1. Which is better to use as the target: the "Arrest Flag" column or the "Stop Resolution" column?
* Arrest Flag is a '1' only when there was an actual arrest during the Terry Stop, which may not be easy to accomplish on the spot, resulting in a lower number (1747).
* Stop Resolution records ~10,000 arrests, roughly 25% of the total dataset.  Since Stop Resolution is about officers recording the resolution of the Terry Stop, and with a likely performance target for officers, they are likely to record this more accurately.
* A quick check of "Frisk Flag", which is an indicator of those Terry stops where a frisk was performed, does not seem well correlated with arrests.  Recommend dropping "Frisk Flag".
#### Conclusion: Use "Stop Resolution" Arrests as the target
  - [x] Create a new column called "Arrests" which encodes Stop Resolution Arrests as a "1" and all others "0".  
  - [x] Drop the "Arrest Flag" column
  - [x] Drop the "Frisk Flag" column 
    
2. Location data: there are a number of columns which relate to location, such as "Precincts", "Officer Squad", "Sector", and "Beat", but these are indirect measures of the actual location of the Terry Stop. Inspection of the "Officer Squad" text shows the location assignment of the officer making the report. In ~10% of cases, Terry stops were performed by field training units or other units which are not captured by precinct (hence roughly 25% of the precincts are unknown). The training unit information is captured in the "Officer Squad" column.  
3. For time data there is a "Reported Time" -- which is the time when the officer report was submitted, and according to the documentation could be delayed up to 10 hours, rather than the time of the actual Terry stop.  
    However, inspection of the text in "Officer Squad" shows that the reporting officer's watch is recorded. In the Seattle police department there are 3 watches to cover each 24-hour period: Watch 1 (03:00 - 11:00), Watch 2 (11:00 - 19:00), and Watch 3 (19:00 - 03:00).  Since officer performance is rated based on the number of cases and crimes prevented or apprehended, the "Officer Squad" data, which comes from the report, is likely to be the most reliable in terms of time.
    
#### Conclusion: Use "Officer Squad" text data for time and location
- [x] Parse the "Officer Squad" data to capture the location and time based on officer assignments, creating columns for location and watch. 
- [x] Drop the "Reported Time", "Precincts", "Sector", and "Beat" columns 
<jupyter_code>df.drop(columns=['Arrest Flag', 'Frisk Flag', 'Reported Time', 'Precinct', 'Sector', 'Beat'], inplace = True)
# Re-Check for duplicates
#duplicates = seattle_df[seattle_df.duplicated(subset =['id'], keep = False)]
#duplicates.sort_values(by=['id']).head()
duplicates = df[df.duplicated(keep = False)]
df.duplicated().sum()<jupyter_output><empty_output><jupyter_text>#### Finding from duplicated():
- If you look at the beginning of the analysis, I checked for duplications with the entire dataset (before removing columns of data, such as "ID"),  there were no duplicates. But after dropping the ID,  there are 118 rows in duplication, 59 pairs. 
- Because the date and time are not exact (the documentation says the date could have been entered 24 hours later, or the time could be off by 10 hours), genuinely unique Terry stops could end up with identical rows once the ID columns are removed.
- There are a few that are arrests.  Still open to decide whether to remove the duplicated data or not.  
- What is curious is that the index number is not always consecutive between different pairs of duplicates.  This suggests that perhaps the data was input twice -- maybe due to some computer or internet glitches?
##  3. Data Transformation
   * Officer data: YOB, race, gender
   * Subject data- Age Group, race, gender
   * Stop Resolution (target column)
   * Weapons
   * Type of potential crime: Call type Initial and Final 
   * Date to day of week
   * Location and time: from Officer Squad
   ### A. Transform Gender Using Dictionary Mapping .map()
   <jupyter_code># Re-mapping gender categories. 0 = Male, 1 = Female, 2 = Unknown
# officer_gender
officer_gender = {'M':0, 'F':1, 'N':2}
df['Officer Gender'] = df['Officer Gender'].map(officer_gender)
# subject perceived gender
subject_gender = {'Male':0, 'Female':1, 'Unknown':2,  '-':2, 
                 'Unable to Determine':2, 'Gender Diverse (gender non-conforming and/or transgender)':2}
df['Subject Perceived Gender'] = df['Subject Perceived Gender'].map(subject_gender)
#Check the mapping
df.loc[(df['Officer Gender']== 0.0)].shape, df.loc[(df['Subject Perceived Gender']== 0.0)].shape
df['Officer Gender'].value_counts()
df['Subject Perceived Gender'].value_counts()
df.loc[(df['Stop Resolution']=='Arrest') & (df['Subject Perceived Gender'].isna())].shape  # use .isna(); comparing with == np.nan is always False
# Checking whether any arrested subjects ended up with an unmapped (NaN) gender after the re-mapping
# Check the mapping
df['Officer Gender'].isna().sum(), df['Subject Perceived Gender'].isna().sum()  #NAs are not found <jupyter_output><empty_output><jupyter_text>### B. Transform Age Using Dictionary Mapping .map() and binning (.cut)<jupyter_code># Re-mapping subject age categories
subject_age = {'1 - 17':1, '18 - 25':2, '26 - 35':3, '36 - 45':4, '46 - 55':5, '56 and Above':6, '-':0}
df['Subject Age Group'] = df['Subject Age Group'].map(subject_age)
df['Subject Age Group'].isna().sum()
df['Subject Age Group'].value_counts()
# Checking to see of those arrested, how many had an unknown age group
# There are 193 arrests of people whose age is unknown
df.loc[(df['Stop Resolution']=='Arrest') & (df['Subject Age Group']== 0)].shape
# Calculated the Officers Age, and bin into same bins as the subject age
df['Reported Year']=pd.to_datetime(df['Reported Date']).dt.year
df['Reported Year']
df['Officer Age'] = df['Reported Year'] - df['Officer YOB']
df['Officer Age'].value_counts(dropna=False)
#subject_age = {'1 - 17':1, '18 - 25':2, '26 - 35':3, '36 - 45':4, '46 - 55':5, '56 and Above':6, '-':0}
#bins = [0, 17, 25, 35, 45, 55,85]
#age_bins = pd.cut(df['Officer Age'], bins)
#age_bins.cat.as_ordered()
#age_bins.head()
df['Officer Age'] =pd.cut(x=df['Officer Age'], bins=[1,18,25,35,45,55,70,120], labels = [1,2,3,4,5,6,0])
df['Officer Age'].value_counts(dropna=False)
df.head()<jupyter_output><empty_output><jupyter_text>### C. Transform Race using Dictionary Mapping<jupyter_code># Check how many arrested had unknown race (or - or other)
df.loc[(df['Stop Resolution']=='Arrest') & (df['Subject Perceived Race']== "Unknown")].shape
#df.loc[(df['Stop Resolution']=='Arrest') & (df['Subject Perceived Race']== "-")].shape
#df.loc[(df['Stop Resolution']=='Arrest') & (df['Subject Perceived Race']== "Other")].shape
df['Subject Perceived Race'].value_counts()
race_map = {'White': 'White', 'Black or African American':'African American', 'Hispanic':'Hispanic',
            'Hispanic or Latino':'Hispanic', 'Two or More Races':'Multi-Racial','Multi-Racial':'Multi-Racial',
           'American Indian or Alaska Native':'Native', 'American Indian/Alaska Native':'Native',  
            'Native Hawaiian or Other Pacific Islander':'Native', 'Nat Hawaiian/Oth Pac Islander':'Native',
           '-':'Unknown', 'Other':'Unknown', 'Not Specified':'Unknown','Unknown':'Unknown',
           'Asian': 'Asian',}
df['Subject Perceived Race'] = df['Subject Perceived Race'].map(race_map)
df['Officer Race'] = df['Officer Race'].map(race_map)
df['Officer Race'].value_counts()
df['Subject Perceived Race'].value_counts()<jupyter_output><empty_output><jupyter_text>### D. Transform Stop Resolution Using Dictionary Mapping .map()<jupyter_code># Now address the Stop Resolution categories
df['Stop Resolution'].value_counts()
# Re-map the Stop Resolution, to combine categories Arrest and Referred for Prosecution
# Map Arrest and Referred for Prosecution to 1,  and all others 0
stop_resolution = {'Field Contact': 0, 'Offense Report': 0, 'Arrest': 1,
             'Referred for Prosecution': 1, 'Citation / Infraction': 0}
df['Stop Resolution']=df['Stop Resolution'].map(stop_resolution)
df['Stop Resolution'].value_counts()<jupyter_output><empty_output><jupyter_text>### E. Transform Weapon Type Using a Dictionary and .map()<jupyter_code>df.head()
# Now re-map Weapon Type feature.  First check the categories of Weapons
df['Weapon Type'].value_counts()
weapon_type = {'None':'None', 'None/Not Applicable':'None', 'Fire/Incendiary Device':'Incendiary',
              'Lethal Cutting Instrument':'Lethal Blade', 'Knife/Cutting/Stabbing Instrument':'Lethal Blade',
              'Handgun':'Firearm', 'Firearm Other':'Firearm','Firearm':'Firearm', 'Firearm (unk type)':'Firearm',
              'Other Firearm':'Firearm', 'Rifle':'Firearm', 'Shotgun':'Firearm', 'Automatic Handgun':'Firearm',
              'Club, Blackjack, Brass Knuckles':'Blunt Force', 'Club':'Blunt Force', 
              'Brass Knuckles':'Blunt Force', 'Blackjack':'Blunt Force',
              'Blunt Object/Striking Implement':'Blunt Force', '-':'Unknown',
              'Taser/Stun gun':'Taser', 'Mace/Pepper Spray':'Spray',}
df['Weapon Type']=df['Weapon Type'].map(weapon_type)
df['Weapon Type'].value_counts()<jupyter_output><empty_output><jupyter_text>### F. Transform the Date using to_datetime, .weekday, and .day
* Calculate the reported day of the week
    - [x] Day of the week: 0 = Monday, 6 = Sunday
    
    
* Calculate the first, mid and last weeks of the month because perhaps more crimes / arrests are made when the bills come due
    - [x] Time of month: 1 = First week, 2 = 2nd and 3rd weeks, 3 = last week of the month (see the sketch below)
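As an aside, the same three bins could be produced more compactly with `pd.cut` instead of the 31-entry dictionary used below (a sketch, assuming `Reported Date` has already been converted with `pd.to_datetime`):

```python
# Bin day-of-month into thirds: days 1-7 -> 1, days 8-22 -> 2, days 23-31 -> 3
df['Time of Month'] = pd.cut(df['Reported Date'].dt.day, bins=[0, 7, 22, 31], labels=[1, 2, 3])
```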
<jupyter_code>df['Reported Date'].head()
# Transform the Reported date into a day of the week,  or the time of month 
# Day of the week: 0 = Monday, 6 = Sunday
# Time of month: 1 = First week, 2 = 2nd and 3rd weeks, 3 = last week of the month
df['Reported Date']=pd.to_datetime(df['Reported Date'])  # Processed earlier for Officer YOB calculation
df['Weekday']=df['Reported Date'].dt.weekday
df['Time of Month'] = df['Reported Date'].dt.day
month_map = {1:1, 2:1,3:1,4:1, 5:1, 6:1, 7:1,8:2, 9:2, 10:2, 11:2, 12:2, 13:2, 14:2, 15:2, 
                     16:2, 17:2, 18:2, 19:2, 20:2, 21:2, 22:2, 23:3, 24:3, 25:3, 26:3, 27:3, 28:3, 29:3, 30:3, 31:3}
df['Time of Month'] = df['Time of Month'].map(month_map)
df.isna().sum()
df.head()<jupyter_output><empty_output><jupyter_text>### G. Use Officer Squad data to create the location information (Precinct or Officer Team) and the time of day of the arrest (Officer Watch)
* Use Pandas Regex .str.extract to get the name of the precinct and the Watch if available (see the sketch after this list)
* Analyse if some precincts / units never make arrests 
* The Officer Squad text data is likely a more reliable estimate, assuming the information provided is the squad name / location and the watch that handled the report, rather than a specific person's schedule or squad. 
* Since reports can be filed 1 day, or 10 hours, later, the recorded Reported Date and Time are not the actual Terry stop time. 
* Features created from Officer Squad: 
    - [x] Precinct or Squad name following the Terry stop
    - [x] Watch: 
        0 = Unknown, if the watch is not normally recorded
        1 = Watch 1 03:00 - 11:00
        2 = Watch 2 11:00 - 19:00
        3 = Watch 3 19:00 - 03:00
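Before running the extraction below, here is a minimal sketch of the two patterns on made-up (hypothetical) squad strings, only to illustrate what each capture group returns:

```python
import pandas as pd

# Hypothetical squad strings, purely for illustration
sample = pd.Series(['WEST PCT 1ST W - SAMPLE SQUAD', 'TRAINING - FIELD TRAINING SQUAD'])
print(sample.str.extract(r'(\w+)'))    # first word  -> precinct / unit name
print(sample.str.extract(r'([\d])'))   # first digit -> watch number (NaN if absent)
```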
  <jupyter_code>df.head()
# Use Python Regex commands to clean up the Call Types and Officer Squad
df['Officer Squad'].value_counts()
df['Precinct'] = df['Officer Squad'].str.extract(r'(\w+)')
df['Watch'] = df['Officer Squad'].str.extract(pat = '([\d])').fillna(0)
df.head(100)
# Some Officer Squads do not record the Watch number 
# Don't leave the NaNs in the Watch column, fill with 0
# Watch definition: 0 = Unknown, 1 = 1st Watch, 2 = 2nd Watch, 3 = 3rd Watch
df.isna().sum()
# Identify the Precincts are not typically making arrests, by comparing the number of arrests (Stop Resolution = Arrest)
# to the total number of Terry stops. 
arrest_df = df.loc[df['Stop Resolution'] == 1]  # Dataframe only for those Terry stops that resulted in arrests
arrest_df['Precinct'].value_counts(), df['Precinct'].value_counts()  # compare the value_counts for both dataframes
# Subsetting to only the Stop Resolution of arrest 
# Calculate the percentage of arrests per precinct by dividing the arrest_df counts by the total number of Terry stops
arrest_percentage = arrest_df['Precinct'].value_counts() / df['Precinct'].value_counts()
print(f'The percentage of arrests based on terry stops, by squad \n\n',arrest_percentage)
# Create a dictionary for mapping the squads which have successful arrest.  Those officer squads which have
# reported Terry stops with no arrests will be dropped from the dataset
successful_arrest_map=arrest_percentage.to_dict()
# successful_arrest_map # Take a look at the dictionary
df['Precinct Success']=df['Precinct'].map(successful_arrest_map)
df.isna().sum()
# There are 36 units / precincts which do not have any arrests since 2015
# Likely these units are not expected to make arrests
#df.to_csv('terry_stops_cleanup3.csv') #save with all manipulations except for Call Types, without dropping
# Drop the Terry stops from units which do not routinely make arrests
df.dropna(inplace=True)  # Drop the squads with no arrests
df.reset_index(inplace=True)  # Reset the Index
df.drop(columns=['Call Type', 'Reported Date', 'Officer Squad'], inplace = True) # Drop Processed Columns
df.to_csv('terry_stops_cleanup4.csv') #Save after dropping squads with no arrests and columns and reset index
df.head()<jupyter_output><empty_output><jupyter_text>### H. Transform Initial or Final Call Types<jupyter_code>def clean_call_types(df_to_clean, col_name, new_col):
    '''Transform Call Type text into a single identifier
    Inputs:  df,  col_name -  column which has the Call type,  and a new column name
    Outputs: the dataframe is modified in place with the new column; returns a dict of value counts for the new column'''
    idx = df_to_clean[col_name] == '-' # Create an index of the true and false values for the condition == '-'
    df_to_clean.loc[idx, col_name] = 'Unknown'
    column_series = df_to_clean[col_name]
    df_to_clean[new_col] = column_series.apply(lambda x:x.replace('--','').split('-')[0].strip())
    #df_to_clean[new_col].value_counts(dropna=False).sort_index()
    #df_to_clean.isna().sum()
    df_to_clean[new_col] = df_to_clean[new_col].str.extract(r'(\w+)')
    df_to_clean[new_col] = df_to_clean[new_col].str.lower()
    last_map = df_to_clean[new_col].value_counts().to_dict()
    return last_map
    
final_map = clean_call_types(df,'Final Call Type', 'Final Call Re-map')
initial_map = clean_call_types(df, 'Initial Call Type', 'Initial Call Re-map')
final_map
initial_map
# Check to see if keys of the two dictionaries are the same
diff = set(final_map) - set(initial_map)  # the keys in final_map and not in initial_map
diff2 = set(initial_map) - set(final_map) # the keys that are in initial_map, and not in final_map
diff, diff2
# Expand the existing call map to include additional keys
#  This call dictionary was built on the final calls,  not the initial calls text.  So add the initial calls and input values
call_dictionary = {'unknown': 'unknown',
             'suspicious': 'suspicious',
             'assaults': 'assault',
             'disturbance': 'disturbance',
             'prowler': 'trespass',
             'dv': 'domestic violence',
             'warrant': 'warrant',
             'theft': 'theft',
             'narcotics': 'under influence',
             'robbery': 'theft',
             'burglary': 'theft',
             'traffic': 'traffic',
             'property': 'property damage',
             'weapon': 'weapon',
             'crisis': 'person in crisis',
             'automobiles': 'auto',
             'assist': 'assist others',
             'sex': 'vice',
             'mischief': 'mischief',
             'arson': 'arson',
             'fraud': 'fraud',
             'vice': 'vice',
             'drive': 'auto',
             'misc': 'misdemeanor',
             'premise': 'trespass',
             'alarm': 'suspicious',
             'intox': 'under influence',
             'rape': 'rape',
             'child': 'child',
             'trespass': 'trespass',
             'person': 'person in crisis',
             'homicide': 'homicide',
             'burg': 'theft',
             'kidnap': 'kidnap',
             'animal': 'animal',
             'hazards': 'hazard',
             'aslt': 'assault',
             'casualty': 'homicide',
             'fight': 'disturbance',
             'shoplift': 'theft',
             'auto': 'auto', 
             'haras': 'disturbance',
             'purse': 'theft',
             'weapn': 'weapon',
             'fireworks': 'arson',
             'follow': 'disturbance',
             'dist': 'disturbance',
             'haz': 'hazard',
             'nuisance': 'mischief',
             'threats': 'disturbance',
             'liquor': 'under influence',
             'mvc': 'auto',
             'shots': 'weapon',
             'harbor': 'auto',
             'down': 'homicide',
             'service': 'unknown',
             'hospital': 'unknown',
             'bomb': 'arson',
             'undercover': 'under influence',
             'burn': 'arson',
             'lewd': 'vice',
             'dui': 'under influence',
             'crowd': 'unknown',
             'order': 'assist',
             'escape': 'assist',
             'commercial': 'trespass',
             'noise': 'disturbance',
             'narcotics': 'under influence',
             'awol': 'kidnap',
              'bias': 'unknown',
              'carjacking': 'kidnap',
              'demonstrations':'disturbance',
              'directed':'unknown',
              'doa':'assist',
              'explosion':'arson',
              'foot': 'trespass',
              'found':'unknown',
              'gambling': 'vice',
              'help':'assist',
              'illegal':'assist',
              'injured':'assist',
              'juvenile':'child',
              'littering': 'nuisance',
              'missing': 'kidnap',
              'off':'suspicious',
              'open':'unknown',
              'overdose':'under influence',
              'panhandling':'disturbance',
              'parking':'disturbance',
              'parks':'disturbance',
              'peace':'disturbance',
              'pedestrian':'disturbance',
              'phone':'disturbance',
              'request':'assist',
              'sfd':'assist',
              'sick':'assist',
              'sleeper':'disturbance',
              'suicide':'assist'}
df['Final Call Re-map'] = df['Final Call Re-map'].map(call_dictionary)
df['Final Call Re-map'].value_counts(dropna=False)
df['Initial Call Re-map'] = df['Initial Call Re-map'].map(call_dictionary)
df['Initial Call Re-map'].value_counts(dropna=False)
df.isna().sum()
#Drop all NaNs
df.dropna(inplace=True)
df.reset_index(inplace=True)
df.to_csv('terry_stops_cleanup4.csv')
df.head(100)
df.drop(columns = ['Initial Call Type', 'Final Call Type', 'Precinct Success', 'Officer YOB',
                  'Reported Year', 'level_0', 'index'], inplace=True)
df.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 41028 entries, 0 to 41027
Data columns (total 14 columns):
 #   Column                    Dtype   
---  ------                    -----   
 0   Subject Age Group         int64   
 1   Stop Resolution           int64   
 2   Weapon Type               object  
 3   Officer Gender            int64   
 4   Officer Race              object  
 5   Subject Perceived Race    object  
 6   Subject Perceived Gender  int64   
 7   Officer Age               category
 8   Weekday                   int64   
 9   Time of Month             int64   
 10  Precinct                  object  
 11  Watch                     object  
 12  Final Call Re-map         object  
 13  Initial Call Re-map       object  
dtypes: category(1), int64(6), object(7)
memory usage: 4.1+ MB
<jupyter_text>## 4. Vanilla Model
    
### XGB + Initial Call Type
<jupyter_code>df_to_split = df.drop(columns = 'Final Call Re-map')
category_cols = df_to_split.columns
target_col = ['Stop Resolution']
df.info()
df_to_split = pd.DataFrame()
from sklearn.preprocessing import MinMaxScaler
# Convert categories to cat.codes
for header in category_cols:
    df_to_split[header] = df[header].astype('category').cat.codes
    
df_to_split.info()
df_to_split.head()
# Check the correlation matrix to see which variables are collinear, and plot it out
# Will run the correlation matrix for the last kernel run
sns.axes_style("white")
pearson = df_to_split.corr(method = 'pearson')
sns.set(rc={'figure.figsize':(20,12)})
# Generate a mask for the upper triangle
mask = np.zeros_like(pearson)
mask[np.triu_indices_from(mask)] = True
ax = sns.heatmap(data=pearson, mask=mask, cmap="YlGnBu", 
                 linewidth=0.5, annot=True, square=True, cbar_kws={'shrink': 0.5})
# Save the correlations information
plt.savefig("Correlation.png")
plt.savefig("Correlation 2.png", transparent = True)
y = df_to_split['Stop Resolution']
X = df_to_split.drop('Stop Resolution',axis=1)
from sklearn.model_selection import train_test_split
## Train test split
X_train, X_test, y_train,y_test  = train_test_split(X,y,test_size=.3,
                                                    random_state=42,)#,stratify=y)
display(y_train.value_counts(normalize=False),y_test.value_counts(normalize=False))
#!pip3 install xgboost
import xgboost as xgb
from xgboost import XGBRFClassifier,XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score, roc_curve
xgb_rf = XGBRFClassifier()
xgb_rf.fit(X_train, y_train)
print('Training score: ' ,round(xgb_rf.score(X_train,y_train),2))
print('Test score: ',round(xgb_rf.score(X_test,y_test),2))
y_hat_test = xgb_rf.predict(X_test)
check = evaluate_model(y_test,y_hat_test, X_test, xgb_rf)
# Importance Check
check
import scipy.stats as ss  # provides chi2_contingency used below

def cramers_corrected_stat(df, column1, column2):
    """ Calculate Cramers V statistic for categorial-categorial association.
        uses correction from Bergsma and Wicher, 
        Journal of the Korean Statistical Society 42 (2013): 323-328
        Reference: https://stackoverflow.com/questions/20892799/using-pandas-calculate-cram%C3%A9rs-coefficient-matrix
        
        Inputs: df and the names of two categorical columns (column1, column2)
        Outputs: bias-corrected Cramér's V (float)
    """
    confusion_matrix = pd.crosstab(df[column1], df[column2])
    print(confusion_matrix)
    chi2 = ss.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()  # total observations (crosstab returns a DataFrame, so sum twice)
    phi2 = chi2/n
    r,k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))    
    rcorr = r - ((r-1)**2)/(n-1)
    kcorr = k - ((k-1)**2)/(n-1)
    return np.sqrt(phi2corr / min( (kcorr-1), (rcorr-1)))<jupyter_output><empty_output><jupyter_text>## 5. Vanilla Model Results & Experimental Plan
* Results: 
    - [x] "Initial Call Type" is the most important feature, with "Weapon" and "Officer Age" as the 2nd and 3rd most important features, respectively. 
    - [x] Training accuracy of 0.76, and testing accuracy of 0.74 
    - [x] However, the Confusion Matrix shows the main reason is that the "Non-arrests" are better classified than the arrests.  The true negatives were well predicted (97%), while the true positives were poorly predicted (11%), meaning 89% of actual arrests were missed (false negatives).
    - [x] This seems to make sense given the class imbalance (only 25% of the data were arrests) 
    - [x] The AUC was well above random chance  
### B - XGB + Final Call Type
* **The Next Steps will be a set of experiments to look how the models can improve based on:**
    - [1] Feature Selection:  Initial Call Type Versus Final Call Type 
    - [2] Model type:  XGBoost-RF  vs CatBoost
    - [3] Balancing the dataset from best model of [1] and [2]
    - [4] HyperParameter tuning for [3] 
    
* **The Next Experiment will be (bold type):**
    - A = Vanilla Model = XGB + Initial Call Type 
    - **B = XGB + Final Call Type**
    - C = Cat + Initial Call Type
    - D = Cat + Final Call Type
    - E = SMOTE + Best of (A,B,C,D))
    - F = Gridsearch on E
<jupyter_code># Setup a results dataframe to capture all the results
result_idx = ['Accuracy','Precision - no Arrest', 'Precision - Arrest', 'Precision-wt Avg', 
              'Recall - no Arrest', 'Recall - Arrest', 'Recall - wt Avg', 'F1 - no Arrest',
             'F1 - Arrest', 'F1 - wt Avg', 'Training AUC', 'Test AUC']
result_cols = ['XGB + initial', 'XGB + final', 'CB + initial', 'CB + final', 'CBC + initial',
              'CBC + final', "SMOTE+XGB+final", "SMOTE+CB+final", "SMOTE+CBC+final"]
results_df = pd.DataFrame(index = result_idx, columns = result_cols)
#results_df
# Save the initial results
# Change input to drop Initial Call Type and keep Final Call Type
df_to_split = df.drop(columns = 'Initial Call Re-map')
category_cols = df_to_split.columns
target_col = ['Stop Resolution']
df_to_split = pd.DataFrame()
# Convert categories to cat.codes
for header in category_cols:
    df_to_split[header] = df[header].astype('category').cat.codes
    
df_to_split.head()
y = df_to_split['Stop Resolution']
X = df_to_split.drop('Stop Resolution',axis=1)
X_train, X_test, y_train,y_test  = train_test_split(X,y,test_size=.3,
                                                    random_state=42,)#,stratify=y)
display(y_train.value_counts(normalize=False),y_test.value_counts(normalize=False))
xgb_rf = XGBRFClassifier()
xgb_rf.fit(X_train, y_train)
print('Training score: ' ,round(xgb_rf.score(X_train,y_train),2))
print('Test score: ',round(xgb_rf.score(X_test,y_test),2))
y_hat_test = xgb_rf.predict(X_test)
evaluate_model(y_test,y_hat_test, X_test, xgb_rf)<jupyter_output><empty_output><jupyter_text>## 6. CatBoost with Final Call Type
### D - Catboost + Final Call Type
* **The Next Steps will be a set of experiments to look how the models can improve based on:**
    - [1] Feature Selection:  Initial Call Type Versus Final Call Type 
    - [2] Model type:  XGBoost-RF  vs CatBoost
    - [3] Balancing the dataset from best model of [1] and [2]
    - [4] HyperParameter tuning for [3] 
    
* **The Next Experiment will be (Bold Type):**
    - A = Vanilla Model = XGB + Initial Call Type
    - B = XGB + Final Call Type
    - C = Catboost + Initial Call Type
    - **D = Catboost + Final Call Type**
    - E = SMOTE + Best of (A,B,C,D))
    - F = Gridsearch on E
<jupyter_code>#!pip install -U catboost
from catboost import CatBoostClassifier
clf = CatBoostClassifier()
clf.fit(X_train,y_train,logging_level='Silent')
print('Training score: ' ,round(clf.score(X_train,y_train),2))
print('Test score: ',round(clf.score(X_test,y_test),2))
y_hat_test = clf.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,clf)<jupyter_output><empty_output><jupyter_text>## 7. Catboost with Initial Call Type
### C - Catboost + Initial Call Type
* **The Next Steps will be a set of experiments to look how the models can improve based on:**
    - [1] Feature Selection:  Initial Call Type Versus Final Call Type 
    - [2] Model type:  XGBoost-RF  vs CatBoost
    - [3] Balancing the dataset from best model of [1] and [2]
    - [4] HyperParameter tuning for [3] 
    
* **The Next Experiment will be (Bold Type):**
    - A = Vanilla Model = XGB + Initial Call Type
    - B = XGB + Final Call Type
    - **C = Catboost + Initial Call Type**
    - D = Catboost + Final Call Type
    - E = SMOTE + Best of (A,B,C,D))
    - F = Gridsearch on E
<jupyter_code>df_to_split = df.drop(columns = 'Final Call Re-map')
category_cols = df_to_split.columns
target_col = ['Stop Resolution']
df_to_split = pd.DataFrame()
# Convert categories to cat.codes
for header in category_cols:
    df_to_split[header] = df[header].astype('category').cat.codes
    
y = df_to_split['Stop Resolution']
X = df_to_split.drop('Stop Resolution',axis=1)
X_train, X_test, y_train,y_test  = train_test_split(X,y,test_size=.3,
                                                    random_state=42,)#,stratify=y)
clf = CatBoostClassifier()
clf.fit(X_train,y_train,logging_level='Silent')
print('Training score: ' ,round(clf.score(X_train,y_train),2))
print('Test score: ',round(clf.score(X_test,y_test),2))
y_hat_test = clf.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,clf)
#fig, ax = plt.subplots(1, 1, figsize=(5, 3))
#ax_arr = (ax1, ax2, ax3, ax4)
#weights_arr = ((0.01, 0.01, 0.98), (0.01, 0.05, 0.94),
#               (0.2, 0.1, 0.7), (0.33, 0.33, 0.33))
#for ax, weights in zip(ax_arr, weights_arr):
    #X, y = create_dataset(n_samples=1000, weights=weights)
#    clf = CatBoostClassifier()
#    clf.fit(X_train,y_train,logging_level='Silent')
    #clf = LinearSVC().fit(X, y)
#plot_decision_function(X_train, y_train, clf, ax)
#ax.set_title('Catboost with Final Call Type')
#fig.tight_layout()
#y_train<jupyter_output><empty_output><jupyter_text>## 8. SMOTE + Catboost + Final Call
### E - SMOTE + Best of (A,B,C,D)
* **The Next Steps will be a set of experiments to look at how the models can improve based on:**
    - [1] Feature Selection:  Initial Call Type Versus Final Call Type 
    - [2] Model type:  XGBoost-RF  vs CatBoost
    - [3] Balancing the dataset from best model of [1] and [2]
    - [4] HyperParameter tuning for [3] 
    
* **The Next Experiment will be (Bold Type):**
    - A = Vanilla Model = XGB + Initial Call Type
    - B = XGB + Final Call Type
    - C = Catboost + Initial Call Type
    - D = Catboost + Final Call Type
    - **E = SMOTE + Best of (A,B,C,D)**
    - F = Gridsearch on E
<jupyter_code>#!pip install -U imbalanced-learn
from imblearn.over_sampling import SMOTE
smote = SMOTE()
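# Note (editorial, hedged): SMOTE oversamples the rarer Stop Resolution class by synthesizing new
# training points between a minority sample and its nearest minority-class neighbours. It is
# applied to the training split only, so the test set keeps its original class balance.
# Newer imbalanced-learn releases expose this as fit_resample(); fit_sample() below is the older API.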
df_to_split = df.drop(columns = 'Initial Call Re-map')
category_cols = df_to_split.columns
target_col = ['Stop Resolution']
df_to_split = pd.DataFrame()
# Convert categories to cat.codes
for header in category_cols:
    df_to_split[header] = df[header].astype('category').cat.codes
    
y = df_to_split['Stop Resolution']
X = df_to_split.drop('Stop Resolution',axis=1)
X_train, X_test, y_train,y_test  = train_test_split(X,y,test_size=.3,
                                                    random_state=42,stratify=y)
X_train, y_train = smote.fit_sample(X_train, y_train)
display(y_train.value_counts(normalize=False),y_test.value_counts(normalize=False))
clf = CatBoostClassifier()
clf.fit(X_train,y_train,logging_level='Silent')
print('Training score: ' ,round(clf.score(X_train,y_train),2))
print('Test score: ',round(clf.score(X_test,y_test),2))
y_hat_test = clf.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,clf)
# The SMOTE'd data on XGB-RF, just for fun
xgb_rf = XGBRFClassifier()
xgb_rf.fit(X_train, y_train)
print('Training score: ' ,round(xgb_rf.score(X_train,y_train),2))
print('Test score: ',round(xgb_rf.score(X_test,y_test),2))
y_hat_test = xgb_rf.predict(X_test)
evaluate_model(y_test,y_hat_test, X_test, xgb_rf)
# Try a Support Vector Machine,  for the heck of it
from sklearn.svm import SVC,LinearSVC,NuSVC
clf = SVC()
clf.fit(X_train,y_train)
y_hat_test = clf.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,clf)
# Try Categorical SMOTE
from imblearn.over_sampling import SMOTENC
smote_nc = SMOTENC(categorical_features = [0,11])
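# Note (editorial): SMOTENC extends SMOTE to mixed data -- continuous columns are interpolated,
# while the columns listed in categorical_features get the most frequent category among the
# neighbours. The hard-coded indices [0, 11] assume a particular column order in X; every column
# here is already a cat.code, so marking only two of them as categorical is a choice of this notebook.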
df_to_split = df.drop(columns = 'Initial Call Re-map')
category_cols = df_to_split.columns
target_col = ['Stop Resolution']
df_to_split = pd.DataFrame()
# Convert categories to cat.codes
for header in category_cols:
    df_to_split[header] = df[header].astype('category').cat.codes
    
y = df_to_split['Stop Resolution']
X = df_to_split.drop('Stop Resolution',axis=1)
X_train, X_test, y_train,y_test  = train_test_split(X,y,test_size=.3,
                                                    random_state=42,stratify=y)
# Now modify the training data by oversampling (SMOTENC)
X_train, y_train = smote_nc.fit_sample(X_train, y_train)
display(y_train.value_counts(normalize=False),y_test.value_counts(normalize=False))
clf = CatBoostClassifier()
clf.fit(X_train,y_train,logging_level='Silent')
print('Training score: ' ,round(clf.score(X_train,y_train),2))
print('Test score: ',round(clf.score(X_test,y_test),2))
y_hat_test = clf.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,clf)<jupyter_output><empty_output><jupyter_text>## 9. SMOTE + CatBoostClassifier + Final Call type
### E - SMOTE + CatBoostClassifier + Final Call type
* **The Next Steps will be a set of experiments to look at how the models can improve based on:**
    - [1] Feature Selection:  Initial Call Type Versus Final Call Type 
    - [2] Model type:  XGBoost-RF  vs CatBoost
    - [3] Balancing the dataset from best model of [1] and [2]
    - [4] HyperParameter tuning for [3] 
    
* **The Next Experiment will be (Bold Type):**
    - A = Vanilla Model = XGB + Initial Call Type
    - B = XGB + Final Call Type
    - C = Catboost + Initial Call Type
    - D = Catboost + Final Call Type (i.e., CatBoostClassifier + Final Call Type)
    - **E = SMOTE + Best of (A,B,C,D)**
    - F = Gridsearch on E
#Reference: https://catboost.ai/docs/concepts/python-reference_catboost_grid_search.html
from catboost import CatBoost
model = CatBoostClassifier()
grid = {'learning_rate': [0.01,.04,0.8],
        'depth': [3,5,8,12],
        'l2_leaf_reg': [1, 3, 7, 9]}
grid_search_result = model.grid_search(grid, 
                                       X=X_train, 
                                       y=y_train, 
                                       plot=True)
print('Training score: ' ,round(model.score(X_train,y_train),2))
print('Test score: ',round(model.score(X_test,y_test),2))
<jupyter_code>#y_hat_test = model.predict(X_test)
#evaluate_model(y_test,y_hat_test,X_test,model)<jupyter_output><empty_output><jupyter_text>model = CatBoost()
grid = {'learning_rate': [0.03, 0.1],
        'depth': [4, 6, 10],
        'l2_leaf_reg': [1, 3, 5, 7, 9]}
grid_search_result = model.grid_search(grid, 
                                       X=X_train, 
                                       y=y_train, 
                                       plot=True)<jupyter_code>#df_to_split5 = pd.DataFrame()
df_to_split = df.drop(columns = ['Initial Call Re-map','Stop Resolution'])
df_to_split.head()
X = pd.get_dummies(df_to_split, drop_first=True)
y = df['Stop Resolution']
X
#from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
from catboost import Pool, CatBoostClassifier
category_cols = X.columns
train_pool =  Pool(data=X_train, label=y_train, cat_features=category_cols)
test_pool = Pool(data=X_test, label=y_test,  cat_features=category_cols)
cb_base = CatBoostClassifier(iterations=500, depth=12,
                            boosting_type='Ordered',
                            learning_rate=0.03,
                            thread_count=-1,
                            eval_metric='AUC',
                            silent=True,
                            allow_const_label=True)#,
                           #task_type='GPU')
cb_base.fit(train_pool,eval_set=test_pool, plot=True, early_stopping_rounds=10)
cb_base.best_score_
# Plotting Feature Importances
important_feature_names = cb_base.feature_names_
important_feature_scores = cb_base.feature_importances_
important_features = pd.Series(important_feature_scores, index = important_feature_names)
important_features.sort_values().plot(kind='barh');
print('Training score: ' ,round(cb_base.score(X_train,y_train),2))
print('Test score: ',round(cb_base.score(X_test,y_test),2))
y_hat_test = cb_base.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,cb_base)
# Try the same approach, with SMOTENC first
smote_nc = SMOTENC(categorical_features = [0,11])
df_to_split = df.drop(columns = ['Initial Call Re-map', 'Stop Resolution'])
category_cols = df_to_split.columns
#target_col = ['Stop Resolution']
#df_to_split = pd.DataFrame()
# Convert categories to cat.codes
X = pd.get_dummies(df_to_split, drop_first=True)
y = df['Stop Resolution']
#for header in category_cols:
#    df_to_split[header] = df[header].astype('category').cat.codes
    
X_train, X_test, y_train,y_test  = train_test_split(X,y,test_size=.3,
                                                    random_state=42,stratify=y)#
X_train, y_train = smote_nc.fit_sample(X_train, y_train)
display(y_train.value_counts(normalize=False),y_test.value_counts(normalize=False))
X
clf = CatBoostClassifier()
clf.fit(X_train,y_train,logging_level='Silent')
print('Training score: ' ,round(clf.score(X_train,y_train),2))
print('Test score: ',round(clf.score(X_test,y_test),2))
y_hat_test = clf.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,clf)
category_cols = X.columns
train_pool =  Pool(data=X_train, label=y_train, cat_features=category_cols)
test_pool = Pool(data=X_test, label=y_test,  cat_features=category_cols)
cb_base = CatBoostClassifier(iterations=500, depth=12,
                            boosting_type='Ordered',
                            learning_rate=0.03,
                            thread_count=-1,
                            eval_metric='AUC',
                            silent=True,
                            allow_const_label=True)#,
                           #task_type='GPU')
cb_base.fit(train_pool,eval_set=test_pool, plot=True, early_stopping_rounds=10)
cb_base.best_score_
print('Training score: ' ,round(cb_base.score(X_train,y_train),2))
print('Test score: ',round(cb_base.score(X_test,y_test),2))
y_hat_test = cb_base.predict(X_test)
evaluate_model(y_test,y_hat_test,X_test,cb_base)<jupyter_output><empty_output><jupyter_text>## 10. Gridsearch on Best Model
### F - Gridsearch on E
* **The Next Steps will be a set of experiments to look at how the models can improve based on:**
    - [1] Feature Selection:  Initial Call Type Versus Final Call Type 
    - [2] Model type:  XGBoost-RF  vs CatBoost
    - [3] Balancing the dataset from best model of [1] and [2]
    - [4] HyperParameter tuning for [3] 
    
* **The Next Experiment will be (Bold Type):**
    - A = Vanilla Model = XGB + Initial Call Type
    - B = XGB + Final Call Type
    - C = Catboost + Initial Call Type
    - D = Catboost + Final Call Type
    - E = SMOTE + Best of (A,B,C,D)
    - **F = Gridsearch on E**
## 11. Optional Feature Engineering for Training data only<jupyter_code>### The key concept is that in training we know that the some precincts are more successful than others at getting to an arrest.  Instead of imputed a 1-hot encoded value,  use the percentage of successful arrests as the values for the precinct.
Calculate how successful particular precincts were at making arrests
arrest_percentage = arrest_df['Precinct'].value_counts() / df['Precinct'].value_counts()
print(f'The percentage of arrests based on terry stops, by squad \n\n',arrest_percentage)
### Create a dictionary for mapping the squads which have successful arrests. Those officer squads which have
### reported Terry stops with no arrests will be dropped from the dataset
successful_arrest_map=arrest_percentage.to_dict()
### successful_arrest_map # Take a look at the dictionary
df['Precinct Success']=df['Precinct'].map(successful_arrest_map) # map the dictionary to the dataframe as a new column
### Perform the same analysis to see which call types lead to more arrests
arrest_df = df.loc[df['Stop Resolution'] == 'Arrest'] # Re-Create the arrest_df in case there were removals earlier
arrest_df['Final Call'].value_counts(),  df['Final Call'].value_counts()
arrest_categories = arrest_df['Final Call Type'].value_counts() / df['Final Call Type'].value_counts() 
arrest_map = arrest_categories.to_dict()
arrest_map # look at the dictionary 
df['Final Call Success'] = df['Final Call Type'].map(arrest_map)
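# Editorial aside (hypothetical, not part of the original analysis): call types with no recorded
# arrests come through the division above as NaN, so the mapped 'Final Call Success' column is NaN
# for them. One hedged option would be a fallback to the overall arrest rate, e.g.:
#   overall_rate = (df['Stop Resolution'] == 'Arrest').mean()
#   df['Final Call Success'] = df['Final Call Success'].fillna(overall_rate)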
results_df = pd.DataFrame(
                {'Expt Name': ["Accuracy", "Precision Not Arrested", 'Precision Arrested',
                             'Precision Weighted Avg', 'Recall Not Arrested', 'Recall Arrested',
                              'Recall Weighted Avg', 'F1 Not Arrested', 'F1 Arrested', 'F1 Weighted Avg',
                              'AUC'],})
#results_df = pd.DataFrame(
#                {'Expt Name': ['xgb-rf-initial call'], 'Accuracy':})  # incomplete: 'Accuracy' value not yet specified
	permissive | 
	/.ipynb_checkpoints/Terry Stops v7-checkpoint.ipynb | 
	sn95033/Terry-Stops-Analysis | 23 | 
| 
<jupyter_start><jupyter_text># Problem 4: Inversion Count<jupyter_code>num = int(input())
data = list(map(int,input().split()))
def s(x,y):
    count = 0
    for i in x:
        for j in y:
            if i>j:
                count = count+1
    return count
def w(x):
    if len(x) == 2:
        if x[0]>x[1]:
            return 1
        else:
            return 0
    elif len(x)==1:
        return 0
    else:
        ind = int(len(x)/2)
        fL=x[:ind]
        bL=x[ind:]
        if len(fL) == 1:
            if bL[0]>bL[1]:
                return 1+s(fL,bL)
            else:
                return s(fL,bL)
        else:
            return w(fL) + w(bL) + s(fL,bL)
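# Editorial cross-check (not part of the original submission): a standard merge-sort based count
# runs in O(n log n), versus the quadratic pairwise scan used by s(x, y) across the two halves.
def count_inversions(a):
    """Return (#inversions, sorted copy) of the sequence a."""
    if len(a) <= 1:
        return 0, list(a)
    mid = len(a) // 2
    left_inv, left = count_inversions(a[:mid])
    right_inv, right = count_inversions(a[mid:])
    merged, i, j, cross = [], 0, 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i]); i += 1
        else:
            merged.append(right[j]); j += 1
            cross += len(left) - i  # every element still in left exceeds right[j]
    merged.extend(left[i:]); merged.extend(right[j:])
    return left_inv + right_inv + cross, merged

# e.g. count_inversions(data)[0] should agree with w(data)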
print(w(data))<jupyter_output>20
1 2 8 9 3 6 4 5 7 3 5 7 1 2 3 9 7 8 4 3
80
 | 
	no_license | 
	/APCS/.ipynb_checkpoints/201804-checkpoint.ipynb | 
	leomiboy/level1_practice | 1 | 
| 
	<jupyter_start><jupyter_text>In this assignment you are generating sample images of Simpsons with deep convolutional generative adversarial networks (DCGANs). 
You need to do the following:
1- Read and understand this tutorial: https://towardsdatascience.com/image-generator-drawing-cartoons-with-generative-adversarial-networks-45e814ca9b6b
2- Download the dataset and the source code (Jupyter Notebook) given in the tutorial. Analyze and understand the code. (Note: you need to work on images in the "cropped" folder in the dataset)
3- The code is implemented using Tensorflow. You need to change it to fully use Keras. Please keep the structure of the code (name of the functions and so on) so it is easier to follow your code. 
4- Change the code to use resized images. The original images are 128x128 but you need to resize them to 64x64 before training. You need to make the required adjustments in the network to accommodate this change. This will make training faster too. 
5- Make sure you keep all the plotting parts of the code so you can observe how the system learns in each epoch. 
6- If training on Colab is slow, you can run it for only 50 epochs (or as many as possible). To test your code, run just one or two epochs first. Once everything is working, you can let it run for 50 or 300 epochs. 
7- If you wanted to run it faster, you can get free trials of GPU instances on AWS or Google cloud. 
8- Submit the notebook with all the results included in the notebook. <jupyter_code>import numpy as np
import pandas as pd 
import keras
from keras.layers import Input, Dense, Reshape, Flatten, Dropout,Concatenate
from keras.backend import random_normal,ones_like,zeros_like,mean
from keras.backend import get_session
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.initializers import TruncatedNormal
from PIL import Image
import warnings
import os
import time
from glob import glob
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
# Hyperparameters
IMG_SIZE = 64
NOISE_SIZE = 100
LR_D = 0.00004
LR_G = 0.0004
BATCH_SIZE = 64
EPOCHS = 300 # For better results increase this value 
BETA1 = 0.5
WEIGHT_INIT_STDDEV = 0.02
EPSILON = 0.00005
SAMPLES_TO_SHOW = 10
img_rows = 64
img_cols = 64
CHANNELS = 3
img_shape = (img_rows, img_cols, CHANNELS)
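# Note (editorial): LR_D is set an order of magnitude below LR_G (4e-5 vs 4e-4), a common way to
# keep the discriminator from overpowering the generator early in training.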
def GeneratorFunction(noise_shape=(NOISE_SIZE,)):
    
    input_layer = Input(noise_shape)
    generator = Dense(1024*8*8, activation='relu')(input_layer)
    generator = Reshape((8, 8, 1024)) (generator)
    generator = LeakyReLU(alpha=0.2) (generator)
                        
    generator = keras.layers.Conv2DTranspose(filters=512, kernel_size=[5,5], padding="same",
                                             strides=[2,2], kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV))(generator)
    generator = BatchNormalization(momentum=0.8,epsilon = EPSILON) (generator)
    generator = keras.layers.LeakyReLU(alpha=0.2) (generator)
                        
    generator = keras.layers.Conv2DTranspose(filters=256, kernel_size=[5,5], padding="same", strides=[2,2],
                                               kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV))(generator)
    generator = BatchNormalization(momentum=0.8,epsilon = EPSILON,name="batch_trans_conv2") (generator)
    generator = keras.layers.LeakyReLU(alpha=0.2) (generator)
                        
    generator = keras.layers.Conv2DTranspose(filters=128, kernel_size=[5,5], padding="same", strides=[2,2],
                                               kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV)) (generator)
    generator = BatchNormalization(momentum=0.8,epsilon = EPSILON,name="batch_trans_conv3") (generator)
    generator = keras.layers.LeakyReLU(alpha=0.2)(generator)
    
    generator = keras.layers.Conv2DTranspose(filters=64, kernel_size=[5,5], padding="same", strides=[1,1],
                                               kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV)) (generator)
    generator = BatchNormalization(momentum=0.8,epsilon = EPSILON,name="batch_trans_conv4") (generator)
    generator = keras.layers.LeakyReLU(alpha=0.2) (generator)
    
    generator = keras.layers.Conv2DTranspose(filters=3, kernel_size=[5,5], padding="same", strides=[1,1],
                                               kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV)) (generator)
    out = Activation("tanh")(generator)
    
    model = Model(inputs=[input_layer], outputs=out)
    model.summary()
  
    return model
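# Shape check: Dense/Reshape start at 8x8x1024 and the three stride-2 Conv2DTranspose layers
# upsample 8 -> 16 -> 32 -> 64, so the tanh output matches IMG_SIZE = 64 with CHANNELS = 3.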
def discriminatorFunction(img_shape=(IMG_SIZE, IMG_SIZE, CHANNELS)):
    
    input_layer = Input(img_shape)
    discriminator = Conv2D(filters = 64, kernel_size=[5,5],strides=[2,2],kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV), 
                             padding="SAME",input_shape = img_shape)(input_layer)
    
    discriminator = BatchNormalization(momentum=0.8,epsilon = EPSILON)(discriminator)
    discriminator = LeakyReLU(alpha=0.2)(discriminator)
    discriminator = Conv2D(filters=128, kernel_size=[5,5], strides=[2,2], padding="same",
                              kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV))(discriminator)
    discriminator = BatchNormalization(momentum=0.8,epsilon = EPSILON)(discriminator)
    discriminator = LeakyReLU(alpha=0.2)(discriminator)
    discriminator = Conv2D(filters=256, kernel_size=[5,5], strides=[2,2], padding="same",
                             kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV))(discriminator)
    discriminator = BatchNormalization(momentum=0.8,epsilon = EPSILON)(discriminator)
    discriminator = LeakyReLU(alpha=0.2)(discriminator)
    discriminator = Conv2D(filters=512, kernel_size=[5,5], strides=[1,1], padding="same",
                             kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV))(discriminator)
    discriminator = BatchNormalization(momentum=0.8,epsilon = EPSILON)(discriminator)
    discriminator = LeakyReLU(alpha=0.2)(discriminator)
    discriminator = Conv2D(filters=1024, kernel_size=[5,5], strides=[2,2], padding="same",
                             kernel_initializer=TruncatedNormal(stddev = WEIGHT_INIT_STDDEV))(discriminator)
    
    discriminator = BatchNormalization(momentum=0.8,epsilon = EPSILON)(discriminator)
    discriminator = LeakyReLU(alpha=0.2)(discriminator)
    discriminator = Flatten()(discriminator)
    out = Dense(1, activation='sigmoid')(discriminator)
    model = Model(inputs=[input_layer], outputs=out)
    model.summary()
    
    return model
#print("Discriminator")
model_discriminator = discriminatorFunction(img_shape=(IMG_SIZE, IMG_SIZE, CHANNELS))
model_discriminator.compile(loss='binary_crossentropy',
                                    optimizer=Adam(lr=LR_D, beta_1=BETA1),
                                    metrics=['accuracy'])
print("Generator")
model_generator = GeneratorFunction(noise_shape=(NOISE_SIZE,))
#build thwe Gan
z = Input(shape=(NOISE_SIZE,))
img = model_generator(z)
model_discriminator.trainable = False #discriminator is not trainable for GANs
real = model_discriminator(img)
gan = Model(z, real)
gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=LR_G, beta_1=BETA1))
print("Model created based on Discriminator and Generator")
gan.summary()
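# Note (editorial): this is the usual two-step DCGAN scheme -- the discriminator is trained
# directly on real and generated batches, while the stacked `gan` model (generator -> frozen
# discriminator) is trained against "real" labels so that only the generator's weights move
# when it tries to fool the discriminator.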
def show_samples(sample_images, name, epoch):
    figure, axes = plt.subplots(1, len(sample_images), figsize = (IMG_SIZE, IMG_SIZE))
    figure.set_size_inches(15,15)
    for index, axis in enumerate(axes):
        image_array = sample_images[index]
        axis.imshow(image_array)
    plt.tight_layout()
    plt.show()
    plt.close()
def summarize_epoch(d_losses, g_losses):
    fig, ax = plt.subplots()
    plt.plot(d_losses, label='Discriminator', alpha=0.6)
    plt.plot(g_losses, label='Generator', alpha=0.6)
    plt.title("Losses")
    plt.legend()
    plt.show()
    plt.close()
warnings.filterwarnings("ignore")
import random
from scipy import ndimage, misc
# Training
INPUT_DATA_DIR = "../input/cropped/"
OUTPUT_DIR = ""
exclude_img = ["9746","9731","9717","9684","9637","9641","9642","9584","9541","9535",
"9250","9251","9252","9043","8593","8584","8052","8051","8008","7957",
"7958""7761","7762","9510","9307","4848","4791","4785","4465","2709",
"7724","7715","7309","7064","7011","6961","6962","6963","6960","6949",
"6662","6496","6409","6411","6406","6407","6170","6171","6172","5617",
"4363","4232","4086","4047","3894","3889","3493","3393","3362","2780",
"2710","2707","2708","2711","2712","2309","2056","1943","1760","1743",
"1702","1281","1272","772","736","737","691","684","314","242","191"]
exclude_img = [s + ".png" for s in exclude_img]
print("Start!")
import re
images = []
for filename in glob( INPUT_DATA_DIR + '*'):
    if os.path.basename(filename) not in exclude_img:  # compare file names only; glob() returns the full path
        if re.search("\.(jpg|jpeg|png|bmp|tiff)$", filename):
            image = ndimage.imread(filename, mode="RGB")
            image_resized = misc.imresize(image, (64, 64))
            images.append(np.array(image_resized))
images = np.array(images) / 255
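# Note (editorial): real images are scaled to [0, 1] here while the generator ends in tanh
# (range [-1, 1]); scaling inputs to [-1, 1] instead (images * 2 - 1) is the more common pairing,
# left as an observation rather than a change to the original code.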
#input_images = np.asarray([np.asarray(Image.open(file).resize((IMAGE_SIZE, IMAGE_SIZE))) for file in glob( INPUT_DATA_DIR + '*')])
print ("Input: " + str(images.shape))
np.random.shuffle(images)
sample_images = random.sample(list(images), SAMPLES_TO_SHOW)
show_samples(sample_images, OUTPUT_DIR + "inputs", 0)
# Adversarial ground truths
valid = np.ones((BATCH_SIZE, 1))
fake = np.zeros((BATCH_SIZE, 1))
r, c = 4, 4
noise = np.random.normal(0, 1, (r*c, NOISE_SIZE))
#calculate steps per epoch
steps_per_epoch = len(images)//BATCH_SIZE
print("Training begins... Total epochs: {}, steps per epoch: {}".format(EPOCHS, steps_per_epoch))
steps_list = [x for x in range(steps_per_epoch)]
warnings.filterwarnings("ignore")
from tqdm import tqdm
d_loss_list = []
g_loss_list = []
for epoch in tqdm(range(EPOCHS)):
    epoch += 1 #start from 1
    # So, need to append d_loss, g_loss, and step to a dataframe with epoch as 
    
    for step in range(steps_per_epoch):
        step += 1 #start from 1
        # ---------------------
        #  Train Discriminator
        # ---------------------
        # Select a random half of images
        idx = np.random.randint(0, images.shape[0], BATCH_SIZE)
        imgs = images[idx]
        # Sample noise and generate a batch of new images
        noise = np.random.normal(0, 1, (BATCH_SIZE, NOISE_SIZE))
        gen_imgs = model_generator.predict(noise)
        # Train the discriminator (real classified as ones and generated as zeros)
        d_loss_real = model_discriminator.train_on_batch(imgs, valid)
        d_loss_fake = model_discriminator.train_on_batch(gen_imgs, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
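        # train_on_batch returns [loss, accuracy] because the discriminator was compiled with
        # metrics=['accuracy']; averaging the real and fake results gives one [loss, acc] pair
        # per step, which is what d_loss[0] and d_loss[1] report below.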
        # ---------------------
        #  Train GAN
        # ---------------------
        # Train the generator (wants discriminator to mistake images as real)
        g_loss = gan.train_on_batch(noise, valid)
        # Plot the progress
        print ("Epoch: {}/{} | Step: {}/{} [D loss: {:.4f}, acc.: {:.2f}%] [G loss: {:.4f}]".format(
                    epoch, EPOCHS, step, steps_per_epoch, d_loss[0], 100*d_loss[1], g_loss
                ))
        # Append d_loss, g_loss, and step to a dataframe
        #step_df = df.append({'d_loss' : d_loss[0], 'g_loss' : g_loss}, ignore_index=True)
        d_loss_zero = d_loss[0]
        d_loss_list.append(d_loss_zero)
        g_loss_list.append(g_loss)
  # Plot Loss
    summarize_epoch(d_loss_list,g_loss_list)
    # Plot images
    gen_imgs = model_generator.predict(noise)
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i,j].imshow(gen_imgs[cnt, :, :, :])
            axs[i,j].axis('off')
            cnt += 1
    plt.show()
    plt.close()
  
    if epoch % 10 == 0:
        model_generator.save("g{}.h5".format(epoch))
<jupyter_output>  0%|          | 0/300 [00:00<?, ?it/s] | 
	no_license | 
	/Image Generator (DCGAN) Simpson _ A4.ipynb | 
	mayankc7991/GAN-on-Simpsons | 1 | 
| 
	<jupyter_start><jupyter_text>### TRAIN
        the train set, containing the user ids and whether they have churned.
        Churn is defined as whether the user did not continue the subscription within 30 days of expiration. 
        is_churn = 1 means churn,
        is_churn = 0 means renewal.<jupyter_code>train_input = pd.read_csv('/home/dissertation/data/train_v2.csv', 
                          dtype = {'msno' : 'category'})
train_input.head()
test_input = pd.read_csv('/home/dissertation/data/sample_submission_v2.csv', 
                          dtype = {'msno' : 'category'})
test_input.head()
train_input.is_churn.value_counts()
np.mean(train_input.is_churn)
train_input.info()
train_input.is_churn.value_counts(dropna=False, normalize=True).plot(kind='bar', title='Data Set - Normalised Count % (target)')
print("",train_input.is_churn.value_counts())
train_input.is_churn.value_counts(normalize=True)
ax = train_input.is_churn\
        .value_counts(dropna=False, normalize=True)\
        .plot(kind='bar', title='Normalised Count % (Churn)')
ax.set(xlabel="Churned", ylabel="%")
train_input.describe(include='all')<jupyter_output><empty_output><jupyter_text>So we have churn records for 970960 members### MEMBERS<jupyter_code>members_input = pd.read_csv('/home/dissertation/data/members_v3.csv',
                            dtype={'registered_via' : np.uint8,
                                   'gender' : str,
                                   'city' : 'category',
                                   'registered_via' : 'category'})
members_input['registration_init_time_dt'] = pd.to_datetime(members_input['registration_init_time'], 
                                                            format='%Y%m%d', errors='ignore')
members_input.head()
members_input.describe(include='all')<jupyter_output><empty_output><jupyter_text>#### BD: Age of member<jupyter_code>fig1, ax1 = plt.subplots()
ax1.set_title('Box Plot of Age')
ax1.boxplot(members_input['bd'])
upper_q = .99
lower_q = .01
upper_price_outlier = members_input['bd'].quantile(upper_q)
lower_price_outlier = members_input['bd'].quantile(lower_q)
print("Using {0} and {1} quantiles would suggest {2} and {3} as the upper and lower bounds of bd".format(upper_q, lower_q, upper_price_outlier, max(0, lower_price_outlier)))
print("This method would  exclude {0} instances".format(len(members_input[~members_input.bd.between(lower_price_outlier, upper_price_outlier)])))
print("Very inconsistent/noisy data in this feature. Needs to be processed somehow")<jupyter_output>Using 0.99 and 0.01 quantiles would suggest 54.0 and 0 as the upper and lower bounds of bd
This method would  exclude 62658 instances
Very inconsistent/noisy data in this feature. Needs to be processed somehow
<jupyter_text>#### Categorical Features<jupyter_code>for idx, col in enumerate(['city','gender','registered_via']):
    plt.figure()
    members_input[col].value_counts(dropna=False, normalize=True).plot(kind='bar', title='Train Set - Normalized Count % ({0})'.format(col))<jupyter_output><empty_output><jupyter_text>#### Notes:
    msno: 6769473 members in total in the system.
    city [1] is by far the most active city in terms of memberships (~ 70%)
    A large portion (>60%) of gender information is missing
    There are 4 main popular approaches to registering, [4, 3, 9, 7]. The rest are minimal#### TRANSACTIONS<jupyter_code>## Next load in the transactions data
transactions_input = pd.read_csv('/home/dissertation/data/transactions.csv',
                                 dtype = {'payment_method' : 'category',
                                          'payment_plan_days' : np.uint8,
                                          'plan_list_price' : np.uint8,
                                          'actual_amount_paid': np.uint8,
                                          'is_auto_renew' : np.bool,
                                          'is_cancel' : np.bool})
transactions_input.head()
transactions_input.plan_list_price.value_counts().plot(kind='bar')
transactions_input.describe(include='all')
plt.hist(transactions_input.actual_amount_paid, bins=50)
for idx, col in enumerate(['payment_method_id','payment_plan_days','is_auto_renew','is_cancel']):
    plt.figure()
    transactions_input[col].value_counts(dropna=False, normalize=True).plot(kind='bar', title='Train Set - Normalized Count % ({0})'.format(col))<jupyter_output><empty_output><jupyter_text>#### Notes:
    payment_method_id 41 accounts for >50% of all payment types
    most transactions are for 30 day plans
    >85% of customers opt to auto_renew 
    >90% have not cancelled their subscription### Merging train_input and members<jupyter_code>merged_input = pd.merge(left=train_input, right=members_input, how='inner', on=['msno'])
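# Note (editorial): an inner join keeps only msno values present in both train_input and
# members_input, so train members without a demographic record drop out of merged_input.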
merged_input.head()
merged_input.describe(include='all')
merged_input['gender'] = merged_input['gender'].astype(str)
pd.crosstab(merged_input['gender'].fillna('missing'), 
            merged_input['is_churn'].fillna('missing')).plot(kind='bar')
pd.crosstab(merged_input['gender'].fillna('missing'), 
            merged_input['is_churn'].fillna('missing'), normalize=True)
fig1, ax1 = plt.subplots()
ax1.set_title('Box Plot of Age')
ax1.boxplot(merged_input[merged_input.bd.between(0,100)]['bd'])
members_input.head()
transactions_summary.transaction_date_max.max()<jupyter_output><empty_output><jupyter_text>### Merging Transactions with those members involved in the churn study<jupyter_code>transactions_merged = pd.merge(left = merged_input[['msno']], 
                               right = transactions_input, 
                               how='left', 
                               on='msno')
transactions_merged.head()
transactions_merged[transactions_merged.msno == 'ugx0CjOMzazClkFzU2xasmDZaoIqOUAZPsH1q0teWCg=']
%%time
#[transactions_merged.msno == 'ugx0CjOMzazClkFzU2xasmDZaoIqOUAZPsH1q0teWCg=']\
transactions_merged[transactions_merged.msno == 'ugx0CjOMzazClkFzU2xasmDZaoIqOUAZPsH1q0teWCg=']\
        .groupby('msno')\
        .agg({'msno' : {'total_order' : 'count'},
                        'payment_method_id' : {'payment_method_id_mode' : lambda x: x.mode()[0],
                                               'payment_method_id_count' : lambda x: len(np.unique(x))},
                        'payment_plan_days' : {'payment_plan_days_mode' : lambda x: x[x>0].mode()[0],
                                               'payment_plan_days_mean' : 'mean'},
                        'plan_list_price' : {'plan_list_price_mean' : 'mean',
                                             'plan_lifetime_value' : 'sum'},
                        'actual_amount_paid' : {'actual_amount_mean' : 'mean',
                                                'total_actual_amount' : 'sum'},
                        'is_auto_renew' : {'is_auto_renew_mode' : lambda x : x.mode()[0]},
                        'transaction_date' : {'transaction_date_min' : lambda x: x.min(),
                                              'transaction_date_max' : lambda x: x.max()},
                        'is_cancel' : {'cancel_times' : lambda x : sum(x==1)}
            })
transactions_summary = \
    transactions_merged\
            .groupby('msno')\
            .agg({'msno' : {'total_order' : 'count'},
                            'payment_method_id' : {'payment_method_id_mode' : lambda x: x.mode()[0] if len(x) > 1 else x,
                                                   'payment_method_id_count' : lambda x: len(np.unique(x))},
                            'payment_plan_days' : {'payment_plan_days_mode' : lambda x: x.mode()[0] if len(x) > 1 else x
                                                   ,
                                                   'payment_plan_days_mean' : 'mean'},
                            'plan_list_price' : {'plan_list_price_mean' : 'mean',
                                                 'plan_lifetime_value' : 'sum'},
                            'actual_amount_paid' : {'actual_amount_mean' : 'mean',
                                                    'total_actual_amount' : 'sum'},
                            'is_auto_renew' : {'is_auto_renew_mode' : lambda x : x.mode()[0] if len(x) > 1 else x},
                            'transaction_date' : {'transaction_date_min' : lambda x: x.min(),
                                                  'transaction_date_max' : lambda x: x.max()},
                            'is_cancel' : {'cancel_times' : lambda x : sum(x==1)}
                })
        
transactions_summary.columns = transactions_summary.columns.droplevel(0)
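# Editorial aside (assumes a recent pandas): the nested-dict renaming form of .agg() used above
# was deprecated and removed in pandas 1.0; the equivalent named-aggregation style would be, e.g.:
#   transactions_merged.groupby('msno').agg(
#       total_order=('payment_method_id', 'count'),
#       total_actual_amount=('actual_amount_paid', 'sum'))
# which also yields flat column names instead of the MultiIndex handled by droplevel(0) above.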
transactions_summary.reset_index(inplace=True)
transactions_summary.head()
len(transactions_summary)
transactions_summary.to_hdf('/home/dissertation/data/transactions_summary.h5', key='transactions_summary', mode='w')<jupyter_output><empty_output> | 
	no_license | 
	/.ipynb_checkpoints/KKBox - EDA-checkpoint.ipynb | 
	d18124313/dissertation | 7 | 
| 
	<jupyter_start><jupyter_text>*********************************************************************
MAIN PROGRAM TO COMPUTE A DESIGN MATRIX TO INVERT FOR STRUCTURE --
Copyright (c) 2014-2023: HILARY R. MARTENS, LUIS RIVERA, MARK SIMONS         
This file is part of LoadDef.
   LoadDef is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   any later version.
   LoadDef is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   You should have received a copy of the GNU General Public License
   along with LoadDef.  If not, see <https://www.gnu.org/licenses/>.
*********************************************************************IMPORT PRINT FUNCTION<jupyter_code>from __future__ import print_function<jupyter_output><empty_output><jupyter_text>IMPORT MPI MODULE<jupyter_code>from mpi4py import MPI<jupyter_output><empty_output><jupyter_text>MODIFY PYTHON PATH TO INCLUDE 'LoadDef' DIRECTORY<jupyter_code>import sys
import os
sys.path.append(os.getcwd() + "/../")<jupyter_output><empty_output><jupyter_text>IMPORT PYTHON MODULES<jupyter_code>import numpy as np
import scipy as sc
import datetime
import netCDF4 
from math import pi
from scipy import interpolate
from LOADGF.utility import perturb_pmod
from LOADGF.LN import compute_love_numbers
from LOADGF.GF import compute_greens_functions
from CONVGF.CN import load_convolution
from CONVGF.utility import read_station_file
from CONVGF.utility import read_lsmask
from CONVGF.utility import read_greens_fcn_file
from CONVGF.utility import read_greens_fcn_file_norm
from CONVGF.utility import normalize_greens_fcns
from CONVGF.utility import read_AmpPha
from CONVGF.CN import load_convolution
from CONVGF.CN import interpolate_load
from CONVGF.CN import compute_specific_greens_fcns
from CONVGF.CN import generate_integration_mesh
from CONVGF.CN import intmesh2geogcoords
from CONVGF.CN import integrate_greens_fcns
from CONVGF.CN import compute_angularDist_azimuth
from CONVGF.CN import interpolate_lsmask
from CONVGF.CN import coef2amppha
from CONVGF.CN import mass_conservation
from utility.pmes import combine_stations
from CONVGF.utility import read_convolution_file<jupyter_output><empty_output><jupyter_text>--------------- SPECIFY USER INPUTS --------------------- #Full path to planet model text file
    Planet model should be spherically symmetric, elastic,
        non-rotating, and isotropic (SNREI)
    Format: radius(km), vp(km/s), vs(km/s), density(g/cc)
    If the file delimiter is not whitespace, then specify in
        call to function.<jupyter_code>pmod = "PREM"
planet_model = ("../input/Planet_Models/" + pmod + ".txt")<jupyter_output><empty_output><jupyter_text>Perturbation used for the forward-model runs<jupyter_code>perturbation = np.log10(1.01)<jupyter_output><empty_output><jupyter_text>Regions perturbed in the forward-model runs
  Note: The second-order Tikhonov regularization in StructSolv will only work properly if the layers stack on one another.
        For example, the bottom radius of the top-most layer is the top radius of the next layer down. <jupyter_code>nodes = [[6351.,6371.],[6331.,6351.],[6311.,6331.]]<jupyter_output><empty_output><jupyter_text>Reference frame [Blewitt 2003]<jupyter_code>rfm = "cm"<jupyter_output><empty_output><jupyter_text>Full Path to Load Directory and Prefix of Filename<jupyter_code>loadfile_directory = ("../output/Grid_Files/nc/OTL/") <jupyter_output><empty_output><jupyter_text>Prefix for the Load Files (Load Directory will be Searched for all Files Starting with this Prefix)
 :: Note: For Load Files Organized by Date, the End of Filename Name Must be in the Format yyyymmddhhmnsc.txt
 :: Note: If not organized by date, files may be organized by tidal harmonic, for example (i.e. a unique filename ending)
 :: Note: Output names (within output files) will be determined by extension following last underscore character (e.g., date/harmonic/model)<jupyter_code>loadfile_prefix = ("convgf_GOT410c") <jupyter_output><empty_output><jupyter_text>LoadFile Format: ["nc", "txt"]<jupyter_code>loadfile_format = "nc"<jupyter_output><empty_output><jupyter_text>Include imaginary component? For harmonic loads, such as tides, set to "True." Otherwise, for standard displacement data, set to "False."<jupyter_code>inc_imag = True<jupyter_output><empty_output><jupyter_text>Are the Load Files Organized by Datetime?
 :: If False, all Files that match the loadfile directory and prefix will be analyzed.<jupyter_code>time_series = False  <jupyter_output><empty_output><jupyter_text>Date Range for Computation (Year,Month,Day,Hour,Minute,Second)
 :: Note: Only used if 'time_series' is True<jupyter_code>frst_date = [2015,1,1,0,0,0]
last_date = [2016,3,1,0,0,0]<jupyter_output><empty_output><jupyter_text>Are the load values on regular grids (speeds up interpolation); If unsure, leave as false.<jupyter_code>regular = True<jupyter_output><empty_output><jupyter_text>Load Density
 Recommended: 1025-1035 kg/m^3 for oceanic loads (e.g., FES2014, ECCO2); 1 kg/m^3 for atmospheric loads (e.g. ECMWF); 1000 kg/m^3 for fresh water<jupyter_code>ldens = 1030.0<jupyter_output><empty_output><jupyter_text>NEW OPTION: Provide a common geographic mesh?
If True, must provide the full path to a mesh file (see: GRDGEN/common_mesh). 
If False, a station-centered grid will be created within the functions called here. <jupyter_code>common_mesh = True
# Full Path to Grid File Containing Surface Mesh (for sampling the load Green's functions)
#  :: Format: latitude midpoints [float,degrees N], longitude midpoints [float,degrees E], unit area of each patch [float,dimensionless (need to multiply by r^2)]
meshfname = ("commonMesh_global_1.0_1.0_18.0_60.0_213.0_278.0_0.1_0.1_28.0_50.0_233.0_258.0_0.01_0.01_landmask")
convmesh = ("../output/Grid_Files/nc/commonMesh/" + meshfname + ".nc")<jupyter_output><empty_output><jupyter_text>Planet Radius (in meters; used for Greens function normalization)<jupyter_code>planet_radius = 6371000.
  
# Ocean/Land Mask 
#  :: 0 = do not mask ocean or land (retain full model); 1 = mask out land (retain ocean); 2 = mask out oceans (retain land)
#  :: Recommended: 1 for oceanic; 2 for atmospheric
lsmask_type = 1<jupyter_output><empty_output><jupyter_text>Full Path to Land-Sea Mask File (May be Irregular and Sparse)
 :: Format: Lat, Lon, Mask [0=ocean; 1=land]<jupyter_code>lsmask_file = ("../input/Land_Sea/ETOPO1_Ice_g_gmt4_wADD.txt")<jupyter_output><empty_output><jupyter_text>Enforce mass conservation by removing a spatial mean from the load grid?<jupyter_code>mass_cons = False<jupyter_output><empty_output><jupyter_text>Station/Grid-Point Location File (Lat, Lon, StationName)<jupyter_code>sta_file = ("../input/Station_Locations/NOTA.txt")<jupyter_output><empty_output><jupyter_text>Optional: Additional string to include in all output filenames (Love numbers, Green's functions, Convolution)<jupyter_code>outstr = ("")<jupyter_output><empty_output><jupyter_text>Optional: Additional string to include in output filenames for the convolution (e.g. "_2022")<jupyter_code>if (common_mesh == True):
    mtag = "commonMesh"
else:
    mtag = "stationMesh"
outstr_conv = ("_dens" + str(int(ldens)) + "_" + mtag + "_2022")
 
# ------------------ END USER INPUTS ----------------------- #<jupyter_output><empty_output><jupyter_text>-------------------- SETUP MPI --------------------------- #Get the Main MPI Communicator That Controls Communication Between Processors<jupyter_code>comm = MPI.COMM_WORLD
# Get My "Rank", i.e. the Processor Number Assigned to Me
rank = comm.Get_rank()
# Get the Total Number of Other Processors Used
size = comm.Get_size()<jupyter_output><empty_output><jupyter_text>---------------------------------------------------------- #-------------------- BEGIN CODE -------------------------- #Ensure that the Output Directories Exist<jupyter_code>if (rank == 0):
    if not (os.path.isdir("../output/Convolution/")):
        os.makedirs("../output/Convolution/")
    if not (os.path.isdir("../output/DesignMatrixStructure/")):
        os.makedirs("../output/DesignMatrixStructure/")
    if not (os.path.isdir("../output/CombineStations/")):
        os.makedirs("../output/CombineStations/")
    if not (os.path.isdir("../output/Love_Numbers/")):
        os.makedirs("../output/Love_Numbers/")
    if not (os.path.isdir("../output/Love_Numbers/LLN/")):
        os.makedirs("../output/Love_Numbers/LLN")
    if not (os.path.isdir("../output/Love_Numbers/PLN/")):
        os.makedirs("../output/Love_Numbers/PLN")
    if not (os.path.isdir("../output/Love_Numbers/STR/")):
        os.makedirs("../output/Love_Numbers/STR")
    if not (os.path.isdir("../output/Love_Numbers/SHR/")):
        os.makedirs("../output/Love_Numbers/SHR")
    if not (os.path.isdir("../output/Greens_Functions/")):
        os.makedirs("../output/Greens_Functions/")
    if not (os.path.isdir("../output/Planet_Models/")):
        os.makedirs("../output/Planet_Models/")
    if not (os.path.isdir("../output/Convolution/temp/")):
        os.makedirs("../output/Convolution/temp/")
tempdir = "../output/Convolution/temp/"<jupyter_output><empty_output><jupyter_text>Check format of load files<jupyter_code>if not (loadfile_format == "nc"):
    if not (loadfile_format == "txt"):
        print(":: Error: Invalid format for load files. See scripts in the /GRDGEN/load_files/ folder. Acceptable formats: netCDF, txt.")<jupyter_output><empty_output><jupyter_text>Make sure all jobs have finished before continuing<jupyter_code>comm.Barrier()<jupyter_output><empty_output><jupyter_text>---------------- BEGIN PERTURB MODEL ---------------------- #Create a list of planetary models based on layers to perturb<jupyter_code>pmodels = [] # Names of planetary models
lngfext = [] # Extensions for Love number and Green's function files
lnfiles = [] # Names of load Love number files
gffiles = [] # Names of load Green's function files
outdir_pmods = ("../output/Planet_Models/")
# Loop through nodes
for ee in range(0,len(nodes)):
    # Current radial range
    crad_range = nodes[ee]
    # RUN THE PERTURBATIONS
    perturb_pmod.main(planet_model,pmod,perturbation,crad_range,outdir_pmods,suffix=outstr)
    # Current output name (must match what "perturb_pmod" produces!)
    outname = (str('{:.4f}'.format(perturbation)) + "_" + str(crad_range[0]) + "_" + str(crad_range[1]) + outstr)
    # New model for mu
    mu_name = (pmod + "_mu_" + outname)
    fname_mu = (outdir_pmods + mu_name + ".txt")
    lngfext_mu = (mu_name + outstr + ".txt")
    ln_mu = ("../output/Love_Numbers/LLN/lln_" + lngfext_mu)
    gf_mu = ("../output/Greens_Functions/" + rfm + "_" + lngfext_mu)
    # New model for kappa
    kappa_name = (pmod + "_kappa_" + outname)
    fname_kappa = (outdir_pmods + kappa_name + ".txt")
    lngfext_kappa = (kappa_name + outstr + ".txt")
    ln_kappa = ("../output/Love_Numbers/LLN/lln_" + lngfext_kappa)
    gf_kappa = ("../output/Greens_Functions/" + rfm + "_" + lngfext_kappa)
    # New model for rho
    rho_name = (pmod + "_rho_" + outname)
    fname_rho = (outdir_pmods + rho_name + ".txt")
    lngfext_rho = (rho_name + outstr + ".txt")
    ln_rho = ("../output/Love_Numbers/LLN/lln_" + lngfext_rho)
    gf_rho = ("../output/Greens_Functions/" + rfm + "_" + lngfext_rho)
    # Append files to list
    pmodels.append(fname_mu)
    pmodels.append(fname_kappa)
    pmodels.append(fname_rho)
    lngfext.append(lngfext_mu)
    lngfext.append(lngfext_kappa)
    lngfext.append(lngfext_rho)
    lnfiles.append(ln_mu)
    lnfiles.append(ln_kappa)
    lnfiles.append(ln_rho)
    gffiles.append(gf_mu)
    gffiles.append(gf_kappa)
    gffiles.append(gf_rho)
# Append original model
pmodels.append(planet_model)
lngfext.append(pmod + outstr + ".txt")
lnfiles.append("../output/Love_Numbers/LLN/lln_" + pmod + outstr + ".txt")
gffiles.append("../output/Greens_Functions/" + rfm + "_" + pmod + outstr + ".txt")    <jupyter_output><empty_output><jupyter_text>---------------- END PERTURB MODEL ----------------------- #<jupyter_code> 
# ---------------- BEGIN LOVE NUMBERS ---------------------- #<jupyter_output><empty_output><jupyter_text>Loop through planetary models <jupyter_code>for bb in range(0,len(pmodels)): 
    # Current model
    cpmod = pmodels[bb]
    # Output filename
    file_ext = lngfext[bb]
    # Check if file already exists
    if (os.path.isfile(lnfiles[bb])):
        continue
    else: 
 
        # Compute the Love numbers (Load and Potential)
        if (rank == 0):
            # Compute Love Numbers
            ln_n,ln_h,ln_nl,ln_nk,ln_h_inf,ln_l_inf,ln_k_inf,ln_h_inf_p,ln_l_inf_p,ln_k_inf_p,\
                ln_hpot,ln_nlpot,ln_nkpot,ln_hstr,ln_nlstr,ln_nkstr,ln_hshr,ln_nlshr,ln_nkshr,\
                ln_planet_radius,ln_planet_mass,ln_sint,ln_Yload,ln_Ypot,ln_Ystr,ln_Yshr,\
                ln_lmda_surface,ln_mu_surface = \
                compute_love_numbers.main(cpmod,rank,comm,size,file_out=file_ext)
        # For Worker Ranks, Run the Code But Don't Return Any Variables
        else:
            # Workers Compute Love Numbers
            compute_love_numbers.main(cpmod,rank,comm,size,file_out=file_ext)
            # Workers Will Know Nothing About the Data Used to Compute the GFs
            ln_n = ln_h = ln_nl = ln_nk = ln_h_inf = ln_l_inf = ln_k_inf = ln_h_inf_p = ln_l_inf_p = ln_k_inf_p = None
            ln_planet_radius = ln_planet_mass = ln_Yload = ln_Ypot = ln_Ystr = ln_Yshr = None
            ln_hpot = ln_nlpot = ln_nkpot = ln_hstr = ln_nlstr = ln_nkstr = ln_hshr = None
            ln_nlshr = ln_nkshr = ln_sint = ln_lmda_surface = ln_mu_surface = None<jupyter_output><empty_output><jupyter_text>----------------- END LOVE NUMBERS ----------------------- #-------------- BEGIN GREENS FUNCTIONS -------------------- #Make sure all jobs have finished before continuing<jupyter_code>comm.Barrier()<jupyter_output><empty_output><jupyter_text>Set normalization flag<jupyter_code>norm_flag  = False<jupyter_output><empty_output><jupyter_text>Loop through Love number files<jupyter_code>for cc in range(0,len(lnfiles)):
    # Current Love number file
    lln_file = lnfiles[cc]
    # Output filename
    file_out = lngfext[cc]
    # Check if file already exists
    if (os.path.isfile(gffiles[cc])):
        continue
    else: 
  
        # Compute the Displacement Greens functions (For Load Love Numbers Only)
        if (rank == 0):
            u,v,u_norm,v_norm,u_cm,v_cm,u_norm_cm,v_norm_cm,u_cf,v_cf,u_norm_cf,v_norm_cf,gE,gE_norm,gE_cm,gE_cm_norm,\
                gE_cf,gE_cf_norm,tE,tE_norm,tE_cm,tE_cm_norm,tE_cf,tE_cf_norm,\
                e_tt,e_ll,e_rr,e_tt_norm,e_ll_norm,e_rr_norm,e_tt_cm,e_ll_cm,e_rr_cm,e_tt_cm_norm,e_ll_cm_norm,e_rr_cm_norm,\
                e_tt_cf,e_ll_cf,e_rr_cf,e_tt_cf_norm,e_ll_cf_norm,e_rr_cf_norm,gN,tN = \
                    compute_greens_functions.main(lln_file,rank,comm,size,grn_out=file_out)
        # For Worker Ranks, Run the Code But Don't Return Any Variables
        else:
            compute_greens_functions.main(lln_file,rank,comm,size,grn_out=file_out)<jupyter_output><empty_output><jupyter_text>-------------- END GREENS FUNCTIONS ---------------------- #---------------- BEGIN CONVOLUTIONS ---------------------- #Ensure that the Output Directories Exist & Read in the Stations<jupyter_code>if (rank == 0):
    # Read Station File
    slat,slon,sta = read_station_file.main(sta_file)
    # Ensure that Station Locations are in Range 0-360
    neglon_idx = np.where(slon<0.)
    slon[neglon_idx] += 360.
    # Determine Number of Stations Read In
    if isinstance(slat,float) == True: # only 1 station
        numel = 1
    else:
        numel = len(slat)
    # Generate an Array of File Indices
    sta_idx = np.linspace(0,numel,num=numel,endpoint=False)
    np.random.shuffle(sta_idx)
else: # If I'm a worker, I know nothing yet about the data
    slat = slon = sta = numel = sta_idx = None<jupyter_output><empty_output><jupyter_text>Make Sure Everyone Has Reported Back Before Moving On<jupyter_code>comm.Barrier()<jupyter_output><empty_output><jupyter_text>All Processors Get Certain Arrays and Parameters; Broadcast Them<jupyter_code>sta          = comm.bcast(sta, root=0)
slat         = comm.bcast(slat, root=0)
slon         = comm.bcast(slon, root=0)
numel        = comm.bcast(numel, root=0)
sta_idx      = comm.bcast(sta_idx, root=0)<jupyter_output><empty_output><jupyter_text>MPI: Determine the Chunk Sizes for the Convolution<jupyter_code>total_stations = len(slat)
nominal_load = total_stations // size # Floor Divide
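# Worked example (illustrative numbers only): with 1105 stations on 8 ranks, nominal_load = 138
# and the last rank picks up 1105 - 7*138 = 139 stations, so every station is assigned exactly once.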
# Final Chunk Might Be Different in Size Than the Nominal Load
if rank == size - 1:
    procN = total_stations - rank * nominal_load
else:
    procN = nominal_load<jupyter_output><empty_output><jupyter_text>File information<jupyter_code>cndirectory = ("../output/Convolution/")
if (lsmask_type == 2):
    cnprefix = ("cn_LandOnly_")
elif (lsmask_type == 1):
    cnprefix = ("cn_OceanOnly_")
else:
    cnprefix = ("cn_LandAndOceans_")<jupyter_output><empty_output><jupyter_text>Make some preparations that are common to all stations<jupyter_code>if (rank == 0):
    # Read in the Land-Sea Mask
    if (lsmask_type > 0):
        lslat,lslon,lsmask = read_lsmask.main(lsmask_file)
    else:
        # Doesn't really matter so long as there are some values filled in with something other than 1 or 2
        lat1d = np.arange(-90.,90.,2.)
        lon1d = np.arange(0.,360.,2.)
        olon,olat = np.meshgrid(lon1d,lat1d)
        lslat = olat.flatten()
        lslon = olon.flatten()
        lsmask = np.ones((len(lslat),)) * -1.
    # Ensure that Land-Sea Mask Longitudes are in Range 0-360
    neglon_idx = np.where(lslon<0.)
    lslon[neglon_idx] += 360.
    # Convert Start and End Dates to Datetimes
    if (time_series == True):
        frstdt = datetime.datetime(frst_date[0],frst_date[1],frst_date[2],frst_date[3],frst_date[4],frst_date[5])
        lastdt = datetime.datetime(last_date[0],last_date[1],last_date[2],last_date[3],last_date[4],last_date[5])
    # Check format of load files
    if not (loadfile_format == "nc"):
        if not (loadfile_format == "txt"):
            print(":: Error: Invalid format for load files. See scripts in the /GRDGEN/load_files/ folder. \
                Acceptable formats: netCDF, txt.")
    # Determine Number of Matching Load Files
    load_files = []
    if os.path.isdir(loadfile_directory):
        for mfile in os.listdir(loadfile_directory): # Filter by Load Directory
            if mfile.startswith(loadfile_prefix): # Filter by File Prefix
                if (time_series == True):
                    if (loadfile_format == "txt"):
                        mydt = datetime.datetime.strptime(mfile[-18:-4],'%Y%m%d%H%M%S') # Convert Filename String to Datetime
                    elif (loadfile_format == "nc"):
                        mydt = datetime.datetime.strptime(mfile[-17:-3],'%Y%m%d%H%M%S') # Convert Filename String to Datetime
                    else:
                        print(":: Error: Invalid format for load files. See scripts in the /GRDGEN/load_files/ folder. \
                            Acceptable formats: netCDF, txt.")
                    if ((mydt >= frstdt) & (mydt <= lastdt)): # Filter by Date Range
                        load_files.append(loadfile_directory + mfile) # Append File to List
                else:
                    load_files.append(loadfile_directory + mfile) # Append File to List
    else:
        sys.exit('Error: The loadfile directory does not exist. You may need to create it. \
            The /GRDGEN/load_files/ folder contains utility scripts to convert common models into \
            LoadDef-compatible formats, and will automatically create a loadfile directory.')
    # Test for Load Files
    if not load_files:
        sys.exit('Error: Could not find load files. You may need to generate them. \
            The /GRDGEN/load_files/ folder contains utility scripts to convert \
            common models into LoadDef-compatible formats.')
    # Sort the Filenames
    load_files = np.asarray(load_files)
    fidx = np.argsort(load_files)
    load_files = load_files[fidx]
    num_lfiles = len(load_files)<jupyter_output><empty_output><jupyter_text>If I'm a Worker, I Know Nothing About the Data<jupyter_code>else:
    lslat = lslon = lsmask = load_files = None
    eamp = epha = namp = npha = vamp = vpha = None<jupyter_output><empty_output><jupyter_text>Make Sure Everyone Has Reported Back Before Moving On<jupyter_code>comm.Barrier()<jupyter_output><empty_output><jupyter_text>Prepare the common mesh, if applicable<jupyter_code>if (rank == 0): 
    if (common_mesh == True):
        ## Read in the common mesh
        print(':: Common Mesh True. Reading in ilat, ilon, iarea.')
        lcext = convmesh[-2::]
        if (lcext == 'xt'):
            ilat,ilon,unit_area = np.loadtxt(convmesh,usecols=(0,1,2),unpack=True)
            # convert from unit area to true area of the spherical patch in m^2
            iarea = np.multiply(unit_area, planet_radius**2)
        elif (lcext == 'nc'):
            f = netCDF4.Dataset(convmesh)
            ilat = f.variables['midpoint_lat'][:]
            ilon = f.variables['midpoint_lon'][:]
            unit_area = f.variables['unit_area_patch'][:]
            f.close()
            # convert from unit area to true area of the spherical patch in m^2
            iarea = np.multiply(unit_area, planet_radius**2)
        ## Determine the Land-Sea Mask: Interpolate onto Mesh
        print(':: Common Mesh True. Applying Land-Sea Mask.')
        print(':: Number of Grid Points: %s | Size of LSMask: %s' %(str(len(ilat)), str(lsmask.shape)))
        lsmk = interpolate_lsmask.main(ilat,ilon,lslat,lslon,lsmask)
        print(':: Finished LSMask Interpolation.')
        ## For a common mesh, can already interpolate the load(s) onto the mesh, and also apply the land-sea mask.
        ## Prepare land-sea mask application
        if (lsmask_type == 2):
            test_elements = np.where(lsmk == 0); test_elements = test_elements[0]
        elif (lsmask_type == 1):
            test_elements = np.where(lsmk == 1); test_elements = test_elements[0]
        ## Loop through load file(s)
        full_files = []
        for hh in range(0,len(load_files)):
            ## Current load file
            cldfile = load_files[hh]
            ## Filename identifier
            str_components = cldfile.split('_')
            cext = str_components[-1]
            if (loadfile_format == "txt"):
                file_id = cext[0:-4]
            elif (loadfile_format == "nc"):
                file_id = cext[0:-3]
            else:
                print(':: Error. Invalid file format for load models. [load_convolution.py]')
                sys.exit()
            ## Name of file and check whether it already exists
            custom_file = (tempdir + "temp" + outstr_conv + outstr + "_" + file_id + ".nc")
            full_files.append(custom_file)
            if os.path.isfile(custom_file):
                print(':: File exists: ', custom_file, ' -- moving on.')
                continue
            ## Read the File
            llat,llon,amp,pha,llat1dseq,llon1dseq,amp2darr,pha2darr = read_AmpPha.main(cldfile,loadfile_format,regular_grid=regular)
            ## Find Where Amplitude is NaN (if anywhere) and Set to Zero
            nanidx = np.isnan(amp); amp[nanidx] = 0.; pha[nanidx] = 0.
            ## Convert Amp/Pha Arrays to Real/Imag
            real = np.multiply(amp,np.cos(np.multiply(pha,pi/180.)))
            imag = np.multiply(amp,np.sin(np.multiply(pha,pi/180.)))
            ## Interpolate Load at Each Grid Point onto the Integration Mesh
            ic1,ic2   = interpolate_load.main(ilat,ilon,llat,llon,real,imag,regular)
            ## Multiply the Load Heights by the Load Density
            ic1 = np.multiply(ic1,ldens)
            ic2 = np.multiply(ic2,ldens)
            ## Enforce Mass Conservation, if Desired
            if (mass_cons == True):
                if (lsmask_type == 1): # For Oceans
                    print(':: Warning: Enforcing Mass Conservation Over Oceans.')
                    ic1_mc,ic2_mc = mass_conservation.main(ic1[lsmk==0],ic2[lsmk==0],iarea[lsmk==0])
                    ic1[lsmk==0] = ic1_mc
                    ic2[lsmk==0] = ic2_mc
                else: # For Land and Whole-Globe Models (like atmosphere and continental water)
                    print(':: Warning: Enforcing Mass Conservation Over Entire Globe.')
                    ic1,ic2 = mass_conservation.main(ic1,ic2,iarea)
            ## Apply Land-Sea Mask Based on LS Mask Database (LAND=1;OCEAN=0)
            # If lsmask_type = 2, Set Oceans to Zero (retain land)
            # If lsmask_type = 1, Set Land to Zero (retain ocean)
            # Else, Do Nothing (retain full model)
            if (lsmask_type == 2):
                ic1[lsmk == 0] = 0.
                ic2[lsmk == 0] = 0.
            elif (lsmask_type == 1):
                ic1[lsmk == 1] = 0.
                ic2[lsmk == 1] = 0.
            ## Write results to temporary netCDF files
            print(":: Writing netCDF-formatted temporary file for: ", cldfile)
            # Open new NetCDF file in "write" mode
            dataset = netCDF4.Dataset(custom_file,'w',format='NETCDF4_CLASSIC')
            # Define dimensions for variables
            num_pts = len(ic1)
            latitude = dataset.createDimension('latitude',num_pts)
            longitude = dataset.createDimension('longitude',num_pts)
            real = dataset.createDimension('real',num_pts)
            imag = dataset.createDimension('imag',num_pts)
            parea = dataset.createDimension('area',num_pts)
            # Create variables
            latitudes = dataset.createVariable('latitude',float,('latitude',))
            longitudes = dataset.createVariable('longitude',float,('longitude',))
            reals = dataset.createVariable('real',float,('real',))
            imags = dataset.createVariable('imag',float,('imag',))
            pareas = dataset.createVariable('area',float,('area',))
            # Add units
            latitudes.units = 'degree_north'
            longitudes.units = 'degree_east'
            reals.units = 'kg/m^2 (real part of load * load density)'
            imags.units = 'kg/m^2 (imag part of load * load density)'
            pareas.units = 'm^2 (unit area of patch * planet_radius^2)'
            # Assign data
            latitudes[:] = ilat
            longitudes[:] = ilon
            reals[:] = ic1
            imags[:] = ic2
            pareas[:] = iarea
            # Write Data to File
            dataset.close()
        ## Rename file list
        load_files = full_files.copy()<jupyter_output><empty_output><jupyter_text>Make Sure Everyone Has Reported Back Before Moving On<jupyter_code>comm.Barrier()<jupyter_output><empty_output><jupyter_text> If Using a Common Mesh, Then Re-set the LoadFile Format to Indicate a Common Mesh is Used<jupyter_code>if (common_mesh == True):
    loadfile_format = "common"<jupyter_output><empty_output><jupyter_text>All Processors Get Certain Arrays and Parameters; Broadcast Them<jupyter_code>lslat        = comm.bcast(lslat, root=0)
lslon        = comm.bcast(lslon, root=0)
lsmask       = comm.bcast(lsmask, root=0)
load_files   = comm.bcast(load_files, root=0)
 
# Gather the Processor Workloads for All Processors
sendcounts = comm.gather(procN, root=0)<jupyter_output><empty_output><jupyter_text>Create a Data Type for the Convolution Results<jupyter_code>cntype = MPI.DOUBLE.Create_contiguous(1)
cntype.Commit()<jupyter_output><empty_output><jupyter_text>Create a Data Type for Convolution Results for each Station and Load File<jupyter_code>num_lfiles = len(load_files)
ltype = MPI.DOUBLE.Create_contiguous(num_lfiles)
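# Grouping num_lfiles doubles into a single contiguous MPI element means that each
# station's results for all load files can later be scattered/gathered as one unit.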
ltype.Commit()<jupyter_output><empty_output><jupyter_text>Set up suffix names<jupyter_code>cnsuffixes = []<jupyter_output><empty_output><jupyter_text>Loop through Green's function files<jupyter_code>for dd in range(0,len(gffiles)):
    # Current Green's functions
    grn_file = gffiles[dd]
    # Current filename extension
    c_outstr = lngfext[dd]
    c_outstr_noext = c_outstr[0:-4]
    # Current suffix
    csuffix = (rfm + "_" + loadfile_prefix + "_" + c_outstr_noext + outstr_conv + ".txt")
    cnsuffixes.append(csuffix)
    # Scatter the Station Locations (By Index)
    d_sub = np.empty((procN,))
    comm.Scatterv([sta_idx, (sendcounts, None), cntype], d_sub, root=0)
    # No need to Set up the arrays here; no need to use variables passed back from convolution
    # We will just write out the files, and then read them in again later.
    # Loop through the stations
    for ii in range(0,len(d_sub)):
 
        # Current station
        current_sta = int(d_sub[ii]) # Index
        # Remove Index If Only 1 Station
        if (numel == 1): # only 1 station read in
            csta = sta
            clat = slat
            clon = slon
        else:
            csta = sta[current_sta]
            clat = slat[current_sta]
            clon = slon[current_sta]
        # If Rank is Main, Output Station Name
        try:
            csta = csta.decode()
        except:
            pass
        # Output File Name
        cnv_out = (csta + "_" + csuffix)
        # Full file name
        cn_fullpath = (cndirectory + cnprefix + cnv_out)
        # Check if file already exists
        if (os.path.isfile(cn_fullpath)):
            print(":: File already exists: " + cn_fullpath + ". Continuing...")
            continue
        else:
            # Status update
            print(':: Working on station: %s | Number: %6d of %6d | Rank: %6d' %(csta, (ii+1), len(d_sub), rank))
            # Compute Convolution for Current File
            eamp,epha,namp,npha,vamp,vpha = load_convolution.main(\
                grn_file,norm_flag,load_files,loadfile_format,regular,lslat,lslon,lsmask,lsmask_type,clat,clon,csta,cnv_out,load_density=ldens)
 
    # No need to gather the data from MPI processors; no need to use variables passed back from convolution
    # We will just write out the files, and then read them in again later.
  
# Free Data Type
cntype.Free()
ltype.Free()<jupyter_output><empty_output><jupyter_text>Make Sure All Jobs Have Finished Before Continuing<jupyter_code>comm.Barrier()<jupyter_output><empty_output><jupyter_text>Remove load files that are no longer needed<jupyter_code>if (rank == 0):
    if (common_mesh == True):
        for gg in range(0,len(load_files)):
            cfile = load_files[gg]
            os.remove(cfile)<jupyter_output><empty_output><jupyter_text>Make Sure All Jobs Have Finished Before Continuing<jupyter_code>comm.Barrier()
 
# ----------------- END CONVOLUTIONS ----------------------- #<jupyter_output><empty_output><jupyter_text>-------------- BEGIN COMBINE STATIONS -------------------- #Only execute on main processor<jupyter_code>if (rank == 0):
 
    # List of all combined filenames
    combined_filenames = []
 
    # Loop through model files
    for ff in range(0,len(cnsuffixes)):
        # Current convolution suffix
        ccnsuffix = cnsuffixes[ff]
        # Combine the stations into a single file
        outdir_csta = ("../output/CombineStations/")
        c_combined_filenames = combine_stations.main(cndirectory,cnprefix,ccnsuffix,output_directory=outdir_csta)
        # Append to list
        combined_filenames.append(c_combined_filenames)<jupyter_output><empty_output><jupyter_text>Make sure all jobs have finished before continuing<jupyter_code>comm.Barrier()  <jupyter_output><empty_output><jupyter_text>-------------- END COMBINE STATIONS ---------------------- #------------- BEGIN FINITE DIFFERENCE -------------------- #Only execute on main processor<jupyter_code>if (rank == 0):
 
    # Take the difference between each perturbed displacement and the displacements predicted by the primary model (m0)
    #  :: d(Gm)/dm
    #  :: Separately for east, north, up at each station
    #  :: [Gm' - Gm0] / [m' - m0]
    # How many design matrices are we producing? (There will be one for each load model)
    main_files = combined_filenames[-1]
    main_files_out = []
 
    # Loop through main files
    for gg in range(0,len(main_files)):
 
        # Current main file
        mfile = main_files[gg]
    
        # Extract text of filename
        mfilevals = mfile.split('/')
        mfilename = mfilevals[-1]
        mfilename = mfilename[0:-4]
        # Read the main file
        sta,lat,lon,eamp,epha,namp,npha,vamp,vpha = read_convolution_file.main(mfile)
 
        # Convert from amplitude and phase to displacement
        if (inc_imag == False): 
            edisp = np.multiply(eamp,np.cos(np.multiply(epha,(np.pi/180.))))
            ndisp = np.multiply(namp,np.cos(np.multiply(npha,(np.pi/180.))))
            udisp = np.multiply(vamp,np.cos(np.multiply(vpha,(np.pi/180.))))
        # Convert Amp+Phase to Real+Imag
        elif (inc_imag == True): 
            ere = np.multiply(eamp,np.cos(np.multiply(epha,(np.pi/180.))))
            nre = np.multiply(namp,np.cos(np.multiply(npha,(np.pi/180.))))
            ure = np.multiply(vamp,np.cos(np.multiply(vpha,(np.pi/180.))))
            eim = np.multiply(eamp,np.sin(np.multiply(epha,(np.pi/180.))))
            nim = np.multiply(namp,np.sin(np.multiply(npha,(np.pi/180.))))
            uim = np.multiply(vamp,np.sin(np.multiply(vpha,(np.pi/180.))))
        else: 
            sys.exit(':: Error: Incorrect selection for whether to include imaginary components. Must be True or False.')
        # Export a simple text file of the original model
        f_out_main = ("startingmodel_" + mfilename + ".txt")
        f_file_main = ("../output/DesignMatrixStructure/" + f_out_main)
        main_files_out.append(f_file_main)
        temp_head = ("./temp_head_" + str(np.random.randint(500)) + ".txt")
        temp_body = ("./temp_body_" + str(np.random.randint(500)) + ".txt")
        # Prepare Data for Output (as Structured Array)
        if (inc_imag == False): 
            all_data = np.array(list(zip(sta,lat,lon,edisp,ndisp,udisp)), dtype=[('sta','U8'), \
                ('lat',float),('lon',float),('edisp',float),('ndisp',float),('udisp',float)])
            # Write Header Info to File
            hf = open(temp_head,'w')
            temp_str = 'Station  Lat(+N,deg)  Lon(+E,deg)  E-Disp(mm)  N-Disp(mm)  U-Disp(mm)  \n'
            hf.write(temp_str)
            hf.close()
            # Write Model Results to File
            np.savetxt(temp_body,all_data,fmt=["%s"]+["%.7f",]*5,delimiter="        ")
        else:
            all_data = np.array(list(zip(sta,lat,lon,ere,nre,ure,eim,nim,uim)), dtype=[('sta','U8'), \
                ('lat',float),('lon',float),('ere',float),('nre',float),('ure',float),('eim',float),('nim',float),('uim',float)])
            # Write Header Info to File
            hf = open(temp_head,'w')
            temp_str = 'Station  Lat(+N,deg)  Lon(+E,deg)  E-Disp-Re(mm)  N-Disp-Re(mm)  U-Disp-Re(mm)  E-Disp-Im(mm)  N-Disp-Im(mm)  U-Disp-Im(mm)   \n'
            hf.write(temp_str)
            hf.close()
            # Write Model Results to File
            np.savetxt(temp_body,all_data,fmt=["%s"]+["%.7f",]*8,delimiter="        ")
        # Combine Header and Body Files
        filenames_main = [temp_head, temp_body]
        with open(f_file_main,'w') as outfile:
            for fname in filenames_main:
                with open(fname) as infile:
                    outfile.write(infile.read())
        # Remove Header and Body Files
        os.remove(temp_head)
        os.remove(temp_body) 
        # Set up current design matrix
        if (inc_imag == False):
            rowdim = len(sta)*3 # Multiply by three for the three spatial components (e,n,u)
        else:
            rowdim = len(sta)*6 # Multiply by six for the three spatial components (e,n,u), and real & imaginary components for each
        coldim = len(combined_filenames)-1 # -1 so as not to include the main file (only the perturbations to structure; no. of depth ranges * 3 for mu,kappa,rho)
        desmat = np.zeros((rowdim,coldim)) 
        dmrows = np.empty((rowdim,),dtype='U10') # Assumes that each station name plus its component suffix (E/N/U, or Ere/Nim/etc. for the real and imaginary cases) fits within 10 characters
        sclat = np.zeros((rowdim,))
        sclon = np.zeros((rowdim,))
        bottom_radius = np.zeros((coldim,))
        top_radius = np.zeros((coldim,))
        mat_param = np.empty((coldim,),dtype='U10')
        # Loop through other files that correspond to this main file (perturbations to structure)
        for hh in range(0,len(combined_filenames)-1): # -1 so as not to include the main file
            # Current file with material perturbation
            cpfiles = combined_filenames[hh]
            cpfile = cpfiles[gg]
            # Current depth bottom, depth top, and material parameter
            clngfext = lngfext[hh] # information on current model parameter
            clngfext_rmtxt = clngfext[0:-4] # remove the ".txt" extension
            perturbvars = clngfext_rmtxt.split('_')
            bottom_radius[hh] = perturbvars[3]
            top_radius[hh] = perturbvars[4]
            mat_param[hh] = perturbvars[1]
            # Read the current perturbed file
            sta1,lat1,lon1,eamp1,epha1,namp1,npha1,vamp1,vpha1 = read_convolution_file.main(cpfile)
            # Convert from amplitude and phase to displacement
            if (inc_imag == False):
                edisp1 = np.multiply(eamp1,np.cos(np.multiply(epha1,(np.pi/180.))))
                ndisp1 = np.multiply(namp1,np.cos(np.multiply(npha1,(np.pi/180.))))
                udisp1 = np.multiply(vamp1,np.cos(np.multiply(vpha1,(np.pi/180.))))
            # Convert Amp+Phase to Real+Imag
            else:
                ere1 = np.multiply(eamp1,np.cos(np.multiply(epha1,(np.pi/180.))))
                nre1 = np.multiply(namp1,np.cos(np.multiply(npha1,(np.pi/180.))))
                ure1 = np.multiply(vamp1,np.cos(np.multiply(vpha1,(np.pi/180.))))
                eim1 = np.multiply(eamp1,np.sin(np.multiply(epha1,(np.pi/180.))))
                nim1 = np.multiply(namp1,np.sin(np.multiply(npha1,(np.pi/180.))))
                uim1 = np.multiply(vamp1,np.sin(np.multiply(vpha1,(np.pi/180.))))
            # Subtract displacements from those displacements in the main file
            # And then divide by the perturbation. We want: dG(m)/dm, where dm=m'-m0
            # In log space, m' = log10(m'_linear) and m0 = log10(m0_linear).
            # To perturb the model parameters, we have: m' = m0 + "perturbation".
            # Thus, perturbation = m' - m0, and we want to compute: dG(m)/perturbation.
            # Hence, here, we compute the difference in displacement and divide by the perturbation.
            if (inc_imag == False):
                edisp_diff = np.divide(np.subtract(edisp1,edisp),perturbation)
                ndisp_diff = np.divide(np.subtract(ndisp1,ndisp),perturbation)
                udisp_diff = np.divide(np.subtract(udisp1,udisp),perturbation)
            else:
                ere_diff = np.divide(np.subtract(ere1,ere),perturbation)
                nre_diff = np.divide(np.subtract(nre1,nre),perturbation)
                ure_diff = np.divide(np.subtract(ure1,ure),perturbation)
                eim_diff = np.divide(np.subtract(eim1,eim),perturbation)
                nim_diff = np.divide(np.subtract(nim1,nim),perturbation)
                uim_diff = np.divide(np.subtract(uim1,uim),perturbation)
            # Loop through stations
            for jj in range(0,len(sta1)): 
 
                # Fill in Design Matrix
                if (inc_imag == False): 
                    idxe = (jj*3)+0
                    idxn = (jj*3)+1
                    idxu = (jj*3)+2
                    desmat[idxe,hh] = edisp_diff[jj]
                    desmat[idxn,hh] = ndisp_diff[jj]
                    desmat[idxu,hh] = udisp_diff[jj]
                    dmrows[idxe] = (sta1[jj] + 'E')
                    dmrows[idxn] = (sta1[jj] + 'N')
                    dmrows[idxu] = (sta1[jj] + 'U')
                    sclat[idxe] = lat1[jj]
                    sclat[idxn] = lat1[jj]
                    sclat[idxu] = lat1[jj]
                    sclon[idxe] = lon1[jj]
                    sclon[idxn] = lon1[jj]
                    sclon[idxu] = lon1[jj]
                else:
                    idxere = (jj*6)+0
                    idxnre = (jj*6)+1
                    idxure = (jj*6)+2
                    idxeim = (jj*6)+3
                    idxnim = (jj*6)+4
                    idxuim = (jj*6)+5
                    desmat[idxere,hh] = ere_diff[jj]
                    desmat[idxnre,hh] = nre_diff[jj]
                    desmat[idxure,hh] = ure_diff[jj]
                    desmat[idxeim,hh] = eim_diff[jj]
                    desmat[idxnim,hh] = nim_diff[jj]
                    desmat[idxuim,hh] = uim_diff[jj]
                    dmrows[idxere] = (sta1[jj] + 'Ere')
                    dmrows[idxnre] = (sta1[jj] + 'Nre')
                    dmrows[idxure] = (sta1[jj] + 'Ure')
                    dmrows[idxeim] = (sta1[jj] + 'Eim')
                    dmrows[idxnim] = (sta1[jj] + 'Nim')
                    dmrows[idxuim] = (sta1[jj] + 'Uim')
                    sclat[idxere] = lat1[jj]
                    sclat[idxnre] = lat1[jj]
                    sclat[idxure] = lat1[jj]
                    sclon[idxere] = lon1[jj]
                    sclon[idxnre] = lon1[jj]
                    sclon[idxure] = lon1[jj]
                    sclat[idxeim] = lat1[jj]
                    sclat[idxnim] = lat1[jj]
                    sclat[idxuim] = lat1[jj]
                    sclon[idxeim] = lon1[jj]
                    sclon[idxnim] = lon1[jj]
                    sclon[idxuim] = lon1[jj]
        # Write Design Matrix to File
        print(":: ")
        print(":: ")
        print(":: Writing netCDF-formatted file.")
        f_out = ("designmatrix_" + mfilename + ".nc")
        f_file = ("../output/DesignMatrixStructure/" + f_out)
        # Check if file already exists; if so, delete existing file
        if (os.path.isfile(f_file)):
            os.remove(f_file)
        # Open new NetCDF file in "write" mode
        dataset = netCDF4.Dataset(f_file,'w',format='NETCDF4_CLASSIC')
        # Define dimensions for variables
        desmat_shape = desmat.shape
        num_rows = desmat_shape[0]
        num_cols = desmat_shape[1]
        nstacomp = dataset.createDimension('nstacomp',num_rows)
        nstructure = dataset.createDimension('nstructure',num_cols)
        nchars = dataset.createDimension('nchars',10)
        # Create variables
        sta_comp_id = dataset.createVariable('sta_comp_id','S1',('nstacomp','nchars'))
        design_matrix = dataset.createVariable('design_matrix',float,('nstacomp','nstructure'))
        sta_comp_lat = dataset.createVariable('sta_comp_lat',float,('nstacomp',))
        sta_comp_lon = dataset.createVariable('sta_comp_lon',float,('nstacomp',))
        perturb_radius_bottom = dataset.createVariable('perturb_radius_bottom',float,('nstructure',))
        perturb_radius_top = dataset.createVariable('perturb_radius_top',float,('nstructure',))
        perturb_param = dataset.createVariable('perturb_param','S1',('nstructure','nchars'))
        # Add units
        sta_comp_id.units = 'string'
        if (inc_imag == False): 
            sta_comp_id.long_name = 'station_component_id'
        else: 
            sta_comp_id.long_name = 'station_component_RealImaginary_id'
        design_matrix.units = 'mm'
        design_matrix.long_name = 'displacement_mm'
        sta_comp_lat.units = 'degrees_north'
        sta_comp_lat.long_name = 'station_latitude'
        sta_comp_lon.units = 'degrees_east'
        sta_comp_lon.long_name = 'station_longitude'
        perturb_radius_bottom.units = 'km'
        perturb_radius_bottom.long_name = 'bottom_of_perturbed_layer'
        perturb_radius_top.units = 'km'
        perturb_radius_top.long_name = 'top_of_perturbed_layer'
        perturb_param.units = 'string'
        perturb_param.long_name = 'material_parameter_perturbed'
        # Assign data
        #  https://unidata.github.io/netcdf4-python/ (see "Dealing with Strings")
        #  sta_comp_id[:] = netCDF4.stringtochar(np.array(dmrows,dtype='S10'))
        sta_comp_id._Encoding = 'ascii'
        sta_comp_id[:] = np.array(dmrows,dtype='S10')
        design_matrix[:,:] = desmat
        sta_comp_lat[:] = sclat
        sta_comp_lon[:] = sclon
        perturb_radius_bottom[:] = bottom_radius
        perturb_radius_top[:] = top_radius
        perturb_param._Encoding = 'ascii'
        perturb_param[:] = np.array(mat_param,dtype='S10')
    
        # Write Data to File
        dataset.close()
        # Print the output filename
        print(f_file)
        # Read the netCDF file as a test
        f = netCDF4.Dataset(f_file)
        #print(f.variables)
        sta_comp_ids = f.variables['sta_comp_id'][:]
        design_matrix = f.variables['design_matrix'][:]
        sta_comp_lat = f.variables['sta_comp_lat'][:]
        sta_comp_lon = f.variables['sta_comp_lon'][:]
        perturb_radius_bottom = f.variables['perturb_radius_bottom'][:]
        perturb_radius_top = f.variables['perturb_radius_top'][:]
        perturb_param = f.variables['perturb_param'][:]
        f.close()
    # Remind users that they will also need the original forward models when they run the inversion:
    print(':: ')
    print(':: ')
    print(':: Reminder: You will also need the original forward model when running the inversion. [d-(Gm0)] = [d(Gm)/dm]*[dm]')
    print('::   (Gm0) represents the original forward model. [d-(Gm0)] represents the residual vector between GPS data and the original forward model')
    print('::   [d(Gm)/dm] represents the perturbations to the surface displacements with a perturbation to each model parameter.')
    print('::      It is the design matrix computed here. The default perturbation is 1%.')
    print('::   [dm] represents the model vector to be solved for in the inversion.')
    print('::      It is the perturbation to each model parameter required to best fit the residual data.')
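    # Illustrative note (added): once the design matrix G = d(Gm)/dm and the residual
    # vector r = d - G(m0) are assembled, the model update dm could be estimated by
    # least squares, e.g. dm, *_ = np.linalg.lstsq(G, r, rcond=None); the names G and r
    # here are placeholders for arrays built from the files listed above.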
    print(':: The original forward model(s) are: ')
    print(main_files)
    print(':: And the original forward model(s) recast into real and imaginary components are: ')
    print(main_files_out)
    print(':: ')
    print(':: ')<jupyter_output><empty_output> | 
	non_permissive | 
	/desmat/run_dm_structure.ipynb | 
	hrmartens/LoadDef | 55 | 
| 
	<jupyter_start><jupyter_text>### Open Flights Data Wrangling
To practice, you are going to wrangle data from OpenFlights.  You can read about it here: 
http://openflights.org/data.html
This includes three main files, one for each airport, one for each airline, and one for each route.  They can be merged or joined with the appropriate fields.  I have modified the files slightly to include a header row in the .dat files, which makes it a bit easier for you.  
You are required to work through the problems below.  This may take some time.  Be persistent, and ask questions or seek help as needed.  <jupyter_code>import pandas as pd
import numpy as np
# These files use \N as a missing value indicator.  When reading the CSVs, we will tell
# it to use that value as missing or NA.  The double backslash is required because
# otherwise Python would treat \N in the string literal as the start of a named Unicode escape.
# Read in the airports data.
airports = pd.read_csv("data/airports.dat", header=None, na_values='\\N')
airports.columns = ["id", "name", "city", "country", "iata", "icao", "latitude", "longitude", "altitude","timezone", "dst", "tz", "type", "source"]
# Read in the airlines data.
airlines = pd.read_csv("data/airlines.dat", header=None, na_values='\\N')
airlines.columns = ["id", "name", "alias", "iata", "icao", "callsign", "country", "active"]
# Read in the routes data.
routes = pd.read_csv("data/routes.dat", header=None, na_values='\\N')
routes.columns = ["airline","id", "source", "source_id", "dest", "dest_id", "codeshare", "stops", "equipment"]<jupyter_output><empty_output><jupyter_text>1) Start by seeing what's in the data.  What columns are there?  What data types are the columns?  
Remember, 'object' means it is a string, while the numerical values can be floats or ints.  Sometimes you will have problems if it reads numeric data in as strings.  If that happens, you can use the function .astype() to convert it.  Look it up in the pandas API to get more details<jupyter_code>type(airports)
print(airports)
print(airlines)
type(airlines)
print(routes)
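# The question also asks about column data types; dtypes shows them directly
# (added for illustration; .astype() could then fix any misread column, e.g. a
# hypothetical airports['altitude'].astype(float)).
print(airports.dtypes)
print(airlines.dtypes)
print(routes.dtypes)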
type(routes)<jupyter_output><empty_output><jupyter_text>2) Select just the routes that go to or from Lexington Bluegrass Airport, and store them in their own dataframe.  
The airport code is LEX.  You should have a much smaller dataframe.  How many inbound routes and how many outbound routes are there? <jupyter_code>routesfrom= routes[routes['source']=='LEX']
routesfrom
routesto= routes[routes['dest']=='LEX']
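# One way to answer the inbound/outbound question directly (added for illustration):
print("outbound routes from LEX:", len(routesfrom))
print("inbound routes to LEX:", len(routesto))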
routesto<jupyter_output><empty_output><jupyter_text>3) Now let's look at which airlines operate in and out of Lexington.  To do this, you need to merge the airline dataframe to the route dataframe.  
How many routes does each airline have?  The value_counts() method may be useful for answering this question.  <jupyter_code>mergefrom= pd.merge(airlines,routesfrom, on= 'id' ,how='right')
mergefrom
mergeto= pd.merge(airlines,routesto, on= 'id' ,how='right')
mergeto
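# Routes per airline, using value_counts() as suggested in the prompt (added for illustration):
print(mergefrom['name'].value_counts())
print(mergeto['name'].value_counts())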
np.where?<jupyter_output><empty_output><jupyter_text>4) It looks like there are some international airlines with Lexington routes.  To look at how many routes they have, create a new column in your dataframe called 'International', which is set to Y for an overseas airline and N for a domestic airline.  Calculate the percent of routes with an overseas airline.  <jupyter_code>mergefrom['International']=np.where(mergefrom['country']=='United States', 'N','Y')
mergefrom
mergeto['International']=np.where(mergeto['country']=='United States', 'N','Y')
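# Percent of routes operated by an overseas airline (added for illustration):
pct_international = (mergefrom['International'] == 'Y').mean() * 100
print(f"{pct_international:.1f}% of outbound routes use an overseas airline")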
mergeto<jupyter_output><empty_output> | 
	no_license | 
	/9-Data Wrangling/open-flights.ipynb | 
	Kevin-Robert/ce599-s17 | 5 | 
| 
<jupyter_start><jupyter_text>## Gradient Descent Simulation<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
plot_x = np.linspace(-1, 6, 141)
plot_x
plot_y = (plot_x - 2.5)**2 - 1
plt.plot(plot_x, plot_y)
plt.show()
# Derivative (gradient) of the loss function
def dJ(theta):
    return 2*(theta - 2.5)
# Loss function
def J(theta):
    return (theta - 2.5)**2 - 1
eta = 0.1
epsilon = 1e-8   # convergence tolerance
theta = 0.0
while True:
    gradient = dJ(theta)
    last_theta = theta
    theta = theta - eta * gradient
    if(abs(J(theta) - J(last_theta)) < epsilon):
        break
print(theta)
print(J(theta))
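# Sanity check (added note): J(theta) = (theta - 2.5)**2 - 1 has its analytic minimum
# at theta = 2.5 with J = -1, so the two values printed above should be very close to those.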
theta = 0.0
theta_history = [theta]
while True:
    gradient = dJ(theta)
    last_theta = theta
    theta = theta - eta * gradient
    theta_history.append(theta)
    
    if(abs(J(theta) - J(last_theta)) < epsilon):
        break
plt.plot(plot_x, J(plot_x))
plt.plot(np.array(theta_history), J(np.array(theta_history)), color="r", marker='+')
plt.show()
len(theta_history)
def gradient_descent(initial_theta, eta, n_iters = 1e4, epsilon=1e-8):
    theta = initial_theta
    theta_history.append(initial_theta)
    i_iter = 0
    while i_iter < n_iters:
        gradient = dJ(theta)
        last_theta = theta
        theta = theta - eta * gradient
        theta_history.append(theta)
        
        if(abs(J(theta) - J(last_theta)) <epsilon):
            break
        
        i_iter += 1
def plot_theta_history():
    plt.plot(plot_x, J(plot_x))
    plt.plot(np.array(theta_history), J(np.array(theta_history)), color="r", marker='+')
    plt.show()
eta = 0.001
theta_history = []
gradient_descent(0.,eta)
plot_theta_history()
len(theta_history)
eta = 0.8
theta_history = []
gradient_descent(0.,eta)
plot_theta_history()
def J(theta):
    try:
        return (theta-2.5)**2 - 1
    except:
        return float('inf')
eta = 1.1
theta_history = []
gradient_descent(0.,eta)
len(theta_history)
theta_history[-1]
eta = 1.1
theta_history = []
gradient_descent(0.,eta, n_iters=10)
plot_theta_history()<jupyter_output><empty_output> | 
	no_license | 
	/06-Gradient Descent/01-Gradient-Descent-Simulation.ipynb | 
	NOVA-QY/ML-Python | 1 | 
| 
<jupyter_start><jupyter_text>## A Study of Hierarchical Indexing  
### Chapter 8, page 284
>#### A study based on the book 'Python for Data Analysis' 
>  Contact
> * [Linkedin](www.linkedin.com/in/isweluiz)<jupyter_code>import numpy as np
import pandas as pd
data = pd.Series(np.random.randn(9), 
                index=[['a', 'a', 'a','b','b', 'c','c','d','d'],
                      [1,2,3,1,3,1,2,2,3]])
data
data.index<jupyter_output><empty_output><jupyter_text> #### With a hierarchically indexed object, so-called partial indexing is possible, letting you concisely select subsets of the data; <jupyter_code>data['b']
data['a']
data['a':'b']
data.loc['b':'d']<jupyter_output><empty_output><jupyter_text>#### Selection is even possible from an "inner" level:<jupyter_code>data.loc[:, 2]<jupyter_output><empty_output><jupyter_text>#### We can rearrange the data into a DataFrame using the unstack method <jupyter_code>data.unstack()<jupyter_output><empty_output><jupyter_text>#### The inverse operation is stack()<jupyter_code>data.unstack().stack()<jupyter_output><empty_output><jupyter_text>#### In a DataFrame, either axis can have a hierarchical index <jupyter_code>frame = pd.DataFrame(np.arange(12).reshape((4,3)),
                    index=[['a','a','b', 'b'], [1,2,1,2]],
                    columns=[['DF01', 'DF02', 'DF03'],
                            ['Taguatinga', 'Ceilândia' , 'Santa Maria']])
frame<jupyter_output><empty_output><jupyter_text>#### The hierarchical levels can have names (as strings or any Python object)<jupyter_code>frame.index
frame.index.names = ['Key1', 'Key2']
frame.columns.names = ['Distrito', 'Cidade']
frame<jupyter_output><empty_output><jupyter_text>### With partial column indexing we can now, in a similar way, select groups of columns<jupyter_code>frame['DF01']<jupyter_output><empty_output><jupyter_text>### Rearranging and sorting the levels<jupyter_code>frame
frame.swaplevel('Key1', 'Key2')<jupyter_output><empty_output><jupyter_text> #### .sort_index, on the other hand, sorts the data using the values of a single level. <jupyter_code>frame.sort_index(level=1)
frame.swaplevel(0,1).sort_index(level=0)<jupyter_output><empty_output><jupyter_text>## Summary statistics by level
#### We can aggregate by level, over either the rows or the columns, as follows; <jupyter_code>frame.sum(level='Key2')
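# Note (added): in recent pandas releases the `level` argument of sum() is deprecated.
# Assuming a modern pandas, an equivalent per-level aggregation is:
frame.groupby(level='Key2').sum()
frame.T.groupby(level='Cidade').sum().T  # column-wise equivalent via transpose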
frame.sum(level='Cidade', axis=1)<jupyter_output><empty_output> | 
	no_license | 
	/indexação_hierarquica.ipynb | 
	isweluiz/data-science | 11 | 
| 
<jupyter_start><jupyter_text>For a while longer I am just trying out some things from the presentation<jupyter_code>pipeline = Pipeline([
    ('features', CountVectorizer()),
    ('clf', LinearSVC())
])
cross_val_score(pipeline, train.text, train.author, cv=3, n_jobs=3)
pipeline.fit(train.text, train.author)
count_vectorizer = pipeline.steps
count_vectorizer
from sklearn.ensemble import RandomForestClassifier
pipeline = Pipeline([
    ('features', CountVectorizer()),
    ('clf', RandomForestClassifier())
])
cross_val_score(pipeline, train.text, train.author, cv=3, n_jobs=3, scoring="neg_log_loss")
explore = train.copy()
# number of words in the text
explore['words'] = explore.text.apply(lambda s: len(str(s).split()))
# number of unique words
explore['unique_words'] = explore.text.apply(lambda s: len(set(str(s).split())))
# number of characters
explore['symbols'] = explore.text.str.len()
# number of unique characters
explore['unique_symbols'] = explore.text.apply(lambda s: len(set(str(s))))
import string
import numpy as np
# number of capital letters
explore['capital_letters'] = explore.text.apply(lambda s: sum([str.isupper(c) for c in str(s)]))
# number of words consisting only of capital letters
explore['only_capital_letter_words'] = explore.text.apply(lambda s: sum([str.isupper(w) for w in str(s).split()]))
# average word length
explore['average_word_lenght'] = explore.text.apply(lambda s: np.mean([len(w) for w in str(s).split()]))
# number of digits
explore['digits'] = explore.text.apply(lambda s: sum([str.isdigit(c) for c in str(s)]))
# number of punctuation characters
train["punctuation"] = train.text.apply(lambda s: sum([c in string.punctuation for c in str(s)]) )
print(string.punctuation)
import nltk
#nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('english')
print(len(stopwords))
print(stopwords)
explore['stop_words'] = explore.text.apply(lambda s: sum(w in stopwords for w in str(s).split()))
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
print(explore.columns)
features_names = list(set(explore.columns) - {'text', 'author'})
for feature in features_names:
    plt.figure()
    sns.violinplot(x=feature, y="author", data=explore)
    plt.title(feature);
cross_val_score(RandomForestClassifier(), explore[features_names], explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>The results here are still poor. The plots also still look largely similar across the authors.
We can try getting rid of some features, or keeping only the more significant ones - just as an experiment.
Visually I would say that, first of all, digits does absolutely nothing for us. I would also drop average_word_length<jupyter_code>features_names = list(set(features_names) - {'average_word_lenght', 'digits'})
cross_val_score(RandomForestClassifier(), explore[features_names], explore.author, cv=3, n_jobs=3)
from sklearn.linear_model import LogisticRegression
cross_val_score(LogisticRegression(), explore[features_names], train.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>This probably isn't going to work like that. Still, we can drop a few more features just to try with a small feature set<jupyter_code>features_names = ['words', 'unique_words', 'only_capital_letter_words']
cross_val_score(RandomForestClassifier(), explore[features_names], explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>and nothing particularly interesting happened<jupyter_code>from sklearn.decomposition import LatentDirichletAllocation
vectorizer = CountVectorizer(max_df=.15, max_features=1000)
X = vectorizer.fit_transform(train.text)
lda = LatentDirichletAllocation(n_components=10, learning_method="batch", max_iter=15, random_state=0)
topics = lda.fit_transform(X)
cross_val_score(RandomForestClassifier(), topics, explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>probably not a very correct use of LDA<jupyter_code>from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(ngram_range=(1, 2), min_df=2, max_df=0.8, lowercase=False)
X = vectorizer.fit_transform(train.text)
lda = LatentDirichletAllocation(n_components=10, learning_method="online", max_iter=15, random_state=0)
topics = lda.fit_transform(X)
from sklearn.naive_bayes import MultinomialNB
cross_val_score(MultinomialNB(), topics, explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>followed by a potentially even sillier one, given that Naive Bayes is meant to work with text (or vectorized text)<jupyter_code>topics<jupyter_output><empty_output><jupyter_text>It looks strange, but apparently there is an explanation. One implementation of the algorithm assigns each word to a random one of the 10 topics and then runs some number of iterations (max_iter?) to see which words tend to appear together - that would mean they are more likely to belong to the same topic. The vector this "decomposition" gives us most likely represents what share of each topic is contained in the given sentence. That would make these a kind of feature we can use for our classification<jupyter_code>topics.shape
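# Quick check of that interpretation (an illustrative addition, not in the original notebook):
# each row of `topics` should be a non-negative distribution over the 10 topics that sums to ~1.
print(topics.min(), topics.sum(axis=1)[:5])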
cross_val_score(LinearSVC(), topics, explore.author, cv=3, n_jobs=3)
import mglearn
sorting = np.argsort(lda.components_, axis=1)[:, ::-1]
feature_names = np.array(vectorizer.get_feature_names())
mglearn.tools.print_topics(topics=range(10), feature_names=feature_names, sorting=sorting, topics_per_chunk=5, n_words=10)<jupyter_output>topic 0       topic 1       topic 2       topic 3       topic 4       
--------      --------      --------      --------      --------      
de            Obed          wonderful     immediate     will not      
queer         worst         principal     regard        ye            
cellar        est           saved         thy           Why           
crowded       value         resistance    Prefect       do you        
remains       Grace         few days      the Prefect   objects       
obliged       nor the       the principal enemies       able          
glorious      to describe   and passed    injury        leave         
Rue           jest          courage and   from our      it will       
accompany     heh           Charles       Martense      able to       
the Rue       volume        The result    Your diddler  folks         
topic 5       topic 6       topic 7       topic 8       topic 9       
--------      --------      --------      --------      --------      
answ[...]<jupyter_text>Now that we have the split into 10 topics, we can see that at first glance they do not make much sense. Since LDA is not terribly fast, we will have to try to fix a few things "by eye"<jupyter_code>vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(train.text)
lda = LatentDirichletAllocation(n_components=10, learning_method="online", max_iter=15, random_state=0)
topics = lda.fit_transform(X)
sorting = np.argsort(lda.components_, axis=1)[:, ::-1]
feature_names = np.array(vectorizer.get_feature_names())
mglearn.tools.print_topics(topics=range(10), feature_names=feature_names, sorting=sorting, topics_per_chunk=5, n_words=10)
cross_val_score(RandomForestClassifier(), topics, explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>The previous topics contained more stop words, which probably do not help the classification. I removed min/max _df because rare words may still be useful for telling the authors apart, and the frequent ones may be useful too. 
The topics are a bit more meaningful, but still leave something to be desired - there may be more sensible configurations, such as more topics. Or we could even add extra features that have nothing to do with the topics, although I somewhat doubt how sensible that would be.
TfidfVectorizer may be creating more problems than it solves (even though it is usually the one used in LDA examples)<jupyter_code>vectorizer = CountVectorizer(stop_words='english', max_features=1000)
X = vectorizer.fit_transform(train.text)
lda = LatentDirichletAllocation(n_components=10, learning_method="batch", max_iter=15, random_state=0)
topics = lda.fit_transform(X)
sorting = np.argsort(lda.components_, axis=1)[:, ::-1]
feature_names = np.array(vectorizer.get_feature_names())
mglearn.tools.print_topics(topics=range(10), feature_names=feature_names, sorting=sorting, topics_per_chunk=5, n_words=10)
cross_val_score(RandomForestClassifier(), topics, explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>Good - the secret was in max_features, not in Count vs Tfidf - which means that besides bringing Tfidf back we can also play with max and min _df if we only vectorize a limited number of words (and that number itself can be tuned). If 1000 or fewer is optimal, there is not even much point in playing with n-grams<jupyter_code>def ldaReport(lda, topics, vectorizer):
    sorting = np.argsort(lda.components_, axis=1)[:, ::-1]
    feature_names = np.array(vectorizer.get_feature_names())
    print(mglearn.tools.print_topics(topics=range(10), feature_names=feature_names, sorting=sorting, topics_per_chunk=5, n_words=10))
    print(cross_val_score(RandomForestClassifier(), topics, explore.author, cv=3, n_jobs=3))
vectorizer = CountVectorizer(stop_words='english', max_df=.15, max_features=2500)
X = vectorizer.fit_transform(train.text)
lda = LatentDirichletAllocation(n_components=20, learning_method="batch", max_iter=15, random_state=0, n_jobs=-1)
topics = lda.fit_transform(X)
ldaReport(lda, topics, vectorizer)
cross_val_score(LinearSVC(), topics, explore.author, cv=3, n_jobs=3)<jupyter_output><empty_output><jupyter_text>With LDA it probably won't get much better - Tfidf turned out to be the more problematic part, LDA is too slow for a grid search, 1000 topics and 10000 words do not raise the score much, and neither do settings in between.
Topics without "Cthulhu" and "Frankenstein" are perhaps not that surprising, since those may appear in only one or two excerpts rather than in all of them, and they would not help us much to recognize the author in the general case anyway<jupyter_code>vectorizer = CountVectorizer(stop_words='english', max_df=.15, max_features=2500)
X = vectorizer.fit_transform(train.text)
lda = LatentDirichletAllocation(n_components=20, learning_method="batch", max_iter=15, random_state=0, n_jobs=-1)
topics = lda.fit_transform(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(topics, explore.author, stratify=explore.author, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
knn.score(X_test, y_test)<jupyter_output><empty_output><jupyter_text>Even worse, although this is only the simplest algorithm - maybe the others will do better<jupyter_code>from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(topics)
X_scaled = scaler.transform(topics)
agglo = AgglomerativeClustering(n_clusters=3)
labels = agglo.fit_predict(topics)
from sklearn.metrics import adjusted_rand_score
print(adjusted_rand_score(labels, explore.author))
from sklearn.cluster import DBSCAN
dbscan = DBSCAN()
clusters = dbscan.fit_predict(X_scaled)
print(adjusted_rand_score(clusters, explore.author))<jupyter_output>-0.0103836612116
 | 
	no_license | 
	/Spooky Authors.ipynb | 
	dimiturtrz/machine-learning-with-python | 12 | 
| 
<jupyter_start><jupyter_text># Class 5: Visualization with the seaborn library
Seaborn is a statistical visualization library built on top of matplotlib. This means it uses all of matplotlib's primitives to produce more attractive visualizations than matplotlib's defaults. The library has been in use since December 2013 and provides many plot types for visualizing numeric and categorical variables. In this class we will go through most of these methods and apply them to several datasets.
<jupyter_code>from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))<jupyter_output><empty_output><jupyter_text># How do we start working with seaborn?
In this case, the recommendation is to install Anaconda [(link)](https://www.anaconda.com/products/individual), which ships with a set of libraries preinstalled for scientific computing and Data Science. With it, installing and/or updating this library can be done with either of these two simple commands:
1.- Using the anaconda package manager
```bash
conda install seaborn
```
2.- Using the python package manager (pip)
```bash
pip install seaborn
```
**Note:** As with `matplotlib`, this library comes preinstalled in Google Colab sessions
Once installed, to start plotting we need to import the library, usually under the alias `sns`, as shown below. Then the `sns.set()` method is called to activate seaborn's color palette and plot styles.
In addition, we import matplotlib (`import matplotlib.pyplot as plt`) because of what we explained earlier (seaborn is built on top of matplotlib)
```python
import seaborn as sns 
import matplotlib.pyplot as plt
%matplotlib inline # This activates the jupyter backend, showing the plots in the cell outputs
```
<jupyter_code>import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.__version__ # Check that we are on the latest version of seaborn
# Import the additional libraries for numeric work and DataFrame handling
import numpy as np
import pandas as pd<jupyter_output><empty_output><jupyter_text>Let's make a first plot, but with `matplotlib`. We do this to show the before and after of the `sns.set()` method. In this case we plot a curve with the `plt.plot` method:<jupyter_code>plt.plot([1,3,5,4,2])
plt.plot([3,5,7,6,4])
plt.show()<jupyter_output><empty_output><jupyter_text>Now we will apply the `sns.set()` method with its default parameters. This method is an alias of `sns.set_theme()`, so both can be used in the same way.
The main parameters it accepts are the following:
- 'context': The context in which the plots will be presented, which translates into a scaling of the figures and the artists that make them up. The possible values are `['paper', 'notebook', 'talk', 'poster']`, with scales of `[0.8, 1, 1.3, 1.6]` respectively.
- 'style': Controls the aesthetics of the plot, mainly the background handling and whether a grid is drawn. The possible values are `['darkgrid', 'whitegrid', 'dark' ,'white']`
- 'palette': Controls the color palette to use. This replaces matplotlib's default colors, with several options which can be:
    - The name of a seaborn palette ('deep','muted','bright','pastel','dark','colorblind')
    - The name of a matplotlib colormap
    - [husl](https://seaborn.pydata.org/generated/seaborn.husl_palette.html) palettes
    - [cubehelix](https://seaborn.pydata.org/generated/seaborn.cubehelix_palette.html) palettes [cubehelix palettes](https://jiffyclub.github.io/palettable/cubehelix/)
- 'font': Controls which font (typeface for the text in the plot) will be used. To find out which *fonts* are installed, you can use the following sequence of python commands:
```python
import matplotlib.font_manager
matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
```
Finally, for style handling you can also use the `plt.xkcd()` method. **Can you guess what that style produces :)?**
**Note:** To revert this (and everything applied by `sns.set()`), you can use the `plt.rcdefaults()` method.
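For example, a minimal sketch of toggling the xkcd style and then reverting everything (this snippet is an addition and is not run elsewhere in the notebook) might look like this:
```python
plt.xkcd()                 # hand-drawn, comic-style rendering
plt.plot([1, 3, 5, 4, 2])
plt.show()
plt.rcdefaults()           # restore matplotlib's default rcParams (also undoes sns.set())
```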
<jupyter_code>sns.set(context='notebook',style='darkgrid',palette='dark')<jupyter_output><empty_output><jupyter_text>From now on, every plot we make will use the formatting we defined with the `sns.set()` method<jupyter_code>plt.plot([1,3,5,4,2])
plt.plot([3,5,7,6,4])
plt.show() # Our lines now have a visible grid and a darker color palette<jupyter_output><empty_output><jupyter_text>Let's import several datasets so we can plot different kinds of data and explore the options `seaborn` gives us. First, we import the penguins dataset:<jupyter_code>penguins_df = pd.read_csv("http://srodriguez.me/Datasets/penguins.csv").dropna()
penguins_df.head()<jupyter_output><empty_output><jupyter_text>The second dataset contains the daily maximum temperatures at several locations in Australia:<jupyter_code>path = "http://srodriguez.me/Datasets/daily_temps.csv"
temps_df = pd.read_csv(path)
temps_df.Date = pd.to_datetime(temps_df.Date)
temps_df.sample(5)<jupyter_output><empty_output><jupyter_text># Scatter plots (Scatterplot)
Unlike the matplotlib class, the "hello world" we will do with seaborn is the scatter plot. We will explore how to create these plots, but also the advantages seaborn offers beyond producing good-looking styles. First, we will plot the `bill_length_mm` and `bill_depth_mm` columns of the Palmer penguins dataset. The first alternative is to pass lists/arrays/series to the `x` and `y` parameters, and then plot with the `sns.scatterplot(x=X,y=Y)` method.
[Scatterplot documentation](https://seaborn.pydata.org/generated/seaborn.scatterplot.html?highlight=scatterplot#seaborn.scatterplot)
<jupyter_code># Just like matplotlib, we only have to call the scatterplot method and define our parameters.
# In this case there will be one crucial difference
sns.scatterplot(x=penguins_df['bill_length_mm'], y = penguins_df['bill_depth_mm'])
plt.show()<jupyter_output><empty_output><jupyter_text>Notice that the labels for the X and Y axes were added automatically. This happens mainly because seaborn is designed for full integration with pandas. Since the `x` and `y` parameters are pandas Series (which carry the name of the column they come from), seaborn automatically generates the labels at the appropriate levels. We could still modify every element of the plot using all the methods seen for matplotlib.
The other way to use seaborn's methods, and the good practice in this case, is the `data` parameter, which should receive a `DataFrame`. This parameter lets `seaborn` query the data source directly for the columns that exist in that `DataFrame`. Because of this, to define our `x` and `y` parameters we now simply use the names of the columns present in our `DataFrame`. Let's see the example:
<jupyter_code># Although this generates the same plot as before, this syntax is the good practice
sns.scatterplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>As we mentioned, `seaborn` is built on top of `matplotlib`, and we can use both libraries to get the best of each: the power and ease of plotting with `seaborn`, and full control of our visualization with `matplotlib`. 
As an example, we will use matplotlib's `subplots` to split the plotting area and repeat both plots seen above. We will also add a title to each of these plots. The only difference when using `seaborn` with subplots is that we have to add the `ax` parameter, whose value is the axis of the subplot (`ax[0]` or `ax[0,0]` depending on our layout).
<jupyter_code># Create our subplots
fig, ax = plt.subplots(1,2, figsize=(15,5), sharey=True)
# Plot with the first method in the first subplot
sns.scatterplot(x=penguins_df['bill_length_mm'], y = penguins_df['bill_depth_mm'],ax=ax[0])
# Plot with the second method in the second subplot
sns.scatterplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df,ax=ax[1])
# Add the corresponding titles
ax[0].set_title("Plotting from Series")
ax[1].set_title("Plotting from the data source")
plt.show()<jupyter_output><empty_output><jupyter_text>Now suppose we want to color by the species variable. Just as a reminder, here is the code presented in class 3:
```python
fig, ax = plt.subplots(1,1,figsize=(10,10))
sorted_df = iris_df.sort_values(['Species'])# Sort the dataframe by Species
setosa_df = sorted_df.iloc[0:50 , :]  # Slice of the dataframe for iris setosa
versicolor_df = sorted_df.iloc[50:100 , :] # the same for iris versicolor
virginica_df = sorted_df.iloc[100:150:, :] # and finally for iris virginica
color1 = "#f97306" # Orange in hexadecimal RGB format for Iris-Setosa       
color2 = 'Blue' # Blue in X11/CSS4 format for Iris-Versicolor
color3 = 'xkcd:apple green' # "Apple green" in xkcd format for Iris-Virginica
ax.scatter(setosa_df['PetalWidthCm'], setosa_df['PetalLengthCm'], color = color1, label='Setosa') # Scatter for Setosa, with a single instance of the color and the label for setosa
ax.scatter(versicolor_df['PetalWidthCm'], versicolor_df['PetalLengthCm'], color = color2, label='Versicolor')  # Scatter for Versicolor
ax.scatter(virginica_df['PetalWidthCm'], virginica_df['PetalLengthCm'], color = color3, label='Virginica') # Scatter for Virginica
ax.legend() # Add the legend, which will respect the colors used.
plt.show()
```
To paint different colors, we had to call the `ax.scatter` method several times, or else define the list of colors for every point. Either way, it took several operations to reach the final result. 
With seaborn this is now much simpler. To color by a specific column, we use the `hue` parameter, whose value is the column itself. So the code that used to be several lines is now reduced to a single one:
<jupyter_code>sns.scatterplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df,hue='species')
plt.show()<jupyter_output><empty_output><jupyter_text>Aparte de pintar colores para cada uno de los puntos de forma rápida y sencilla, `seaborn` por defecto añade la leyenda asociada a cada uno de los colores pintados en el gráfico. En el mismo estilo para manipular colores, las distintas formas de los marcadores pueden modificado con un simple parámetro: `style`. De la misma forma, los distintos marcadores se agregarán a la leyenda del gráfico.<jupyter_code>plt.figure(figsize=(15,5))
sns.scatterplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df,hue='species', style='island')
plt.show()<jupyter_output><empty_output><jupyter_text>Finalmente, podemos agregar una dimensión más a nuestro gráfico asignándole tamaño a cada uno de los puntos según una columna. **Ojo** que la columna puede ser tanto numérica, como valores categóricos, las cuales seaborn manejara automáticamente la escalas de estas. Los parámetros a utilizar son `size` para asignar la columna y `sizes` para definir los valores mínimos y máximos de los tamaños asociados al gráfico. Este último parámetro corresponde a una tupla/arreglo de dos elementos con el valor mínimo y máximo.<jupyter_code>plt.figure(figsize=(15,5))
#Vamos agregando
sns.scatterplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df,hue='species', style='island', size="body_mass_g",sizes=(20, 200))
plt.show()<jupyter_output><empty_output><jupyter_text>We can see that the legend is causing problems because of how large it is (it covers a couple of points). When we call any `seaborn` plotting method, it returns a `matplotlib` artist object by default. We can therefore take advantage of the object-oriented interface to modify the returned plot directly:<jupyter_code>plt.figure(figsize=(15,5))
# Store the returned artist in the variable ax
ax = sns.scatterplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df,hue='species', style='island', size="body_mass_g",sizes=(20, 200))
# and move the legend's bounding box to the right
ax.legend(bbox_to_anchor=(1, 1), loc='upper left')
plt.show()<jupyter_output><empty_output><jupyter_text># Bar plots
To build bar plots we have several options in seaborn, but always with the same logic of simply calling a method and getting the corresponding plot. The first method is `sns.barplot(x=X, y=Y, data=df, ci='sd')`, which generates a bar plot for the variables we assign. One thing to note is the `ci` parameter, which refers to the confidence interval and to which we have given the value `'sd'` (the standard deviation). This method performs an aggregation automatically when plotting:
[Barplot documentation](https://seaborn.pydata.org/generated/seaborn.barplot.html?highlight=barplot#seaborn.barplot)
<jupyter_code>sns.barplot(x='species', y ='bill_depth_mm', ci='sd',
            data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>**What exactly is it plotting?**<jupyter_code>penguins_df.groupby("species")['bill_depth_mm'].agg(['mean','std'])<jupyter_output><empty_output><jupyter_text>The answer is the mean and the standard deviation of those columns. We could change the aggregation method through the `estimator` parameter, assigning a function as its value. Example of a modification that shows the count of non-null elements and removes the "error" bars:<jupyter_code># Set ci=None so that no confidence interval is computed
# and change the aggregation function through the `estimator` parameter
sns.barplot(x='species', y ='bill_depth_mm', ci=None,estimator=pd.Series.count, 
            data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>Alternatively, there is a generic method for categorical variables called `sns.catplot`. This method can generate different kinds of plots for categorical variables through the `kind` parameter; to reproduce the first bar plot we saw, we simply declare `kind="bar"`:<jupyter_code>sns.catplot(x="species", y="bill_depth_mm", kind="bar", data=penguins_df,ci='sd')
plt.show()<jupyter_output><empty_output><jupyter_text>Besides `sns.barplot` and `sns.catplot`, we have the `sns.countplot` method, which counts the non-null elements in the column being plotted.<jupyter_code>sns.countplot(x='species',data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>And in turn, there is `kind="count"` in the `sns.catplot` method<jupyter_code>sns.catplot(x="species", kind="count", data=penguins_df)<jupyter_output><empty_output><jupyter_text># Correlation matrix and heat maps (Heatmap)
Using matplotlib, generating an annotated heatmap took several lines of code, it was hard to keep the numeric values correctly centered on each heatmap cell, and building a heatmap from start to finish required several methods. `seaborn` implements the `sns.heatmap(data)` method, which performs all the operations needed to generate a heatmap. The `data` parameter can be either a `DataFrame` or a numpy matrix; remember that if we use a `DataFrame`, `seaborn` will extract the row and column names of the matrix being plotted.
In addition, the `annot=True` parameter lets us generate an annotated heatmap. The annotations are correctly aligned and their colors are chosen automatically for maximum readability:
[Heatmap documentation](https://seaborn.pydata.org/generated/seaborn.heatmap.html?highlight=heatmap#seaborn.heatmap)
<jupyter_code>corr = penguins_df.corr() # Compute the correlation matrix for the numeric variables of the penguins dataset
sns.heatmap(corr,annot=True) # Generate the annotated heatmap
plt.show()<jupyter_output><empty_output><jupyter_text># Histograms
Recently, in the latest seaborn release (0.11), the `histplot` method was implemented. As the title of this section suggests, it is used for histograms, and the same usage logic applies. There is a parameter called `kde`, which takes a `Boolean` value and lets us plot the *Kernel Density Estimate*. This feature estimates a continuous distribution from the histogram, so we can, for instance, see whether the data being plotted are skewed.
[Histplot documentation](https://seaborn.pydata.org/generated/seaborn.histplot.html?highlight=histplot#seaborn.histplot)
<jupyter_code>fig, ax = plt.subplots(2,3, figsize=(20,10), )
# subplots with the plain histplot method
sns.histplot(x='bill_length_mm', data = penguins_df, ax = ax[0,0])
sns.histplot(x='bill_depth_mm', data = penguins_df, ax = ax[0,1])
sns.histplot(x='flipper_length_mm', data = penguins_df, ax = ax[0,2])
# subplots with the histplot method plus the KDE curve
sns.histplot(x='bill_length_mm', data = penguins_df, kde=True, ax = ax[1,0])
sns.histplot(x='bill_depth_mm', data = penguins_df, kde=True, ax = ax[1,1])
sns.histplot(x='flipper_length_mm', data = penguins_df, kde=True, ax = ax[1,2])
plt.show()<jupyter_output><empty_output><jupyter_text># Box-and-whisker plots (Boxplots)
As a good statistical computing library, `seaborn` also provides a method for box-and-whisker plots. The advantages are still how much easier `seaborn` is to use than `matplotlib`, plus the aesthetic improvements in the use of colors and in the markers used for outliers:
[Boxplot documentation](https://seaborn.pydata.org/generated/seaborn.boxplot.html?highlight=boxplot#seaborn.boxplot)
<jupyter_code>sns.boxplot(x='species',y='bill_length_mm',data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>Remember that, since we are using `seaborn`, we can split our plots by whichever columns we specify. As an example, we split the previous boxplots by the `sex` column:<jupyter_code>sns.boxplot(x='species',y='flipper_length_mm',data=penguins_df,hue='sex')<jupyter_output><empty_output><jupyter_text># Violin plots (violinplots)
Violin plots are a type of chart similar to the box-and-whisker plot. In this case an inner bar with a white dot is shown, where the dot is the median and the ends of the bar correspond to $Q1$ and $Q3$. Thin lines show the same coverage as the whiskers, but the big difference is that the violin plot shows an estimate of the distribution across the range of values being plotted. The method is `sns.violinplot(x=X, y=Y, data=df)`.
[Violinplot documentation](https://seaborn.pydata.org/generated/seaborn.violinplot.html?highlight=violinplot#seaborn.violinplot)
<jupyter_code>sns.violinplot(x='species',y='flipper_length_mm',
               data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>Just like boxplots, we can split each *violinplot* with the `hue` parameter. Additionally, we can use `split=True` together with `hue` to show, inside a single split violin, the different distributions defined by the `hue` column:<jupyter_code>fig, ax = plt.subplots(1,2, figsize=(10,5))
sns.violinplot(x='species',y='flipper_length_mm',data=penguins_df,hue='sex', ax = ax[0])
sns.violinplot(x='species',y='flipper_length_mm',data=penguins_df,hue='sex', split=True, ax = ax[1])
plt.show()<jupyter_output><empty_output><jupyter_text># Line plots (lineplot)
For this type of plot we will use the temperature dataset and the `sns.lineplot` method. As with `sns.barplot`, whenever it gets the chance `seaborn` will aggregate the data depending on how we present them, on the use of the `hue` parameter, and on values that depend on categories (in this case, since we have 8 Australian cities, we have 8 temperature measurements for each day).
[Lineplot documentation](https://seaborn.pydata.org/generated/seaborn.lineplot.html?highlight=lineplot#seaborn.lineplot)
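As a quick illustration of the per-category behaviour described above, the sketch below draws one line per city instead of aggregating them. Note that the name of the city column (`Location` here) is an assumption, since that column of `temps_df` is not shown in this notebook:
```python
# Hedged sketch: 'Location' is a guessed name for the city column of temps_df.
plt.figure(figsize=(10, 5))
sns.lineplot(x='Date', y='max_tmp_day', data=temps_df, hue='Location')
plt.show()
```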
<jupyter_code>plt.figure(figsize=(10,5))
sns.lineplot(x='Date',y='max_tmp_day',data=temps_df,ci='sd') # Define the confidence interval as the standard deviation
plt.show()<jupyter_output><empty_output><jupyter_text># Activity 1
For this activity we revisit the recent King County Housing house-price dataset. We recently applied linear regression analysis to see how we could model the dependent variable from the variables in this dataset. Now we will build simple visualizations using the knowledge we have just acquired (a possible sketch follows this list):
- Plot the correlation matrix for the different numeric variables. **Note:** the `fmt='.2f'` parameter limits the annotation text to at most 2 decimal places
- Using subplots, generate 4 histograms of different variables in the dataset
- Using subplots, generate 2 boxplots and 2 violinplots comparing the price across different variables (e.g. 'grade', 'view', 'waterfront', 'bedrooms', etc.). **What can you observe from this?**
- Using subplots, generate 2 lineplots
    - House price over the years in which the houses were built
    - House price over the years in which the houses were built, but split by the **waterfront** column
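One possible sketch for the items above, assuming the usual King County column names (`price`, `bedrooms`, `sqft_living`, `grade`, `view`, `waterfront`, `yr_built`) and that `housing_df` has been loaded as in the next cell:
```python
# Histograms of four variables
fig, ax = plt.subplots(2, 2, figsize=(15, 8))
sns.histplot(x='price', data=housing_df, ax=ax[0, 0])
sns.histplot(x='sqft_living', data=housing_df, ax=ax[0, 1])
sns.histplot(x='bedrooms', data=housing_df, ax=ax[1, 0])
sns.histplot(x='grade', data=housing_df, ax=ax[1, 1])
plt.show()

# Two boxplots and two violinplots comparing price across categorical-like variables
fig, ax = plt.subplots(2, 2, figsize=(15, 8))
sns.boxplot(x='grade', y='price', data=housing_df, ax=ax[0, 0])
sns.boxplot(x='waterfront', y='price', data=housing_df, ax=ax[0, 1])
sns.violinplot(x='view', y='price', data=housing_df, ax=ax[1, 0])
sns.violinplot(x='bedrooms', y='price', data=housing_df, ax=ax[1, 1])
plt.show()

# Two lineplots: price over the year built, with and without the waterfront split
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
sns.lineplot(x='yr_built', y='price', data=housing_df, ax=ax[0])
sns.lineplot(x='yr_built', y='price', hue='waterfront', data=housing_df, ax=ax[1])
plt.show()
```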
<jupyter_code>housing_df = pd.read_csv("http://srodriguez.me/Datasets/kc_house_data.csv")
housing_df.head()
housing_df.drop(['id'], axis = 1, inplace = True)
plt.figure(figsize=(20,5)) 
housing_df2 = housing_df.drop(['date', 'zipcode'], axis = 1) 
corr = housing_df2.corr() # Compute the correlation matrix for the numeric variables of the housing dataset
sns.heatmap(corr,annot=True,fmt='.2f') # Generate the annotated heatmap
plt.show() <jupyter_output><empty_output><jupyter_text># End of Activity 1# Additional visualizations for categorical data. 
`seaborn` implements two alternatives for plotting categorical data with points. The first is `sns.stripplot`, which places the categorical variable on the x axis and the quantitative variable on the y axis, showing each value as a point. To tell points apart when they are very close, a random "jitter" is applied by default to spread them away from the vertical axis:
[Stripplot documentation](https://seaborn.pydata.org/generated/seaborn.stripplot.html?highlight=stripplot#seaborn.stripplot)
<jupyter_code>sns.stripplot(y ='bill_depth_mm', data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>Example of a change in the `jitter` parameter:<jupyter_code>sns.stripplot(y ='bill_depth_mm', data=penguins_df, jitter=0.3)
plt.show()<jupyter_output><empty_output><jupyter_text>We can add categorical values to separate the points by category.<jupyter_code>sns.stripplot(x='species',y ='bill_depth_mm', data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>The second method is the swarmplot. To build this plot we use the `sns.swarmplot` method; it generates plots similar to those produced by `sns.stripplot`, but positions the points with an algorithm that prevents them from overlapping. This gives a better visualization at the cost of no longer being useful for very large datasets:
[Swarmplot documentation](https://seaborn.pydata.org/generated/seaborn.swarmplot.html?highlight=swarmplot#seaborn.swarmplot)
<jupyter_code>sns.swarmplot(y ='body_mass_g', data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>We can add categorical values to separate the points by category (in the same way as the stripplot).<jupyter_code>sns.swarmplot(x='island',y ='body_mass_g', data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>And remember that we can also color the points according to any column of interest<jupyter_code>sns.swarmplot(x='island',y ='body_mass_g', data=penguins_df,hue="species")
plt.show()<jupyter_output><empty_output><jupyter_text># Activity 2For this activity we will use a dataset of diamond prices. Most of the variables are self-explanatory except for `x`, `y`, `z`; these refer to the length, height and width of the diamond, respectively. The idea is to implement most of the plots we have seen, with a couple of differences:
- Plot the correlation matrix for the different numeric variables
- Using subplots, generate 7 histograms for the following variables: carat, depth, table, price, x, y, z
- Using subplots, generate 4 boxplots following these instructions:
    - Get a random sample of the diamonds dataset using the `diamonds_df.sample(100)` method
    - Store this sample in a variable and use it to generate the plots
    - Generate the boxplots with a pastel color palette (the `palette` parameter may help)
    - For the first two boxplots, additionally generate swarmplots for the same variables as the boxplots (and the sampled dataset). **Does any warning appear when running the code? What do you think would happen with a sample of 1000?**
    - For the last two boxplots, additionally generate stripplots for the same variables as the boxplots (and the sampled dataset). **Does any warning appear when running the code?**
- Using subplots, generate 4 barplots (a possible sketch is shown right after this list)
    - Count how many diamonds there are for each cut (`cut`)
    - Count how many diamonds there are for each color
    - Generate the bar plot of the average price versus the diamond's clarity
    - Generate the bar plot of the total accumulated price versus the diamond's clarity. **Which numpy function sums all the elements of an array?**
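One possible sketch for the four bar plots, using the full `diamonds_df` loaded in the next cell (`np.sum` is the numpy function that answers the last question):
```python
fig, ax = plt.subplots(2, 2, figsize=(15, 8))
sns.countplot(x='cut', data=diamonds_df, ax=ax[0, 0])     # diamonds per cut
sns.countplot(x='color', data=diamonds_df, ax=ax[0, 1])   # diamonds per color
sns.barplot(x='clarity', y='price', data=diamonds_df, ci=None, ax=ax[1, 0])                    # mean price per clarity
sns.barplot(x='clarity', y='price', data=diamonds_df, ci=None, estimator=np.sum, ax=ax[1, 1])  # total price per clarity
plt.show()
```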
<jupyter_code>import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import numpy as np
import pandas as pd
diamonds_df = pd.read_csv("http://srodriguez.me/Datasets/diamonds.csv")
diamonds_df.head()
del diamonds_df['Unnamed: 0']<jupyter_output><empty_output><jupyter_text>### Correlation matrix for the different numeric variables<jupyter_code>sns.heatmap(diamonds_df.select_dtypes(include = np.number).corr(), annot = True, fmt = '.2f')<jupyter_output><empty_output><jupyter_text>### Histograms for the following variables: carat, depth, table, price, x, y, z<jupyter_code>fig, ax = plt.subplots(7, figsize=(10,5), )
# subplots with the plain histplot method
sns.histplot(x='carat', data = diamonds_df, ax = ax[0])
sns.histplot(x='depth', data = diamonds_df, ax = ax[1])
sns.histplot(x='table', data = diamonds_df, ax = ax[2])
sns.histplot(x='price', data = diamonds_df, ax = ax[3])
sns.histplot(x='x', data = diamonds_df, ax = ax[4])
sns.histplot(x='y', data = diamonds_df, ax = ax[5])
sns.histplot(x='z', data = diamonds_df, ax = ax[6])<jupyter_output><empty_output><jupyter_text>### BoxplotsUsing subplots, generate 4 boxplots following these instructions:
- Get a random sample of the diamonds dataset using the diamonds_df.sample(100) method
- Store this sample in a variable and use it to generate the plots
- Generate the boxplots with a pastel color palette (the palette parameter may help)
- For the first two boxplots, additionally generate swarmplots for the same variables as the boxplots (and the sampled dataset). Does any warning appear when running the code? What do you think would happen with a sample of 1000?
- For the last two boxplots, additionally generate stripplots for the same variables as the boxplots (and the sampled dataset). Does any warning appear when running the code?
<jupyter_code>import random
random.seed(42)
diamonds_sample = diamonds_df.sample(100)
diamonds_sample.head()
fig, ax = plt.subplots(4,1, figsize=(10,20), )
sns.boxplot(x = 'color', y = 'price', data = diamonds_sample, palette = 'pastel', ax = ax[0])
sns.boxplot(x = 'clarity', y = 'price', data = diamonds_sample, palette = 'pastel', ax = ax[1])
sns.boxplot(x = 'cut', y = 'price', data = diamonds_sample, palette = 'pastel', ax = ax[2])
sns.boxplot(y = 'price', data = diamonds_sample, ax = ax[3])<jupyter_output><empty_output><jupyter_text># End of Activity 2# Advanced plots
So far we have seen plots that we already knew from their matplotlib implementation. Seaborn has much more complex implementations, encapsulating a large number of operations and allowing us to plot univariate and bivariate information relatively easily. For this we can use the `sns.jointplot` method, which by default generates a scatter plot for a pair of variables plus a histogram for each of the variables involved. There are also multiple plot types that can be used for the bivariate panel, which can be found in the documentation:
[Jointplot documentation](https://seaborn.pydata.org/generated/seaborn.jointplot.html?highlight=jointplot#seaborn.jointplot)
<jupyter_code>sns.jointplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>Note that if we use the `hue` parameter, the histograms switch to KDE plots:<jupyter_code>sns.jointplot(x='bill_length_mm', y = 'bill_depth_mm',data=penguins_df, hue='sex')
plt.show()<jupyter_output><empty_output><jupyter_text>Earlier, in the matplotlib lectures, we saw that if we wanted scatter plots for all the numeric variables of the dataset, we had to instantiate the subplots and plot the interaction for every pair of variables one by one. Seaborn, on the other hand, brings the `sns.pairplot` method, which builds a matrix of bivariate plots (scatter plots by default). By default the diagonal shows histograms, but these can be changed, just like the bivariate plots. Most of the bivariate plot options are the same as those available in jointplot. To see all the possibilities, check the documentation:
[Pairplot documentation](https://seaborn.pydata.org/generated/seaborn.pairplot.html?highlight=pairplot#seaborn.pairplot)
<jupyter_code>sns.pairplot(data=penguins_df) # At a minimum this method only needs the data parameter
plt.show()
# If we add the hue parameter, the diagonal changes from histograms to KDEs
sns.pairplot(data=penguins_df,hue='species')
plt.show()
# And if we use the corner parameter, only the lower triangle (from the diagonal down) is shown.
sns.pairplot(penguins_df, corner=True,hue='species')
plt.show()<jupyter_output><empty_output><jupyter_text>Besides all these plots, seaborn provides a method to draw a scatter plot together with a simple linear regression. To build this plot we call the `sns.regplot()` method; it is not able to split by a specific column (e.g., via the `hue` parameter):
[Regplot documentation](https://seaborn.pydata.org/generated/seaborn.regplot.html?highlight=regplot#seaborn.regplot)
<jupyter_code># Simple linear regression between body_mass_g and bill_depth_mm
sns.regplot(x='body_mass_g', y = 'bill_depth_mm',data=penguins_df)
plt.show()<jupyter_output><empty_output><jupyter_text>A more complete method, which allows more customization of the generated plots, is `sns.lmplot`. This method lets us model multiple linear relationships present in our data. Here we can use the `hue` parameter to split by a column, and additionally the `col` parameter to generate sub-plots depending on the values present in the selected column.
[Lmplot documentation](https://seaborn.pydata.org/generated/seaborn.lmplot.html#seaborn.lmplot)
<jupyter_code># Generate a plot similar to regplot
sns.lmplot(x='body_mass_g', y = 'bill_depth_mm',data=penguins_df)
plt.show()
# But now we can fit one regression per species (thanks to the hue parameter)
sns.lmplot(x='body_mass_g', y = 'bill_depth_mm',data=penguins_df,hue="species",)
plt.show()
# And we can even generate sub-plots with the col parameter (here we split by island and fit one regression per species)
sns.lmplot(x='body_mass_g', y = 'bill_depth_mm',data=penguins_df,hue="species",col='island')
plt.show()<jupyter_output><empty_output><jupyter_text>Finally, we have the `sns.displot` method which, just like `sns.lmplot`, lets us generate plots and sub-plots with multiple levels of separation depending on the columns of our dataset that we want to inspect. In this case, `sns.displot` is specialized in plotting univariate and bivariate distributions.
[Displot documentation](https://seaborn.pydata.org/generated/seaborn.displot.html?highlight=displot#seaborn.displot)
<jupyter_code># Here we look at the univariate distribution, using KDE, split by penguin species and sex
sns.displot(data=penguins_df, x="flipper_length_mm", hue="species", col="sex", kind="kde")
plt.show()
# Or, alternatively, a bivariate histogram using the same displot method
sns.displot(data=penguins_df, x="flipper_length_mm", y="bill_length_mm")
# Alternatively we have the bivariate KDE plots, which model in a similar way (but with surfaces)
# the bivariate distribution of two columns of our dataset
sns.displot(x="flipper_length_mm", y="bill_length_mm", kind="kde", data=penguins_df)
# And for this last plot we can apply the same split by species and sub-plots by the penguins' sex column
sns.displot(x="flipper_length_mm", y="bill_length_mm", hue='species',kind="kde", col="sex",data=penguins_df)<jupyter_output><empty_output><jupyter_text># Activity 3To wrap up the activities, you will implement the advanced plots for the two datasets we have been working with. In this case you are asked to use one of the categorical variables of each dataset as the color (whenever the plotting method allows it).
For **each dataset** (a possible sketch follows this list):
- 1 jointplot
- 1 pairplot
- 1 regplot
- 1 lmplot
- 1 displot
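One possible sketch for the penguins dataset; the diamonds versions follow the same pattern, preferably on a small sample such as `diamonds_df.sample(300)`:
```python
sns.jointplot(x='bill_length_mm', y='bill_depth_mm', data=penguins_df, hue='species')
sns.pairplot(data=penguins_df, hue='species')
sns.regplot(x='body_mass_g', y='flipper_length_mm', data=penguins_df)  # regplot does not accept hue
sns.lmplot(x='body_mass_g', y='flipper_length_mm', data=penguins_df, hue='species')
sns.displot(data=penguins_df, x='flipper_length_mm', hue='species', kind='kde')
plt.show()
```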
# End of Activity 3**Note: the challenges will come in the infographic activity**<jupyter_code>diamonds_df = pd.read_csv("http://srodriguez.me/Datasets/diamonds.csv")
diamonds_df.head()
sns.jointplot(data = diamonds_df.sample(300), x="carat", y="price", hue = 'cut')<jupyter_output><empty_output> | 
	no_license | 
	/3_analisis_exploratorio_y_estadistica/Clase-5.ipynb | 
	bastianabaleiv/diplomado_udd_corfo_2020 | 45 | 
| 
	<jupyter_start><jupyter_text># COMSW 4995 - Deep Learning Project
## Bingchen Liu, Binwei Xu, Hang Yang 
## Columbia University### This is a two-branch MobileNet-Bidirectional LSTM model
The Mobilenet branch is largely based on Beluga's kernel https://www.kaggle.com/gaborfodor/greyscale-mobilenet-lb-0-892
The LSTM branch is inspired by Kevin's kernel https://www.kaggle.com/kmader/quickdraw-baseline-lstm-reading-and-submission 
The two branch model is inspired by Nguyen's kernel https://www.kaggle.com/huyenvyvy/fork-of-combining-cnn-and-rnn/notebook
The following paper also inspired the work: https://arxiv.org/abs/1804.01401
### First import the modules that we will be using<jupyter_code>%matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import os
import ast
import datetime as dt
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [16, 10]
plt.rcParams['font.size'] = 14
import seaborn as sns
import cv2
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input, concatenate
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation,GlobalAveragePooling2D
from tensorflow.keras.metrics import categorical_accuracy, top_k_categorical_accuracy, categorical_crossentropy
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from ast import literal_eval
from tensorflow.keras.preprocessing.sequence import pad_sequences
start = dt.datetime.now()<jupyter_output>/home/hangyang/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
<jupyter_text>### Folders that store our datasets; we generated 200 CSV files in total from the raw data<jupyter_code>DP_DIR = '/home/bx2157/quickdraw/data/shuffle-csvs/'
INPUT_DIR = "/home/hangyang/QuickDraw/quickdraw/data/"
BASE_SIZE = 256
NCSVS = 200
NCATS = 340
np.random.seed(seed=1987)
tf.set_random_seed(seed=1987)
def f2cat(filename: str) -> str:
    return filename.split('.')[0]
def list_all_categories():
    files = os.listdir(os.path.join(INPUT_DIR, 'train_simplified'))
    return sorted([f2cat(f) for f in files], key=str.lower)<jupyter_output><empty_output><jupyter_text>### Some metric functions that we use during training and prediction<jupyter_code>def apk(actual, predicted, k=3):
    """
    Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
    """
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    if not actual:
        return 0.0
    return score / min(len(actual), k)
def mapk(actual, predicted, k=3):
    """
    Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
def preds2catids(predictions):
    return pd.DataFrame(np.argsort(-predictions, axis=1)[:, :3], columns=['a', 'b', 'c'])
def top_3_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)<jupyter_output><empty_output><jupyter_text>## MobileNet branch
MobileNets are based on a streamlined architecture that uses depthwise separable convolutions to build lightweight deep neural networks.
[MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)### We tried different parameters here; these are the settings with the best performance<jupyter_code>STEPS = 1000
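# Aside (illustrative sketch, not part of the original model below): a depthwise
# separable convolution factors a standard convolution into a per-channel
# (depthwise) spatial filter followed by a 1x1 "pointwise" convolution that mixes
# channels, which is what makes MobileNet lightweight.
from tensorflow.keras.layers import DepthwiseConv2D
_separable_block_sketch = Sequential([
    DepthwiseConv2D(kernel_size=3, padding='same', input_shape=(128, 128, 32)),  # spatial filtering, one filter per channel
    Conv2D(64, kernel_size=1, activation='relu'),                                # 1x1 pointwise convolution mixes channels
])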
EPOCHS = 50
size = 128
batchsize = 340
base_model = MobileNet(input_shape=(size, size,3), alpha=1., weights="imagenet", include_top = False)
inp = base_model.input
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(NCATS, activation='softmax')(x)
model = Model(inp, x)
base_model = Sequential(model.layers[:-2])
base_model.summary()<jupyter_output>_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1_pad (ZeroPadding2D)    (None, 130, 130, 3)       0         
_________________________________________________________________
conv1 (Conv2D)               (None, 64, 64, 32)        864       
_________________________________________________________________
conv1_bn (BatchNormalization (None, 64, 64, 32)        128       
_________________________________________________________________
conv1_relu (Activation)      (None, 64, 64, 32)        0         
_________________________________________________________________
conv_pad_1 (ZeroPadding2D)   (None, 66, 66, 32)        0         
_________________________________________________________________
conv_dw_1 (DepthwiseConv2D)  (None, 64, 64, 32)        288       
_________________________________________________________________
conv_dw_1_[...]<jupyter_text>## LSTM branch<jupyter_code>from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization, Conv1D, LSTM, Dense, Dropout, Bidirectional
from tensorflow.keras.layers import CuDNNLSTM as LSTM # this one is about 3x faster on GPU instances
inp = Input(shape = (70,3))
x = BatchNormalization()(inp)
# # filter count and length are taken from the script https://github.com/tensorflow/models/blob/master/tutorials/rnn/quickdraw/train_model.py
x = Conv1D(256, (5,), activation = "relu")(x)
x = Dropout(0.2)(x)
x = Conv1D(256, (5,), activation = 'relu')(x)
x = Dropout(0.2)(x)
x = Conv1D(256, (3,), activation = 'relu')(x)
x = Dropout(0.2)(x)
x = Bidirectional(LSTM(128, return_sequences = True))(x)
x = Dropout(0.2)(x)
x = Bidirectional(LSTM(128, return_sequences = False))(x)
x = Dropout(0.2)(x)
x = Dense(512, activation = 'relu')(x)
x = Dense(NCATS, activation='softmax')(x)
stroke_read_model = Model(inp,x)
stroke_read_model.compile(optimizer = 'adam', 
                          loss = 'categorical_crossentropy', 
                          metrics = ['categorical_accuracy', top_3_accuracy])
stroke_read_model = Sequential(stroke_read_model.layers[:-1])
stroke_read_model.summary()<jupyter_output>_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
batch_normalization (BatchNo (None, 70, 3)             12        
_________________________________________________________________
conv1d (Conv1D)              (None, 66, 256)           4096      
_________________________________________________________________
dropout (Dropout)            (None, 66, 256)           0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, 62, 256)           327936    
_________________________________________________________________
dropout_1 (Dropout)          (None, 62, 256)           0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, 60, 256)           196864    
_________________________________________________________________
dropout_2 [...]<jupyter_text>### Combining two branches<jupyter_code>inp = base_model.input
y = base_model.output
y = GlobalAveragePooling2D()(y)
inp2 = Input(shape = (70, 3))
z = stroke_read_model(inp2)
x = concatenate([y, z])
x = Dropout(0.3)(x)
x = Dense(NCATS, activation='softmax')(x)
model = Model([inp, inp2], x)
model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy',
              metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])<jupyter_output><empty_output><jupyter_text>### LSTM Preprocessing<jupyter_code>def _stack_it(raw_strokes):
    """preprocess the string and make 
    a standard Nx3 stroke vector"""
    stroke_vec = literal_eval(raw_strokes) # string->list
    # unwrap the list
    in_strokes = [(xi,yi,i)  
     for i,(x,y) in enumerate(stroke_vec) 
     for xi,yi in zip(x,y)]
    c_strokes = np.stack(in_strokes)
    # replace stroke id with 1 for continue, 2 for new
    c_strokes[:,2] = [1]+np.diff(c_strokes[:,2]).tolist()
    c_strokes[:,2] += 1 # since 0 is no stroke
    # pad the strokes with zeros
    return pad_sequences(c_strokes.swapaxes(0, 1), 
                         maxlen=70, 
                         padding='post').swapaxes(0, 1)
<jupyter_output><empty_output><jupyter_text>## Data Generator<jupyter_code>def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
    img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
    for t, stroke in enumerate(raw_strokes):
        for i in range(len(stroke[0]) - 1):
            color = 255 - min(t, 10) * 13 if time_color else 255
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                         (stroke[0][i + 1], stroke[1][i + 1]), color, lw)
    if size != BASE_SIZE:
        return cv2.resize(img, (size, size))
    else:
        return img
def image_generator_xd(size, batchsize, ks, lw=6, time_color=True):
    while True:
        for k in np.random.permutation(ks):
            filename = os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(k))
            for df in pd.read_csv(filename, chunksize=batchsize):
                df['drawing1'] = df['drawing'].apply(ast.literal_eval)
                x = np.zeros((len(df), size, size, 1))
                for i, raw_strokes in enumerate(df.drawing1.values):
                    x[i, :, :, 0] = draw_cv2(raw_strokes, size=size, lw=lw,
                                             time_color=time_color)
                x = np.repeat(x, 3, axis =3)
                x = preprocess_input(x).astype(np.float32)
                
                df['drawing'] = df['drawing'].map(_stack_it)
                x2 = np.stack(df['drawing'], 0)
                y = keras.utils.to_categorical(df.y, num_classes=NCATS)
                yield [x, x2], y
def df_to_image_array_xd(df, size, lw=6, time_color=True):
    df['drawing1'] = df['drawing'].apply(ast.literal_eval)
    x = np.zeros((len(df), size, size, 1))
    
    for i, raw_strokes in enumerate(df.drawing1.values):
        x[i, :, :, 0] = draw_cv2(raw_strokes, size=size, lw=lw, time_color=time_color)
    x = np.repeat(x, 3, axis =3)
    x = preprocess_input(x).astype(np.float32)
    df['drawing'] = df['drawing'].map(_stack_it)
    x2 = np.stack(df['drawing'], 0)
    return [x,x2]
train_datagen = image_generator_xd(size=size, batchsize=batchsize, ks=range(NCSVS - 1))
val_datagen = image_generator_xd(size=size, batchsize=batchsize, ks=range(NCSVS - 1, NCSVS))<jupyter_output><empty_output><jupyter_text>### Run all epochs<jupyter_code>callbacks = [
    ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.5, patience=3,
                      min_delta=0.005, mode='max', cooldown=3, verbose=1),
    ModelCheckpoint("mobilenet_lstm_12_4.model",monitor='val_categorical_accuracy', 
                                   mode = 'max', save_best_only=True, verbose=1)
]
hists = []
hist = model.fit_generator(
    train_datagen, steps_per_epoch=STEPS, epochs=EPOCHS, verbose=1,
    validation_data=val_datagen, validation_steps = 100,
    callbacks = callbacks
)
hists.append(hist)<jupyter_output>Epoch 1/50
799/800 [============================>.] - ETA: 0s - loss: 1.8260 - categorical_crossentropy: 1.8260 - categorical_accuracy: 0.5675 - top_3_accuracy: 0.7580
Epoch 00001: val_top_3_accuracy improved from -inf to 0.83891, saving model to mobilenet_lstm.model
800/800 [==============================] - 576s 719ms/step - loss: 1.8255 - categorical_crossentropy: 1.8255 - categorical_accuracy: 0.5676 - top_3_accuracy: 0.7581 - val_loss: 1.3584 - val_categorical_crossentropy: 1.3584 - val_categorical_accuracy: 0.6627 - val_top_3_accuracy: 0.8389
Epoch 2/50
799/800 [============================>.] - ETA: 0s - loss: 1.2967 - categorical_crossentropy: 1.2967 - categorical_accuracy: 0.6752 - top_3_accuracy: 0.8516
Epoch 00002: val_top_3_accuracy improved from 0.83891 to 0.85729, saving model to mobilenet_lstm.model
800/800 [==============================] - 545s 681ms/step - loss: 1.2967 - categorical_crossentropy: 1.2967 - categorical_accuracy: 0.6751 - top_3_accuracy: 0.8516 - val_los[...]<jupyter_text>### Plot the accuracy and loss for both training and validation samples during the whole training process. <jupyter_code>hist_df = pd.concat([pd.DataFrame(hist.history) for hist in hists], sort=True)
hist_df.index = np.arange(1, len(hist_df)+1)
fig, axs = plt.subplots(nrows=2, sharex=True, figsize=(16, 10))
axs[0].plot(hist_df.val_categorical_accuracy, lw=5, label='Validation Accuracy')
axs[0].plot(hist_df.categorical_accuracy, lw=5, label='Training Accuracy')
axs[0].set_ylabel('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].grid()
axs[0].legend(loc=0)
axs[1].plot(hist_df.val_categorical_crossentropy, lw=5, label='Validation MLogLoss')
axs[1].plot(hist_df.categorical_crossentropy, lw=5, label='Training MLogLoss')
axs[1].set_ylabel('MLogLoss')
axs[1].set_xlabel('Epoch')
axs[1].grid()
axs[1].legend(loc=0)
fig.savefig('hist.png', dpi=300)
plt.show();<jupyter_output><empty_output><jupyter_text>### Make predictions using the trained model and prepare the predictions in the format that can be used for Kaggle evaluation. <jupyter_code>df = pd.read_csv(os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(NCSVS-1)), nrows=34000)
for i in range(10):
    valid_df = df.loc[i*3400:(i+1)*3400,:].copy()
    x_valid, x2 = df_to_image_array_xd(valid_df, size)
    y_valid = keras.utils.to_categorical(valid_df.y, num_classes=NCATS)
    print(x_valid.shape, y_valid.shape)
    print('Validation array memory {:.2f} GB'.format(x_valid.nbytes / 1024.**3 ))
    valid_predictions = model.predict([x_valid, x2], batch_size=128, verbose=1)
    map3 = mapk(valid_df[['y']].values, preds2catids(valid_predictions).values)
    print('Map3: {:.3f}'.format(map3))<jupyter_output><empty_output><jupyter_text>## Create Submission<jupyter_code>test = pd.read_csv(os.path.join(INPUT_DIR, 'test_simplified.csv'))
for i in range(10):
    end = min((i+1)*11220, 112199)
    subtest= test.iloc[i*11220:end].copy().reset_index(drop=True)
    x_test = df_to_image_array_xd(subtest, size)
    test_predictions = model.predict(x_test, batch_size=128, verbose=1)
    top3 = preds2catids(test_predictions)
    cats = list_all_categories()
    id2cat = {k: cat.replace(' ', '_') for k, cat in enumerate(cats)}
    top3cats = top3.replace(id2cat)
    subtest['word'] = top3cats['a'] + ' ' + top3cats['b'] + ' ' + top3cats['c']
    subtest.head()
    if i ==0:
        submission = subtest[['key_id', 'word']]
    else: 
        submission = submission.append(subtest[['key_id', 'word']], ignore_index=True)
<jupyter_output><empty_output><jupyter_text>### Save the predictions in a csv file.<jupyter_code>submission.to_csv('mobilenet_lstm_12_4.csv', index=False)
submission.head()
submission.shape
end = dt.datetime.now()
print('Latest run {}.\nTotal time {}s'.format(end, (end - start).seconds))<jupyter_output><empty_output> | 
	no_license | 
	/MobileNet_LSTM.ipynb | 
	bingchen-liu/COMSW4995-Quickdraw-Kaggle-Challenge | 13 | 
| 
<jupyter_start><jupyter_text># Homework: (Kaggle) Titanic survival prediction
https://www.kaggle.com/c/titanic# [Homework goal]
- Try to imitate the example and observe the effect of mean encoding in the Titanic survival prediction# [Homework focus]
- Following the example, complete the predictions using label encoding and mean encoding together with logistic regression
- Observe how label encoding and mean encoding differ in number of features / logistic regression score / logistic regression runtime (In[3], Out[3], In[4], Out[4]) # Homework 1
* Following the example, re-implement the categorical features of the Titanic example using mean encoding<jupyter_code># All the preparation before feature engineering (same as the previous example)
import pandas as pd
import numpy as np
import copy, time
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
%matplotlib inline
# solver='liblinear'
data_path = '../data/Part02/'
df_train = pd.read_csv(data_path + 'titanic_train.csv')
df_test = pd.read_csv(data_path + 'titanic_test.csv')
train_Y = df_train['Survived']
ids = df_test['PassengerId']
df_train = df_train.drop(['PassengerId', 'Survived'] , axis=1)
df_test = df_test.drop(['PassengerId'] , axis=1)
df = pd.concat([df_train,df_test])
df.head()
# Keep only the categorical (object) columns, stored in object_features
object_features = []
for dtype, feature in zip(df.dtypes, df.columns):
    if dtype == 'object':
        object_features.append(feature)
print(f'{len(object_features)} Categorical Features : {object_features}\n')
# Keep only the categorical columns
df = df[object_features]
df = df.fillna('None')
train_num = train_Y.shape[0]
df.head()
# Mean encoding + logistic regression
df_temp = df.loc[:, ['Sex', 'Cabin', 'Embarked']]
data = pd.concat([df_temp[:train_num], train_Y], axis=1)
for c in df_temp.columns:
    mean_df = data.groupby([c])['Survived'].mean().reset_index()
    mean_df.columns = [c, f'{c}_mean']
    data = pd.merge(data, mean_df, on=c, how='left')
    data = data.drop([c] , axis=1)
data = data.drop(['Survived'] , axis=1)
train_X = df_temp[:train_num]
estimator = LogisticRegression(solver='liblinear')
start = time.time()
print(f'shape : {train_X.shape}')
print(f'score : {cross_val_score(estimator, data, train_Y, cv=5).mean()}')
print(f'time : {time.time() - start} sec')<jupyter_output>shape : (891, 3)
score : 0.8350366889413987
time : 0.018953561782836914 sec
<jupyter_text># Homework 2
* In the Titanic survival prediction, compare mean encoding with label encoding: which one works better, and what might be the reason? Mean encoding + logistic regression gives a higher score, but this may also be caused by overfitting, since the Cabin column contains many unique values (a small smoothing sketch follows at the top of the next cell).<jupyter_code># Control group: label encoding + logistic regression
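# Aside (illustrative sketch, not part of the original homework): one common way to
# reduce the overfitting risk of mean encoding on high-cardinality columns such as
# Cabin is to smooth each category mean toward the global mean:
#     smoothed = (n * category_mean + m * global_mean) / (n + m)
# where n is the category count and m is a chosen smoothing weight (m=10 below is
# an arbitrary assumption).
def smoothed_mean_encode(column, target, m=10):
    global_mean = target.mean()
    stats = target.groupby(column).agg(['mean', 'count'])
    smoothing = (stats['count'] * stats['mean'] + m * global_mean) / (stats['count'] + m)
    return column.map(smoothing)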
df_temp = df.loc[:, ['Sex', 'Cabin', 'Embarked']]
df_temp2 = pd.DataFrame()
for c in df_temp.columns:
    df_temp2[c] = LabelEncoder().fit_transform(df[c])
train_X = df_temp2[:train_num]
estimator = LogisticRegression(solver='liblinear')
start = time.time()
print(f'shape : {train_X.shape}')
print(f'score : {cross_val_score(estimator, train_X, train_Y, cv=5).mean()}')
print(f'time : {time.time() - start} sec')
# Mean encoding + logistic regression
#df_temp = df.loc[:, ['Sex', 'Cabin', 'Embarked']]
data = pd.concat([df_temp[:train_num], train_Y], axis=1)
for c in df_temp.columns:
    mean_df = data.groupby([c])['Survived'].mean().reset_index()
    mean_df.columns = [c, f'{c}_mean']
    data = pd.merge(data, mean_df, on=c, how='left')
    data = data.drop([c] , axis=1)
data = data.drop(['Survived'] , axis=1)
train_X = df_temp[:train_num]
estimator = LogisticRegression(solver='liblinear')
start = time.time()
print(f'shape : {train_X.shape}')
print(f'score : {cross_val_score(estimator, data, train_Y, cv=5).mean()}')
print(f'time : {time.time() - start} sec')<jupyter_output>shape : (891, 3)
score : 0.8350366889413987
time : 0.018950700759887695 sec
 | 
	no_license | 
	/homework/Day_023_HW.ipynb | 
	yles9056/2nd-ML100Days | 2 | 
| 
	<jupyter_start><jupyter_text>
# G-MODE KPFM with Fast Free Force Recovery (F3R)
### Oak Ridge National Laboratory
### *Liam Collins, Anugrah Saxena, Rama Vasudevan and Chris Smith*
#### Additional edits: *Rajiv Giridharagopal, University of Washington*
#### Contacts: [email protected] (primary author) and [email protected] 
This notebook will allow fast KPFM by recovery of the electrostatic force directly from the photodetector response. 
Information on the procedure can be found in Collins et al. 
[DOI: 10.1021/acsnano.7b02114](http://pubs.acs.org/doi/abs/10.1021/acsnano.7b02114) 
This notebook is designed to go through the process of F3R in G-Mode data in general.
Additional script modifications for working in Spyder can be found:
[Raj's repo](https://github.com/rajgiriUW/GKPFM)
The scripts in that repo are designed to work in Spyder for interactive data. They include much functionality removed here (automatically reloading data, saving figures to file, allowing optional flags for turning on/off certain segments, etc) at the expense of not being in Jupyter. 
This notebook and related notebooks make use of the **Pycroscopy** package. Details and helpful tutorials can be found here:
[Pycroscopy](https://pycroscopy.github.io/pycroscopy/)
Please make sure you are using the latest version. As of ~5/1/2018 there were numerous major changes to the package, and previous functionality may be broken. This notebook is currently operational.
### In this notebook the following procedures are performed:
#### (1) Models the Cantilever Transfer Function (H(w))
**(1a)** Translates Tune file to H5 
**(1b)** Fits Cantilever resonances to SHO Model 
**(1c)** Constructs the effective cantilever transfer function (H(w)) from SHO fits of the tune. 
#### (2) Load, Translate and Denoise the G-KPFM data
**(2a)** Loads and translates the .mat file containing the image data to .H5 file format. 
**(2b)** Fourier Filters data. 
**(2c)** Checks Force Recovery for 1 pixel...here you need to find the phase offset used in 3. 
**(2d)** (Optional) PCA Denoising.
#### (3) Fast Free Force Reconstruction
**(3a)** Divides the filtered displacement Y(w) (from **step 2b**) by the effective transfer function H(w). 
**(3b)** iFFT of the response above a user-defined noise floor to recover the force in the time domain (a short numerical sketch follows this outline).
**(3c)** Phase correction (using the phase offset found in step 2c).
**(3d)** (Optional) PCA denoising. Usually this second pass is not necessary.
#### (4) Data Analysis
**(4a)** Parabolic fitting to extract CPD.
**(4b)** Exponential fitting of CPD to extract time constants 
#### (5) Data Visualization
**(5a)** CPD in dark and illuminated case.
**(5b)** Surface Photovoltage and CPD time constants.
**(5c)** Static images and animation of CPD over time.
**(5d)** K-means clustering of CPD data.
**(5e)** SVD of CPD data in dark and illuminated case.
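As a small numerical illustration of the core of step (3) above (see 3a-3b), the force spectrum is obtained by dividing the measured displacement spectrum by the effective transfer function and inverse-transforming only the bins above a chosen noise floor. This is only a hedged sketch with placeholder array names, not code used later in this notebook; it also assumes the transfer function has been fftshifted onto the same frequency grid as the displacement spectrum:
```python
import numpy as np

def recover_force_sketch(y_t, TF_norm, noise_floor=1e-6):
    # Minimal F3R sketch: F(w) = Y(w) / H(w), thresholded, then inverse FFT.
    Y_w = np.fft.fftshift(np.fft.fft(y_t))              # displacement spectrum Y(w)
    F_w = Y_w / TF_norm                                  # divide by the (normalized) transfer function
    F_w[np.abs(Y_w) < noise_floor] = 0                   # keep only bins above the noise floor
    return np.real(np.fft.ifft(np.fft.ifftshift(F_w)))   # force in the time domain
```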
## Installing required package<jupyter_code># Checks Python Version
import sys
if sys.version_info < (3, 5):
    print('''This notebook was optimized to work on Python 3.5.
    While it may also run on other Python versions,
    functionality and performance are not guaranteed
    Please consider upgrading your python version.''')<jupyter_output><empty_output><jupyter_text>## Configure Notebook<jupyter_code># set up notebook to show plots within the notebook
%matplotlib inline
%precision %.4g
'''Import necessary libraries'''
# Visualization:
import matplotlib.pyplot as plt
# General utilities:
import os
import sys
from scipy.signal import correlate
from scipy.optimize import curve_fit
# Interactive Value picker
import ipywidgets as widgets
# Computation:
import numpy as np
import numpy.polynomial.polynomial as npPoly
# Parallel computation library:
try:
    import joblib
except ImportError:
    from warnings import warn  # 'warn' is not imported elsewhere in this notebook
    warn('joblib not found.  Will install with pip.')
    import pip
    pip.main(['install', 'joblib'])
import joblib
import h5py
# multivariate analysis:
from sklearn.cluster import KMeans
from sklearn.decomposition import NMF
# Finally, pycroscopy itself
import pycroscopy as px
import pyUSID as usid
# Define Layouts for Widgets
lbl_layout=dict(
    width='15%'
)
widget_layout=dict(
    width='15%',margin='0px 0px 5px 12px'
)
button_layout=dict(
    width='15%',margin='0px 0px 0px 5px'
)<jupyter_output><empty_output><jupyter_text>## Save remote data to local drive## Step 1.) Model the Cantilever Transfer Function
First we need to read in the tune file for the cantilever you used to perform your measurement. This tune captures the "free" SHO parameters of the cantilever.
If you have previously translated this data you can change the data type in the bottom right corner to .h5; otherwise click the parms file (.txt).
To save yourself the prompt, you can directly load the file from a subfolder.### Load Cantilever Parameters<jupyter_code>data_file = r'E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\tune_0003'
save_figure = True
pre_load_files = True # Avoid prompts when loading data
#output_filepath = os.path.expanduser(output_filepath)
# Typical G-KPFM keeps tunes named same as parent folder
if pre_load_files is True:
    idx = data_file.rfind("\\")
    tune_file = [os.path.join(data_file, data_file[idx+1:] + '.h5'),
                 os.path.join(data_file, data_file[idx+1:] + '_bigtime_00.dat')]
    
    del(idx)
    
# For sharing this notebook specifically, adding a few hard-coded paths
tune_file = tune_file + [r'G:\Team Drives\201805_BAPI_paper_source_data\GSKPM\Testing _Notebook\Tune\Tune.h5',
                         r'G:\Team Drives\201805_BAPI_paper_source_data\GSKPM\Testing _Notebook\Tune\BAPI22_TUNE__0009_bigtime_00.dat']<jupyter_output><empty_output><jupyter_text>### Define Cantilever Parameters<jupyter_code>'''
Here you should input the calibrated parameters of the tip from your experiment.
In particular the lever sensitivity (m/V) and Spring Constant (N/m)
    (will be used to convert signals to displacement and force respectively)
'''
# 'k', 'invols', 'Thermal_Q', 'Thermal_resonance'
tune_items = {'TF_norm':[], 
              'yt0_tune':[], 
              'Yt0_tune':[], 
              'f0':[], 
              'F0':[], 
              'TF_vec':[],
              'TF_fit_vec':[]}     
cantl_parms = {'k':[], 'invols':[], 'Thermal_Q':[], 'Thermal_res':[]}
# defaults
cantl_parms['k'] = 1.7 # N/M
cantl_parms['invols'] = 82.76e-9 # m/V
cantl_parms['Thermal_Q'] = 80
cantl_parms['Thermal_res'] = 57076 #Hz
#MAPI data
cantl_parms['k'] = 2.3 # N/M
cantl_parms['invols'] = 67.56e-9 # m/V
cantl_parms['Thermal_Q'] = 83.6
cantl_parms['Thermal_res'] = 57061 #Hz<jupyter_output><empty_output><jupyter_text>#### Step 1A) Translate Tune file to HF5 format<jupyter_code>from pathlib import Path
'''
If tune file not set above, interactive.
Otherwise, defaults to finding H5 file first. 
If that fails, finds the .DAT files
'''
loadTuneValues = False # in next section, this pre-seeds variables from the H5 file if it exists
'''
If we want to pre-load files, we skip the prompt.
Otherwise, this loads from the path specified above. It searches for an H5 file first, then a .DAT file
'''
if pre_load_files is False:
    input_file_path = px.io_utils.file_dialog(caption='Select translated .h5 file or tune data',
                                            file_filter='Parameters for raw G-Line tune (*.dat);; \
                                            Translated file (*.h5)')
    tune_path, _ = os.path.split(input_file_path)
    tune_file_base_name = os.path.basename(tune_path)
else:
    for p in tune_file:
        print(p)
        file = Path(p)
        if file.is_file():
            input_file_path = p
            print('H5 file exists! Can load from source')
            loadTuneValues = True
            break
        #input_file_path = tune_file
if input_file_path.endswith('.dat') == True:
    print('Translating raw data to h5. Please wait')
    tran = px.io.translators.gmode_tune.GTuneTranslator()
    h5_path = tran.translate(input_file_path)
    print(input_file_path)
    
else:
    h5_path = input_file_path
<jupyter_output>E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\tune_0003\tune_0003.h5
E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\tune_0003\tune_0003_bigtime_00.dat
H5 file exists! Can load from source
Translating raw data to h5. Please wait
Reading line 0 of 4
Finished reading file: E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\tune_0003\tune_0003_bigtime_00.dat!
Reading line 0 of 4
Finished reading file: E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\tune_0003\tune_0003_bigtime_01.dat!
G-Tune translation complete!
E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\tune_0003\tune_0003_bigtime_00.dat
<jupyter_text>#### Loads data from H5 file instead<jupyter_code>'''
Primarily just care about TF_norm, but loads all other values as well
'''
if loadTuneValues == True:
    hdf = h5py.File(h5_path, 'r+')
    h5_file = hdf.file
    nm_base = '/Measurement_000'
    tune_base = '/Tune_Values'
    
    if (nm_base+tune_base) in h5_file:
        grp = hdf.file[nm_base+tune_base]
        for key in cantl_parms:
            cantl_parms[key] = list(usid.hdf_utils.get_attributes(grp, key).values())[0]
        for key in tune_items:
            tune_items[key] = usid.hdf_utils.find_dataset(grp, key)[0].value
        TF_norm = tune_items['TF_norm']
    
    parms_dict = usid.hdf_utils.get_attributes(hdf.file[nm_base])
    ex_freq = parms_dict['BE_center_frequency_[Hz]']
    samp_rate = parms_dict['IO_rate_[Hz]']
    
    N_points = parms_dict['num_bins']
    N_points_per_line = parms_dict['points_per_line']
    N_points_per_pixel = parms_dict['num_bins']
    
    dt = 1/samp_rate #delta-time in seconds
    df = 1/dt #delta-frequency in Hz
    
    # Used in plotting
    w_vec2 = np.linspace(-0.5*samp_rate,
                         0.5*samp_rate - 1.0*samp_rate / N_points_per_line,
                         N_points_per_line)<jupyter_output><empty_output><jupyter_text>#### Step 1B) Extract the Resonance Modes Considered in the Force Reconstruction<jupyter_code>#define number of eigenmodes to consider
num_bandsVal=2
#define bands (center frequency +/- bandwith)
center_freq = cantl_parms['Thermal_res']
MB0_w1 = center_freq - 20E3
MB0_w2 = center_freq + 20E3
MB1_w1 = center_freq*6.25 - 20E3
MB1_w2 = center_freq*6.25 + 20E3
MB1_amp = 30E-9
MB2_amp = 1E-9
MB_parm_vec = np.array([MB1_amp,MB0_w1,MB0_w2,MB1_amp,MB1_w1,MB1_w2])
MB_parm_vec.resize(2,3)
band_edge_mat = MB_parm_vec[:,1:3]
# [0] and [1] are the DAQ channels, use HDFView for better understanding
hdf = px.io.HDFwriter(h5_path)
h5_file = hdf.file
h5_resp = usid.hdf_utils.find_dataset(hdf.file, 'Raw_Data')[0]  # from tip
h5_tune= usid.hdf_utils.find_dataset(hdf.file, 'Raw_Data')[-1] # chirp to tip
if not loadTuneValues: # if don't already have parms_dict, don't overwrite
    parms_dict = h5_tune.parent.parent.attrs
ex_freq = parms_dict['BE_center_frequency_[Hz]']
samp_rate = parms_dict['IO_rate_[Hz]']
N_points = parms_dict['num_bins']
N_points_per_line = parms_dict['points_per_line']
N_points_per_pixel = parms_dict['num_bins']
dt = 1/samp_rate #delta-time in seconds
df = 1/dt #delta-frequency in Hz
# Used in plotting
w_vec2 = np.linspace(-0.5*samp_rate,0.5*samp_rate - 1.0*samp_rate / N_points_per_line,N_points_per_line)
# Response
A_pd = np.mean(h5_resp, axis=0)
yt0_tune = A_pd - np.mean(A_pd)
Yt0_tune = np.fft.fftshift(np.fft.fft(yt0_tune,N_points_per_line)*dt)
# BE_wave_train
h5_spec_vals = usid.hdf_utils.get_auxiliary_datasets(h5_tune, aux_dset_name='Spectroscopic_Values')[0]
BE_pd = h5_spec_vals[0, :]
f0 = BE_pd - np.mean(BE_pd)
F0 = np.fft.fftshift(np.fft.fft(f0,N_points_per_line)*dt)
# The value here on right represents the excited bins
excited_bin_ind = np.where(np.abs(F0) > 0.5e-3)
# Transfer Function!
TF_vec = Yt0_tune/F0<jupyter_output>C:\Users\Raj\Anaconda3\lib\site-packages\ipykernel_launcher.py:39: RuntimeWarning: divide by zero encountered in true_divide
<jupyter_text>#### Plot Tune<jupyter_code>usid.hdf_utils.print_tree(h5_file, rel_paths=True)
print(dt)
plt.figure(2)
plt.subplot(2,1,1)
#plt.semilogy(np.abs(w_vec2[excited_bin_ind])*1E-6,
#             np.abs(TF_vec[excited_bin_ind]))
plt.semilogy(np.abs(w_vec2[excited_bin_ind])*1E-3,
             np.abs(TF_vec[excited_bin_ind]), 'r')
# plot all curves on a kHz frequency axis to match the x-label and xlim below
plt.semilogy(np.abs(w_vec2[excited_bin_ind])*1E-3,
             np.abs(Yt0_tune[excited_bin_ind]), 'b')
plt.semilogy(np.abs(w_vec2[excited_bin_ind])*1E-3,
             np.abs(F0[excited_bin_ind]), 'k')
plt.xlabel('Frequency (kHz)')
plt.ylabel('Amplitude (a.u.)')
plt.xlim([band_edge_mat[0,0]*1e-3, band_edge_mat[0,1]*1e-3])
plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
plt.subplot(2,1,2)
plt.semilogy(np.abs(w_vec2[excited_bin_ind])*1E-3,
             np.angle(TF_vec[excited_bin_ind]))
plt.xlabel('Frequency (kHz)')
plt.ylabel('Phase (Rad)')
plt.xlim([band_edge_mat[0,0]*1e-3, band_edge_mat[0,1]*1e-3])
plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)<jupyter_output><empty_output><jupyter_text>### Step 1C) Construct an effective Transfer function (TF_Norm) from SHO fits<jupyter_code>TunePhase = -np.pi
num_bands = band_edge_mat.shape[0]
coef_mat = np.zeros((num_bands,4))
coef_guess_vec = np.zeros((4))
# wb is an array of frequency points only where F0 above noise floor
wb = w_vec2[excited_bin_ind]
# Fit function for transfer function
TF_fit_vec = np.zeros((w_vec2.shape))
TFb_vec = TF_vec[excited_bin_ind]
# k1 = eigenmodes of cantilever to evaluate. Default = 2
Q_guesses = [120, 500, 700]
for k1 in range(num_bandsVal):
    
    # locate the fitting region
        # bin_ind1 is where band_edge is in the wb array
        # wbb is an array that spans this region for fitting purposes
    w1 = band_edge_mat[k1][0]
    w2 = band_edge_mat[k1][1]
    bin_ind1 = np.where(np.abs(w1-wb) == np.min(np.abs(w1-wb)))[0][0]
    bin_ind2 = np.where(np.abs(w2-wb) == np.min(np.abs(w2-wb)))[0][0]
    wbb = wb[bin_ind1:bin_ind2+1].T/1e6
    response_vec = TFb_vec[bin_ind1:bin_ind2+1].T
    response_mat = np.array([np.real(response_vec), np.imag(response_vec)]).T
    
    # initial guesses    
    A_max_ind = np.argmax(np.abs(response_vec))
    A_max = response_vec[A_max_ind]
    Q_guess = Q_guesses[k1]
    A_guess = A_max/Q_guess
    wo_guess = wbb[A_max_ind]
    phi_guess = TunePhase
    coef_guess_vec = [np.real(A_guess),
                      wo_guess,
                      Q_guess,
                      phi_guess]
    coef_vec = px.analysis.utils.be_sho.SHOestimateGuess(response_vec, wbb, 10)
    response_guess_vec = px.analysis.utils.be_sho.SHOfunc(coef_guess_vec, wbb)
    response_fit_vec = px.analysis.utils.be_sho.SHOfunc(coef_vec, wbb)
    # Convert the resonance frequency back to Hz (wbb above is in MHz) so SHOfunc can be evaluated on w_vec2
    coef_vec[1] = coef_vec[1]*1E6
    coef_mat[k1,:] = coef_vec
    response_fit_full_vec = px.analysis.utils.be_sho.SHOfunc(coef_vec,w_vec2)
    TF_fit_vec = TF_fit_vec + response_fit_full_vec # check for length and dimension
    # Plot: blue = data, green = initial guess, red = fit
    fig= plt.figure(10, figsize=(9,9))
    plt.subplot(num_bands,2,k1+1)
    plt.plot(wbb,np.abs(response_vec),'.-')
    plt.plot(wbb,np.abs(response_guess_vec),c='g')
    plt.plot(wbb,np.abs(response_fit_vec),c='r')
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('Amplitude (nm)')
    plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
    #plt.figure(11)
    plt.subplot(num_bands,2,(k1+1)+2)
    plt.plot(wbb,np.angle(response_vec),'.-')
    plt.plot(wbb,np.angle(response_guess_vec),'g')
    plt.plot(wbb,np.angle(response_fit_vec),'r')
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('Phase (Rad)')
    plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
    if save_figure == True:
        fig.savefig(data_file+'\SHOFitting.eps', format='eps')
        fig.savefig(data_file+'\SHOFitting.tif', format='tiff')
Q = coef_mat[0,2]
# Min-max normalize the fitted transfer function, then scale by the first band's Q
TF_norm = ((TF_fit_vec - np.min(np.abs(TF_fit_vec))) /
           (np.max(np.abs(TF_fit_vec)) - np.min(np.abs(TF_fit_vec)))) * Q
#tf_grp = usid.hdf_utils.create_indexed_group(h5_file['/'], 'Tune_Function')
usid.hdf_utils.print_tree(h5_file, rel_paths=True)
#del h5_file['/Tune_Function_000']<jupyter_output>/
Measurement_000
Measurement_000/Channel_000
Measurement_000/Channel_000/Raw_Data
Measurement_000/Channel_001
Measurement_000/Channel_001/Raw_Data
Measurement_000/Position_Indices
Measurement_000/Position_Values
Measurement_000/Spectroscopic_Indices
Measurement_000/Spectroscopic_Values
Tune_Function_000
Tune_Function_000/F0
Tune_Function_000/Position_Indices
Tune_Function_000/Position_Values
Tune_Function_000/Spectroscopic_Indices
Tune_Function_000/Spectroscopic_Values
Tune_Function_000/TF_fit_vec
Tune_Function_000/TF_vec
Tune_Function_000/Tune_Data
Tune_Function_000/Y_freq
Tune_Function_000/f0
Tune_Function_000/y_time
<jupyter_text>#### Saves Data to H5 File (optional)<jupyter_code>#%% Saves data to the h5 File
'''
Need to save cantilever parameters, TF_norm, Q, yt0_tune, Yt0, f0, F0, TF_vec
'''
tf_grp = usid.hdf_utils.create_indexed_group(h5_file['/'], 'Tune_Function')
tf_pos_dim = usid.hdf_utils.Dimension('Single Step', 'a.u.', 1)
tf_spec_dim = usid.hdf_utils.Dimension('Frequency', 'MHz', w_vec2)
h5_tf = usid.hdf_utils.write_main_dataset(tf_grp, 
                                          TF_norm.reshape(1, -1), 
                                          'Tune_Data', 
                                          'Response',
                                          'a.u.',
                                          tf_pos_dim, 
                                          tf_spec_dim)
tune_items = {'y_time':yt0_tune, 
              'Y_freq':Yt0_tune, 
              'f0':f0, 
              'F0':F0, 
              'TF_vec':TF_vec,
              'TF_fit_vec':TF_fit_vec}     
for key, val in tune_items.items():
    tf_grp.create_dataset(key, data=val)<jupyter_output><empty_output><jupyter_text>#### Separate close file to allow debugging without errors<jupyter_code>hdf.close()<jupyter_output><empty_output><jupyter_text># Step (2) Load, Translate and Denoise the G-KPFM data 
At this point we are ready to load the actual data!
For many operations we will want to iterate through and reprocess the data after some trial and error. One of the benefits of Pycroscopy is that all processing results are stored in the HDF5 file; the easiest way to inspect them is an interactive shell (e.g. Spyder). This notebook does not display the H5 contents dynamically, but it walks through processing the G-KPFM data step by step.#### Step (2a) Load and translate the image file to the .H5 file format.<jupyter_code>''' Set up some initial parameters.
Set the image length and height directly here. 
aspect is used for plotting since, by default, the G-Mode data acquisition grid is 4 x 1
'''
img_length = 30e-6
img_height = 7.5e-6
aspect = 0.5 # due to G-mode approach
light_on_time = [3,7]
'''Loads data'''
from pathlib import Path
import os
pre_load_files = False
save_figure = True # do you want to save these to file
# Set save file location
output_filepath = r'E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\dpp3t_cb_1Vdc_kcl_zoom_0005'
data_file = '' # if reading an H5 file directly and want to skip the prompt, type the full path here e.g. r'E:\Data.H5'
output_filepath = os.path.expanduser(output_filepath)
print('#### IMAGE LENGTH =',img_length,'####')
if pre_load_files is False:
    input_file_path = usid.io_utils.file_dialog(caption='Select translated .h5 file or raw experiment data',
                                            file_filter='Parameters for raw G-Line data (*.dat);; \
                                            Translated file (*.h5)')
else:
    input_file_path = output_filepath
    
folder_path, _ = os.path.split(input_file_path)
if input_file_path.endswith('.dat'):
    print('Translating raw data to h5. Please wait')
    tran = px.io.translators.gmode_line.GLineTranslator()
    h5_path = tran.translate(input_file_path)
    hdf = px.io.HDFwriter(h5_path)
    preLoaded = False
else:
    h5_path = input_file_path
    hdf = px.io.HDFwriter(h5_path)
    px.hdf_utils.print_tree(hdf.file, rel_paths=True)
    preLoaded = True #for pre-loading some data
    
# To automatically set light_on_time when "ms" or "us" appears in the filename (for illuminated data), uncomment the block below
'''
a = output_filepath.find('ms')
b = output_filepath.find('us')
if a != -1:
    tm = int(output_filepath[a-1])
    light_on_time = [1, 1+tm]  # ms   
elif b != -1:
    tm = int(output_filepath[b-3:b])
    light_on_time = [1, 1+tm/1000]  # ms
del(a)
del(b)
'''<jupyter_output>#### IMAGE LENGTH = 3e-05 ####
Translating raw data to h5. Please wait
Reading line 0 of 64
Reading line 10 of 64
Reading line 20 of 64
Reading line 30 of 64
Reading line 40 of 64
Reading line 50 of 64
Reading line 60 of 64
Finished reading file: E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\dpp3t_cb_1Vdc_kcl_zoom_0005\dpp3t_cb_1Vdc_kcl_zoom_0005_bigtime_00.dat!
Reading line 0 of 64
Reading line 10 of 64
Reading line 20 of 64
Reading line 30 of 64
Reading line 40 of 64
Reading line 50 of 64
Reading line 60 of 64
Finished reading file: E:\Polymer Ion Transport\ORNL\20191018_dpp3t_CB_Kcl\dpp3t_cb_1Vdc_kcl_zoom_0005\dpp3t_cb_1Vdc_kcl_zoom_0005_bigtime_01.dat!
G-Line translation complete!
<jupyter_text>#### Extract some relevant parameters<jupyter_code># Getting ancillary information and other parameters
h5_file = hdf.file
h5_main = usid.hdf_utils.find_dataset(hdf.file,'Raw_Data')[0]
h5_spec_vals = usid.hdf_utils.get_auxiliary_datasets(h5_main, aux_dset_name='Spectroscopic_Values')[0]
h5_spec_inds= usid.hdf_utils.get_auxiliary_datasets(h5_main, aux_dset_name='Spectroscopic_Indices')[0]
# General parameters
parms_dict = h5_main.parent.parent.attrs
samp_rate = parms_dict['IO_rate_[Hz]']
ex_freq = parms_dict['BE_center_frequency_[Hz]']
num_rows = parms_dict['grid_num_rows']
num_cols = parms_dict['grid_num_cols']
parms_dict['num_rows'] = num_rows
parms_dict['num_cols'] = num_cols
h5_pos_vals = usid.hdf_utils.get_auxiliary_datasets(h5_main, aux_dset_name='Position_Values')[0]
h5_pos_inds = usid.hdf_utils.get_auxiliary_datasets(h5_main, aux_dset_name='Position_Indices')[0]
num_pts = h5_main.shape[1]
pnts_per_pix=int(num_pts/num_cols)
# Adding image size to the parameters
parms_dict['FastScanSize'] = img_length
parms_dict['SlowScanSize'] = img_height
N_points = parms_dict['num_bins']
N_points_per_pixel = parms_dict['num_bins']
time_per_osc = (1/parms_dict['BE_center_frequency_[Hz]'])
IO_rate = parms_dict['IO_rate_[Hz]']     #sampling_rate
parms_dict['sampling_rate'] = IO_rate
pnts_per_period = IO_rate * time_per_osc #points per oscillation period
pxl_time = N_points_per_pixel/IO_rate    #seconds per pixel
num_periods = int(pxl_time/time_per_osc) #total # of periods per pixel, should be an integer
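# (Added) optional sanity check: as the comment above notes, this should come out as an integer; warn if it does not
if abs(pxl_time / time_per_osc - round(pxl_time / time_per_osc)) > 1e-6:
    print('Warning: non-integer number of drive periods per pixel:', pxl_time / time_per_osc)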
# Needed for some backwards compatibility
parms_dict['total_time'] = pxl_time
parms_dict['trigger'] = 0
parms_dict['length'] = img_length
parms_dict['height'] = img_height
parms_dict['light_on_time'] = light_on_time
grp_CPD = px.io.VirtualGroup(h5_main.parent.parent.name)
grp_CPD.attrs['length'] = img_length
grp_CPD.attrs['height'] = img_height
# Excitation waveform for a single pixel
pixel_ex_wfm = h5_spec_vals[0, :int(h5_spec_vals.shape[1]/num_cols)]
# Excitation waveform for a single line / row of data
excit_wfm = h5_spec_vals.value
# Preparing the frequency axis:
w_vec = 1E-3*np.linspace(-0.5*samp_rate, 0.5*samp_rate - samp_rate/num_pts, num_pts)
w_vec_pix = 1E-3*np.linspace(-0.5*samp_rate, 0.5*samp_rate - samp_rate/pnts_per_pix, pnts_per_pix)
# Preparing the time axis:
t_vec_line = 1E3*np.linspace(0, num_pts/samp_rate, num_pts)
t_vec_pix = 1E3*np.linspace(0, pnts_per_pix/samp_rate, pnts_per_pix)
# Dimension objects
rows_vals = np.linspace(0, img_height, num_rows)
cols_vals = np.linspace(0, img_length, num_cols)
time_vals = t_vec_pix
# Correctly adds the ancillary datasets
pos_dims = [usid.write_utils.Dimension('Cols', 'm', cols_vals),
            usid.write_utils.Dimension('Rows', 'm', rows_vals)]
spec_dims = [usid.write_utils.Dimension('Time', 's', time_vals)]<jupyter_output><empty_output><jupyter_text>#### Loads previous data if H5 file loaded directly<jupyter_code># Loads some functions used later for fitting and loading
def fitexp(x, A, tau, y0, x0):
    return A * np.exp(-(x - x0) /tau) + y0
def fitbiexp(x, A1, tau1, A2, tau2, y0, x0):
    return A1*np.exp(-(x-x0)/tau1) + A2*np.exp(-(x-x0)/tau2) + y0
def h5_list(h5file, key):
    '''
    Returns list of names matching a key in the h5 group passed
    h5file = hdf.file['/Measurement_000/Channel_000'] or similar
    '''
    names = []
    for i in h5file:
        if key in i:
            names.append(i)
            
    return names
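
# --- Added sketch (not in the original workflow): quick sanity check of fitexp on synthetic data ---
# Assumes numpy is already imported as np (it is used throughout this notebook)
from scipy.optimize import curve_fit   # curve_fit is also used in the CPD-fitting cells below
_t = np.linspace(0, 5e-3, 200)                       # 5 ms window
_y = fitexp(_t, -0.05, 1e-3, 0.0, 0.0)               # A = -50 mV, tau = 1 ms, y0 = 0, x0 = 0
_popt, _ = curve_fit(fitexp, _t, _y, p0=[-0.02, 5e-4, 0.0, 0.0])
# _popt should recover approximately [A, tau, y0, x0] = [-0.05, 1e-3, 0.0, 0.0]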
'''
By default this will load the most recent of each file matching the respective name. 
That is, if there are 8 CPDs generated from previous processing, it will load the 8th one.
'''
if preLoaded == True:
    ''' 
    Loads all the previous missing data so we can skip around to relevant functions
    '''
    print('#### Loading from saved H5 ####')
    # Group addresses, assume only first members are important
    nm_base = '/Measurement_000/Channel_000'
    nm_filt_resh = 'Filtered_Data-Reshape_000'
    nm_h5_resh = 'h5_F3R-Reshape_000'
    nm_SVD = 'Reshaped_Data-SVD_000'
    nm_CPD = nm_base + '/Raw_Data-CPD'
    
    grp = hdf.file['/Measurement_000/Channel_000']
    h5_filt = px.hdf_utils.find_dataset(grp, 'Filtered_Data')[0]
    h5_resh = px.hdf_utils.find_dataset(hdf.file['/'.join([h5_filt.parent.name, nm_filt_resh])],
                                      'Reshaped_Data')[0]
    h5_resh_grp = h5_resh.parent
  
    # Gets the "last" Rebuilt SVD Data if there's more than 1
    names = h5_list(hdf.file['/'.join([h5_filt.parent.name,nm_filt_resh])],
                    'Reshaped_Data-SVD')
    if any(names):
        nm_filt_resh_SVD = names[-1]
        
        # Filtered Data    
        PCA_clean_data_prerecon = px.hdf_utils.find_dataset(hdf.file['/'.join([h5_resh_grp.name, nm_filt_resh_SVD])],'Rebuilt_Data')
        
        if PCA_clean_data_prerecon == []:
            PCA_pre_reconstruction_clean = False
        else:
            PCA_clean_data_prerecon = PCA_clean_data_prerecon[0]
            h5_svd_group = PCA_clean_data_prerecon.parent.parent
            h5_Uprerecon = h5_svd_group['U']
            h5_Vprerecon = h5_svd_group['V']
            h5_Sprerecon = h5_svd_group['S']
        
            abun_maps_prefilter = np.reshape(h5_Uprerecon[:,:25], (num_rows, num_cols,-1))
    else:
        PCA_pre_reconstruction_clean = False
    
    # Post-F3R
    h5_F3R = px.hdf_utils.find_dataset(grp, 'h5_F3R')[0]
    h5_F3Rresh_grp = h5_F3R.parent
    
    # Get correct reshaped data
    names = h5_list(hdf.file[h5_F3R.parent.name],'h5_F3R-Reshape')
    nm_h5_resh = names[-1]
    h5_F3Rresh = px.hdf_utils.find_dataset(hdf.file['/'.join([h5_F3R.parent.name, nm_h5_resh])],'Reshaped_Data')[0]
    PCA_clean_data_postrecon = px.hdf_utils.find_dataset(hdf.file['/'.join([h5_F3Rresh.parent.name, nm_SVD])],
                                                  'Rebuilt_Data')
    if PCA_clean_data_postrecon == []:
        PCA_post_reconstruction_clean = False
    else:
        PCA_post_reconstruction_clean = True
        PCA_clean_data_postrecon = PCA_clean_data_postrecon[0]
        
        h5_svd_group = PCA_clean_data_postrecon.parent.parent
        h5_U = h5_svd_group['U']
        h5_V = h5_svd_group['V']
        h5_S = h5_svd_group['S']
    
        abun_maps_postfilter = np.reshape(h5_U[:,:25], (num_rows, num_cols,-1))
    
    # CPD
    CPD = px.hdf_utils.find_dataset(grp, 'CPD')[0]
    CPD_on_time = px.hdf_utils.find_dataset(grp, 'CPD_on_time')[0]
    CPD_off_time = px.hdf_utils.find_dataset(grp, 'CPD_off_time')[0]
    if not isinstance(CPD_on_time, np.ndarray):
        CPD_on_time = CPD_on_time.value
        CPD_off_time = CPD_off_time.value
    
    # Create CPD average image matrices
    CPD_off_avg = np.zeros(CPD_on_time.shape)
    CPD_on_avg = np.zeros(CPD_on_time.shape)
    SPV = CPD_on_avg - CPD_off_avg  # placeholder; recomputed below once the averages are filled in
    parms_dict = h5_main.parent.parent.attrs
    num_rows = parms_dict['grid_num_rows']
    num_cols = parms_dict['grid_num_cols']
    dtCPD = pxl_time/CPD.shape[1] 
    p_on = int(light_on_time[0]*1e-3 / dtCPD) 
    p_off = int(light_on_time[1]*1e-3 / dtCPD) 
    CPD_on = CPD[:, p_on:p_off]
    CPD_off = CPD[:, p_off:]
    
    for r in np.arange(CPD_on_time.shape[0]):
        for c in np.arange(CPD_on_time.shape[1]):
            CPD_off_avg[r][c] = np.mean(CPD[r*num_cols + c,p_off:])
            CPD_on_avg[r][c] = np.mean(CPD[r*num_cols + c,p_on:p_off])
    SPV = CPD_on_avg - CPD_off_avg  # surface photovoltage from the filled-in averages
    
    # Parabola fit
    wHfit3 = px.hdf_utils.find_dataset(hdf.file['/'],'parafit_main')[0]
    reconstruct = False
    # Reconstruct CPD data from parabola fit file (used optionally later)
    CPD_recon = np.zeros([num_rows*num_cols, wHfit3.shape[1]])
    CPD_grad = np.zeros([num_rows*num_cols, wHfit3.shape[1]])
        
    CPD_recon[:,:] = -0.5*np.divide(wHfit3[:,:,1],wHfit3[:,:,2]) # vertex of parabola
    CPD_grad[:,:] = wHfit3[:,:,2]
    
    CPD_grad_resh_on_avg = np.zeros(CPD_on_time.shape)
    CPD_grad_resh_off_avg = np.zeros(CPD_on_time.shape)
    
    for r in np.arange(CPD_on_time.shape[0]):
        for c in np.arange(CPD_on_time.shape[1]):
            CPD_grad_resh_off_avg[r][c] = np.mean(CPD_grad[r*num_cols + c,p_off:])
            CPD_grad_resh_on_avg[r][c] = np.mean(CPD_grad[r*num_cols + c,p_on:p_off])
    
    dset = wHfit3[:,:,:]
    pnts_per_CPDpix = CPD_recon.shape[1]<jupyter_output><empty_output><jupyter_text>#### Step 2b Fourier Filter data.
- Define filter parameters in the first cell
- Then test on a single row
- Finally, perform on the full dataset
**Here you can play with the noise tolerance**<jupyter_code># Set Filter parameters here:
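# Note (added), a rough guide to the filter objects constructed below:
#   LowPassFilter(num_pts, samp_rate, f_cutoff)          -> keeps content below f_cutoff (200 kHz here)
#   NoiseBandFilter(num_pts, samp_rate, centers, widths) -> rejects narrow bands at the listed center
#                                                            frequencies with the listed bandwidths
# noise_tolerance controls how aggressively the FFT noise floor is estimated in test_filter / SignalFilter below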
num_spectral_pts = h5_main.shape[1]
#hpf = px.processing.fft.HarmonicPassFilter(num_pts, samp_rate, ex_freq, 1E+3, 10)
#default filtering, note the bandwidths --> DC filtering and certain noise peaks
lpf = px.processing.fft.LowPassFilter(num_pts, samp_rate, 200E+3)
nbf = px.processing.fft.NoiseBandFilter(num_pts, samp_rate, 
                                        [5E3, 50E3, 100E3, 150E3, 200E3],
                                        [10E3, 1E3, 1E3, 1E3, 1E3])
#no DC filtering
#nbf = px.processing.fft.NoiseBandFilter(num_pts, samp_rate, 
#                                        [50E3, 100E3, 125E3],
#                                        [1E3, 1E3, 1.5E3])
freq_filts = [lpf, nbf]
noise_tolerance = 50e-7
narrowband = False
if narrowband == True:
    nbf = px.processing.fft.HarmonicPassFilter(num_pts, samp_rate, ex_freq, 1e3, 5)
    freq_filts = [nbf]
# Test filter on a single line:
row_ind = 12
filt_line, fig_filt, axes_filt = px.processing.gmode_utils.test_filter(h5_main[row_ind],
                                                                       frequency_filters=freq_filts,
                                                                       noise_threshold=noise_tolerance,
                                                                       show_plots=True)
if save_figure == True:
    fig = fig_filt
    fig.savefig(output_filepath+'\FFTFiltering.eps', format='eps')
    fig.savefig(output_filepath+'\FFTFiltering.tif', format='tiff')
filt_row = filt_line.reshape(-1, pixel_ex_wfm.size)
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, filt_row,use_rainbow_plots=True, 
                                     x_label='Bias (V)', title='FFT Filtering',
                                     num_plots=16, y_label='Deflection (a.u.)')<jupyter_output><empty_output><jupyter_text>#### Step 2B.i) Testing F3R and finding phase on the Filtered row data from previous step
We need to find the phase offset between the measured response and the drive voltage.
Adjust the phase until the parabola closes in the second set of images.
This segment does three things:
    0) Optionally tries to auto-find the phase offset
    1) Tests whether the filters above are effective (FFT plot) and shows the result
    2) Tests whether the phase offset correctly accounts for cable path lengths
#### Step 2B.i.0) Finds the optimal phase-offset 
###### (can skip to Step 2B.i.1 if you know the phase or to search manually)<jupyter_code># Calculates NoiseLimit
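# Note (added): the phase correction used throughout this notebook is a frequency-proportional phase ramp,
#   X_shifted(w) = X(w) * exp(-1j * (w / w_drive) * ph),
# i.e. a pure time delay compensating cable/instrument path length. The brute-force search below scores each
# trial ph by how far apart the two half-cycle parabola vertices sit; the best ph makes the parabola "close".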
search_phase = False # set to true to search, this is slow!
if search_phase == True:
    
    fft_h5row = np.fft.fftshift(np.fft.fft(h5_main[row_ind]))
    noise_floor = px.processing.fft.get_noise_floor(fft_h5row, noise_tolerance)[0]
    print('Noise floor = ', noise_floor)
    Noiselimit = np.ceil(noise_floor)
    G_line = np.zeros(w_vec2.size,dtype=complex)         # G = raw
    G_wPhase_line = np.zeros(w_vec2.size,dtype=complex)  # G_wphase = phase-shifted
    
    signal_ind_vec = np.arange(w_vec2.size)
    ind_drive = (np.abs(w_vec2-ex_freq)).argmin()
    
    test_line = filt_line-np.mean(filt_line)
    test_line = np.fft.fftshift(np.fft.fft(test_line))
    signal_kill = np.where(np.abs(test_line) < Noiselimit)
    signal_ind_vec = np.delete(signal_ind_vec, signal_kill)
    fits = []
    xpts = np.arange(-2*np.pi, 2*np.pi, 0.1)
    for i in xpts:
        test_shifted = (test_line)*np.exp(-1j*w_vec2/(w_vec2[ind_drive])*i)
        G_wPhase_line[signal_ind_vec] = test_shifted[signal_ind_vec]
        G_wPhase_line = (G_wPhase_line/TF_norm)
        G_wPhase_time_line = np.real(np.fft.ifft(np.fft.ifftshift(G_wPhase_line)))
        phaseshifted = G_wPhase_time_line.reshape(-1, pixel_ex_wfm.size)
        p1, _ = npPoly.polyfit(pixel_ex_wfm[8:24], phaseshifted[0,8:24], 2, full=True)
        p2, _ = npPoly.polyfit(pixel_ex_wfm[24:40], phaseshifted[0,24:40], 2, full=True)
        fit1 = -0.5*p1[1]/p1[2]
        fit2 = -0.5*p2[1]/p2[2]
        fits.append(np.abs(fit2-fit1))
    print(np.argmin(fits),xpts[np.argmin(fits)])
    ph = xpts[np.argmin(fits)]<jupyter_output><empty_output><jupyter_text>#### Step 2B.i.1) Finds the optimal phase-offset <jupyter_code># Try Force Conversion on Filtered data
# Phase Offset
ph = -.38   # phase from cable delays between excitation and response. Set this manually!
# Calculates NoiseLimit
fft_h5row = np.fft.fftshift(np.fft.fft(h5_main[row_ind]))
noise_floor = px.processing.fft.get_noise_floor(fft_h5row, noise_tolerance)[0]
print('Noise floor = ', noise_floor)
Noiselimit = np.ceil(noise_floor)
# Try Force Conversion on Filtered data of single line (row_ind above)
G_line = np.zeros(w_vec2.size,dtype=complex)         # G = raw
G_wPhase_line = np.zeros(w_vec2.size,dtype=complex)  # G_wphase = phase-shifted
signal_ind_vec = np.arange(w_vec2.size)
ind_drive = (np.abs(w_vec2-ex_freq)).argmin()
# filt_line is from filtered data above
test_line = filt_line-np.mean(filt_line)
test_line = np.fft.fftshift(np.fft.fft(test_line))
signal_kill = np.where(np.abs(test_line) < Noiselimit)
signal_ind_vec = np.delete(signal_ind_vec, signal_kill)
# Original/raw data; TF_norm is from the Tune file transfer function
G_line[signal_ind_vec] = test_line[signal_ind_vec]
G_line = (G_line/TF_norm)
G_time_line = np.real(np.fft.ifft(np.fft.ifftshift(G_line))) #time-domain 
# Phase-shifted data
test_shifted = (test_line)*np.exp(-1j*w_vec2/(w_vec2[ind_drive])*ph)
G_wPhase_line[signal_ind_vec] = test_shifted[signal_ind_vec]
G_wPhase_line = (G_wPhase_line/TF_norm)
G_wPhase_time_line = np.real(np.fft.ifft(np.fft.ifftshift(G_wPhase_line)))
# On a single line, row_ind is above in previous cell
FRaw_resp = np.fft.fftshift(np.fft.fft(h5_main[row_ind]))
phaseshifted = G_wPhase_time_line.reshape(-1, pixel_ex_wfm.size)
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, phaseshifted, use_rainbow_plots=True, 
                                     x_label='Voltage (Vac)', title='Phase Shifted',
                                     num_plots=4, y_label='Deflection (a.u.)')<jupyter_output>Noise floor =  35.994179110357344
<jupyter_text>#### Plotting F3R<jupyter_code>fig, ax = plt.subplots(figsize=(12, 7))
plt.semilogy(w_vec, (np.abs(FRaw_resp)), '^b' ,label='Response')
#plt.semilogy(w_vec[signal_ind_vec], (np.abs(G[signal_ind_vec])), 'og')
plt.semilogy(w_vec[signal_ind_vec], (np.abs(FRaw_resp[signal_ind_vec])),'.r', label='F3r')
plt.semilogy(w_vec[signal_ind_vec], np.abs(TF_norm[signal_ind_vec]), 'k', label='Tune TF')
ax.set_xlabel('Frequency (kHz)', fontsize=16)
ax.set_ylabel('Amplitude (a.u.)', fontsize=16)
ax.legend(fontsize=14)
ax.set_yscale('log')
ax.set_xlim(0, 200)
ax.set_title('Noise Spectrum for row ' + str(row_ind), fontsize=16)
usid.plot_utils.set_tick_font_size(ax, 14)
# In time domain again, compare pre/post-phase-shift-corrected versions
unshifted = G_time_line.reshape(-1, pixel_ex_wfm.size)
phaseshifted = G_wPhase_time_line.reshape(-1, pixel_ex_wfm.size)
# Unshifted phase, pre-FFT filter
raw = np.real(np.fft.ifft(np.fft.ifftshift(FRaw_resp)))
raw = raw.reshape(-1, pixel_ex_wfm.size)
#fig, axes = px.plot_utils.plot_loops(pixel_ex_wfm, raw, use_rainbow_plots=True, 
#                                     x_label='Voltage (Vac)', title='Raw',
#                                     plots_on_side=2, y_label='Deflection (a.u.)')
# Unshifted phases, post-FFT filter
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, unshifted, use_rainbow_plots=True, 
                                     x_label='Voltage (Vac)', title='Raw',
                                     num_plots=2, y_label='Deflection (a.u.)')
# Shifted phase; ideally parabolas should overlap
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, phaseshifted, use_rainbow_plots=True, 
                                     x_label='Voltage (Vac)', title='Phase Shifted',
                                     num_plots=2, y_label='Deflection (a.u.)')
fig.savefig(output_filepath+r'\PostFilter_Displacements.tif', format='tiff')<jupyter_output><empty_output><jupyter_text>#### Filter the full data set 
###### **(this process is quite slow!)**<jupyter_code>h5_filt_grp = usid.hdf_utils.check_for_old(h5_main, 'FFT_Filtering')#, new_parms=filter_parms)
if not h5_filt_grp:
    
    sig_filt = px.processing.SignalFilter(h5_main, frequency_filters=freq_filts, 
                                          noise_threshold=noise_tolerance,
                                          write_filtered=True, write_condensed=False, 
                                          num_pix=1,verbose=True, cores=1, max_mem_mb=512)
    h5_filt_grp = sig_filt.compute()
    
else:
    print('Taking previously computed results')
if isinstance(h5_filt_grp, list):
    h5_filt = h5_filt_grp[0]['Filtered_Data']
else:
    h5_filt = h5_filt_grp['Filtered_Data']
# Reshapes the filtered response into a matrix per-pixel instead of in lines (as recorded by NI box)
print('\n','#### Done! Now reshaping... ####')
h5_main_filt = usid.hdf_utils.find_dataset(hdf.file,'Filtered_Data')[0]
scan_width=1
h5_resh = px.processing.gmode_utils.reshape_from_lines_to_pixels(h5_filt, pixel_ex_wfm.size,
                                                                 scan_width / num_cols)
h5_resh_grp = h5_resh.parent
h5_resh.shape<jupyter_output>No mpi4py found or script was not called via mpixexec / mpirun. Assuming single node computation
Rank 0: Upgrading from a regular h5py.Dataset to a USIDataset
Rank 0: The HDF5 dataset is now a USIDataset
Rank 0 - on socket with 4 cores and 964.7 MB avail. RAM shared by 1 ranks each given 1 cores.
User has requested to use no more than 512.0 MB of memory.
Rank 0: Each of the 1 workers on this socket are allowed to use 512.0 MB of RAM.
Each position in the SOURCE dataset is 8.0 MB large.
Rank 0: Workers on this socket allowed to read 64 positions of the SOURCE dataset only per chunk.
Finished collecting info on memory and workers
Consider calling test() to check results before calling compute() which computes on the entire dataset and writes back to the HDF5 file
Allowed to read 21 pixels per chunk
Checking for duplicates:
Creating HDF5 group and datasets to hold results
Rank 0 - Finished creating the Composite_Filter dataset
Rank 0 - Reusing source datasets position datasets
h5 group an[...]<jupyter_text>#### PCA on the filtered response<jupyter_code>h5_svd = px.processing.svd_utils.SVD(h5_resh, num_components=256)
h5_svd_group = h5_svd.compute()
h5_Uprecon = h5_svd_group['U']
h5_Vprecon = h5_svd_group['V']
h5_Sprecon = h5_svd_group['S']
skree_sum = np.zeros(h5_Sprecon.shape)
for i in range(h5_Sprecon.shape[0]):
    skree_sum[i] = np.sum(h5_Sprecon[:i])/np.sum(h5_Sprecon)
plt.figure()
plt.plot(skree_sum, 'o')
print('Need', skree_sum[skree_sum<0.8].shape[0],'components for 80%')
print('Need', skree_sum[skree_sum<0.9].shape[0],'components for 90%')
print('Need', skree_sum[skree_sum<0.95].shape[0],'components for 95%')
# Since the two spatial dimensions (x, y) have been collapsed to one, we need to reshape the abundance maps:
# The "25" is how many of the eigenvectors to keep
abun_maps = np.reshape(h5_Uprecon[:,:25], (num_rows, num_cols,-1))<jupyter_output>Consider calling test() to check results before calling compute() which computes on the entire dataset and writes back to the HDF5 file
Took 1.68 mins to compute randomized SVD
Need 1 components for 80%
Need 2 components for 90%
Need 25 components for 95%
<jupyter_text>##### Visualize PCA prior to F3R<jupyter_code>fig, axes = usid.plot_utils.plot_scree(h5_Sprecon, title='Skree plot')
if save_figure == True:
    fig.savefig(output_filepath+'\PCARaw_Skree.eps', format='eps')
    fig.savefig(output_filepath+'\PCARaw_Skree.tif', format='tiff')
# Visualize the eigenvectors; 
first_evecs = h5_Vprecon[:9, :]
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, first_evecs, use_rainbow_plots=True, 
                                        x_label='Voltage (Vac)', y_label='Displacement (a.u.)', 
                                        num_plots=9, subtitle_prefix='Component', 
                                        title='SVD Eigenvectors (F3R)', evenly_spaced=False)
if save_figure == True:
    fig.savefig(output_filepath+'\PCARaw_Eig.eps', format='eps')
    fig.savefig(output_filepath+'\PCARaw_Eig.tif', format='tiff')
# Visualize the abundance maps:
fig, axes = usid.plot_utils.plot_map_stack(abun_maps, num_comps=9, title='SVD Abundance Maps',
                                            color_bar_mode='single', cmap='inferno', reverse_dims=True)
if save_figure == True:
    fig.savefig(output_filepath+r'\PCARaw_Loading.eps', format='eps')
    fig.savefig(output_filepath+r'\PCARaw_Loading.tif', format='tiff')<jupyter_output><empty_output><jupyter_text>#### Optionally Clean Data
Here we are looking primarily to capture the most variance without capturing noise. In this case, the Skree plot is a good visual indication of how many components to keep. Also, some components might contribute to the variance yet still be noise (instrument noise, for example).
As a precaution, try to be a little conservative, as aggressive PCA filtering will ultimately change your data.<jupyter_code>''' 
Performs PCA filtering prior to F3R Step
To avoid constantly redoing SVD, this segment also checks the components_used attribute to see if the SVD rebuilt has been
done with these components before. 
clean_components can either be:
    -a single component; [0]
    -a range; [0,2] is same as [0,1,2]
    -a subset of components; [0,1,4,5] would not include 2,3, and 6-end 
'''
PCA_pre_reconstruction_clean = True
# Filters out the components specified from h5_resh (the reshaped h5 data)
if PCA_pre_reconstruction_clean == True:
    
    # important! If choosing components, min is 3 or interprets as start/stop range of slice
    clean_components = np.array([0,1,2,3]) # np.append(range(5,9),(17,18))
    # checks for existing SVD
    itms = [i for i in h5_resh.parent.items()]
    svdnames = []
    for i in itms:
        if 'Reshaped_Data-SVD' in i[0]:
            svdnames.append(i[1])
    
    SVD_exists = False
    for i in svdnames:
        print(i.name.split('/')[-1])
        if usid.hdf_utils.find_dataset(hdf.file[i.name], 'Rebuilt_Data') != []:
            rb = usid.hdf_utils.find_dataset(hdf.file[i.name], 'Rebuilt_Data')[0]
            if np.array_equal(rb.parent.attrs['components_used'], clean_components):
                print(i.name.split('/')[-1],'has same components')
                SVD_exists = True
                test = rb
    
    if SVD_exists == False:
        print('#### Doing SVD ####')
        test = px.processing.svd_utils.rebuild_svd(h5_resh, components=clean_components)
    
    PCA_clean_data_prerecon = test[:,:].reshape(num_rows,-1)<jupyter_output>Reshaped_Data-SVD_000
#### Doing SVD ####
Reconstructing in batches of 2048 positions.
Batchs should be 512.03125 Mb each.
Completed reconstruction of data from SVD results.  Writing to file.
Done writing reconstructed data to file.
<jupyter_text>## Step 3) Fast Free Force Reconstruction### Step 3A) Divide Filtered displacement Y(w) by effective transfer function H(w)
This process takes a while; a few minutes is normal.<jupyter_code># Divide Image-data h5_main by Tune-data TF_norm
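# Summary (added) of the F3R steps performed per row in the loop below:
#   1) subtract the mean and FFT the (optionally PCA-cleaned) filtered deflection
#   2) apply the phase ramp exp(-1j * (w / w_drive) * ph) found above
#   3) keep only the bins whose magnitude exceeds NoiseLimit
#   4) divide by the effective transfer function TF_norm
#   5) inverse-FFT back to the time domain to recover the force, stored row-by-row in G_time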
ind_drive = (np.abs(w_vec2-ex_freq)).argmin()
G = np.zeros(w_vec2.size,dtype=complex)
G_time = np.zeros(shape=h5_filt.shape, dtype=h5_filt.dtype)
signal_ind_vec = np.arange(w_vec2.size)
NoiseLimit = np.ceil(noise_floor)
for i in range(num_rows):
    if i % 10 == 0:
        print('Row', i)
        
    signal_ind_vec=np.arange(w_vec2.size)
      
    G = np.zeros(w_vec2.size,dtype=complex)         # G = raw
    
    # Step 3B) Phase correction; ph value is defined way above in Step 2B.i
    if PCA_pre_reconstruction_clean == True:
        test_data = PCA_clean_data_prerecon[i,:] - np.mean(PCA_clean_data_prerecon[i,:])   
    else:
        test_data = h5_filt[i,:] - np.mean(h5_filt[i,:])
    
    # filt_line is from filtered data above  
    test_data = np.fft.fftshift(np.fft.fft(test_data))
    signal_kill = np.where(np.abs(test_data) < NoiseLimit)
    signal_ind_vec = np.delete(signal_ind_vec,signal_kill)
    test_data_ph = (test_data) * np.exp(-1j*w_vec2/(w_vec2[ind_drive])*ph)
    # Step 3C)  iFFT the response above a user defined noise floor to recover Force in time domain.
    G[signal_ind_vec] = test_data_ph[signal_ind_vec]
    G = G/TF_norm
    G_time[i,:] = np.real(np.fft.ifft(np.fft.ifftshift(G)))
    FRaw_resp = np.fft.fftshift(np.fft.fft(h5_main[i]))  # kept for the validation plot below (only the last row survives the loop)
# Saves as backup in Python for later analysis; can remove from this notebook if you'd like
if PCA_pre_reconstruction_clean == False:
    G_time_noPCA = np.copy(G_time)
else:
    G_time_PCA = np.copy(G_time)<jupyter_output><empty_output><jupyter_text>#### Check a row to validate results<jupyter_code>fig, ax = plt.subplots(figsize=(12, 7))
plt.semilogy(w_vec, (np.abs(FRaw_resp)), label='Response')
plt.semilogy(w_vec[signal_ind_vec], (np.abs(G[signal_ind_vec])), 'og')
plt.semilogy(w_vec[signal_ind_vec], (np.abs(FRaw_resp[signal_ind_vec])),'.r', label='F3r')
ax.set_xlabel('Frequency (kHz)', fontsize=16)
ax.set_ylabel('Amplitude (a.u.)', fontsize=16)
ax.legend(fontsize=14)
ax.set_yscale('log')
ax.set_xlim(0, 200)
ax.set_title('Noise Spectrum for row ' + str(i), fontsize=16)
usid.plot_utils.set_tick_font_size(ax, 14)
if save_figure == True:
    if PCA_pre_reconstruction_clean == False:
        fig.savefig(output_filepath+r'\Noise_Spectra_noprePCA.eps', format='eps')
        fig.savefig(output_filepath+r'\Noise_Spectra_noprePCA.tif', format='tiff')
    else:
        fig.savefig(output_filepath+r'\Noise_Spectra_prePCA.eps', format='eps')
        fig.savefig(output_filepath+r'\Noise_Spectra_prePCA.tif', format='tiff')        
phaseshifted = G_time[i].reshape(-1, pixel_ex_wfm.size)
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, phaseshifted, use_rainbow_plots=True, 
                                     x_label='Voltage (Vac)', title='Phase Shifted',
                                     num_plots=4, y_label='Deflection (a.u.)')
if PCA_pre_reconstruction_clean == False:
    fig.savefig(output_filepath+r'\PostFilter_Displacement_noprePCA.tif', format='tiff')
else:
    fig.savefig(output_filepath+r'\PostFilter_Displacement_prePCA_['+str(clean_components)+'].tif', format='tiff')<jupyter_output><empty_output><jupyter_text>#### Reshaping and Storing  Results<jupyter_code># copies h5_filt over to H5_F3R; if necessary can come back here to reprocess
h5_F3R = usid.hdf_utils.create_empty_dataset(source_dset=h5_filt,
                                           dtype=h5_filt.dtype,
                                           dset_name='h5_F3R',
                                           new_attrs=dict(),
                                           skip_refs=False)
usid.hdf_utils.copy_main_attributes(h5_filt, h5_F3R)
h5_F3R[:,:] = G_time[:,:]
h5_F3R.file.flush()
usid.hdf_utils.link_as_main(h5_main=h5_F3R, h5_pos_inds=h5_pos_inds,
                          h5_pos_vals=h5_pos_vals, h5_spec_inds=h5_spec_inds,
                          h5_spec_vals=h5_spec_vals)
h5_F3Rresh_grp = usid.hdf_utils.find_results_groups(h5_F3R, 'Reshape')
scan_width = 1
h5_F3Rresh = px.processing.gmode_utils.reshape_from_lines_to_pixels(h5_F3R, pixel_ex_wfm.size, scan_width / num_cols)
h5_F3Rresh_grp = h5_F3Rresh.parent
# Saves whether it was PCA cleaned before or not
if PCA_pre_reconstruction_clean == True:
    h5_F3Rresh_grp.attrs['pre_PCA'] = clean_components
else:
    h5_F3Rresh_grp.attrs['pre_PCA'] = -1
print('Data was reshaped from shape', h5_F3R.shape,
      'reshaped to ', h5_F3Rresh.shape)
# Sanity check that our reshape works. Since this is usually the case this is commented out for now
#raw = np.reshape(h5_F3Rresh, [-1, pixel_ex_wfm.size])
#fig, axes = px.plot_utils.plot_curves(pixel_ex_wfm, raw[128:256],use_rainbow_plots=True, 
#                                     x_label='Voltage (Vac)', title='Raw',
#                                     num_plots=4, y_label='Deflection (a.u.)')<jupyter_output>Starting to reshape G-mode line data. Please be patient
Finished reshaping G-mode line data to rows and columns
Data was reshaped from shape (64, 4194304) reshaped to  (8192, 32768)
<jupyter_text>#### Do PCA on F3R recovered data<jupyter_code># SVD and save results
h5_svd = px.processing.svd_utils.SVD(h5_F3Rresh, num_components=256)
h5_svd_group = h5_svd.compute()
h5_U = h5_svd_group['U']
h5_V = h5_svd_group['V']
h5_S = h5_svd_group['S']
# Since the two spatial dimensions (x, y) have been collapsed to one, we need to reshape the abundance maps:
abun_maps_postfilter = np.reshape(h5_U[:,:25], (num_rows, num_cols,-1))<jupyter_output>Consider calling test() to check results before calling compute() which computes on the entire dataset and writes back to the HDF5 file
Took 2.21 mins to compute randomized SVD
<jupyter_text>#### Visualize PCA on F3R Data<jupyter_code>fig, axes = usid.plot_utils.plot_scree(h5_S, title='Skree plot')
if save_figure == True:
    if PCA_pre_reconstruction_clean == False:
        fig.savefig(output_filepath+'\PCF3R_Skree_noPrePCa.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\PCF3R_Skree_withPrePCA.tif', format='tiff')
skree_sum = np.zeros(h5_S.shape)
for i in range(h5_S.shape[0]):
    skree_sum[i] = np.sum(h5_S[:i])/np.sum(h5_S)
plt.figure()
plt.plot(skree_sum, 'o')
print('Need', skree_sum[skree_sum<0.8].shape[0],'components for 80%')
print('Need', skree_sum[skree_sum<0.9].shape[0],'components for 90%')
print('Need', skree_sum[skree_sum<0.95].shape[0],'components for 95%')
print('Need', skree_sum[skree_sum<0.99].shape[0],'components for 99%')
# Visualize the eigenvectors:
first_evecs = h5_V[:16, :]
fig, axes = usid.plot_utils.plot_curves(pixel_ex_wfm, first_evecs, x_label='Voltage (Vac)', use_rainbow_plots=True, 
                                    y_label='Displacement (a.u.)', num_plots=16,
                                    subtitle_prefix='Component', title='SVD Eigenvectors (F3R)', evenly_spaced=False)
if save_figure == True:
    if PCA_pre_reconstruction_clean == False:
        fig.savefig(output_filepath+'\PCAF3R_Eig_noPrePCA.eps', format='eps')
        fig.savefig(output_filepath+'\PCF3R_Eig_noPrePCA.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\PCAF3R_Eig_withPrePCA.eps', format='eps')
        fig.savefig(output_filepath+'\PCF3R_Eig_withPrePCA.tif', format='tiff')
# Visualize the abundance maps:
fig, axes = usid.plot_utils.plot_map_stack(abun_maps_postfilter, num_comps=16, title='SVD Abundance Maps',
                             color_bar_mode='single', cmap='inferno', reverse_dims=True)
if save_figure == True:
    if PCA_pre_reconstruction_clean == False:
        fig.savefig(output_filepath+'\PCAF3R_Loadings_noPrePCA.eps', format='eps')
        fig.savefig(output_filepath+'\PCF3R_Loadings_noPrePCA.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\PCAF3R_Loadings_withPrePCA.eps', format='eps')
        fig.savefig(output_filepath+'\PCF3R_Loadings_withPrePCA.tif', format='tiff')<jupyter_output>Need 1 components for 80%
Need 1 components for 90%
Need 2 components for 95%
Need 4 components for 99%
<jupyter_text>#### Optionally Clean Data
Here, we are looking primarily to capture the most variance without capturing noise. If you used PCA filtering before, the chances are high that most of the variance is in the first 1-2 components. 
Include too few components, though, and the CPDs will all look mostly identical (depends on your data).<jupyter_code>''' 
As before, we check to make sure we aren't doing SVD over and over again with same components during the debugging phase.
'''
PCA_post_reconstruction_clean = False
if PCA_post_reconstruction_clean == True:
    clean_components = np.array([0,1,2,3,5,7]) ##Components you want to keep
    #num_components = len(clean_components)
    
    # checks for existing SVD
    rebuilt_grp = h5_F3Rresh.parent.name + '/Reshaped_Data-SVD_000'
    names = h5_list(hdf.file[rebuilt_grp],'Rebuilt_Data')
    
    # Checks if SVD has been done somewhere with these components already to save time
    SVD_exists = False
    for i in names:
        rb = hdf.file[rebuilt_grp+'/'+i]
        print(i,rb.attrs['components_used'])
        if np.array_equal(rb.attrs['components_used'],clean_components):
            print(i,'has same components')
            SVD_exists = True
            test = rb['Rebuilt_Data']
            
    if SVD_exists == False:
        test = px.processing.svd_utils.rebuild_svd(h5_F3Rresh, components=clean_components)
        
    PCA_clean_data_postrecon = test[:,:].reshape(num_rows*num_cols,-1)<jupyter_output><empty_output><jupyter_text>### Test fitting on sample data<jupyter_code># This is number of periods you want to average over,
# for the best time resolution use periods = 1 (but the fitting takes much longer)
periods = 4
num_periods_per_sample = int(np.floor(num_periods / periods))
pnts_per_sample = int(np.floor(pnts_per_period * periods))
light_on_time = [2.8, 5.5]
complete_periods = True
'''
We can maximize the number of points by using "incomplete" periods per cycle, 
since an integer number of cycles will not fit exactly into the acquired dataset
'''
if complete_periods == False:
    decimation = 2**int(np.floor(np.log2(pnts_per_sample)))
    pnts_per_CPDpix = int(N_points_per_pixel/decimation)
    remainder = 0
else:
    # old approach, but add section for missing period at the end
    decimation = int(np.floor(pnts_per_sample))
    pnts_per_CPDpix = int(N_points_per_pixel/decimation)
    remainder = N_points_per_pixel - pnts_per_CPDpix*decimation
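# Note (added): 'decimation' is the number of raw samples collapsed into one CPD point (the samples spanned
# by 'periods' drive cycles); pnts_per_CPDpix is the resulting number of CPD points per pixel, and 'remainder'
# is the leftover tail that is fitted separately at the end of each pixel below.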
print('Time resolution:',pxl_time/pnts_per_CPDpix)
# time scale for plotting
tx = np.linspace(0, pxl_time, pnts_per_CPDpix) 
deg = 2
row = 3*num_cols+14  #random sample pixel
p = 3 #random oscillation in that pixel
# note: p cannot exceed pnts_per_CPDpix (= N_points_per_pixel/decimation)
##Raw F3R response
# Use PCA clean or not
if PCA_post_reconstruction_clean == False:
    print('Not post-filtered')
    resp = np.float32(h5_F3Rresh[row][pnts_per_CPDpix*p:pnts_per_CPDpix*(p+1)])
else:
    resp = np.float32(PCA_clean_data_postrecon[row][pnts_per_CPDpix*p:pnts_per_CPDpix*(p+1)])
resp = resp-np.mean(resp)
V_per_osc = pixel_ex_wfm[pnts_per_CPDpix*p:pnts_per_CPDpix*(p+1)]
p1,s = npPoly.polyfit(V_per_osc,resp,deg,full=True)
y1 = npPoly.polyval(V_per_osc,p1)
print(-0.5*p1[1]/p1[2], ' V for CPD')
plt.figure()
plt.plot(V_per_osc,resp, 'k')
plt.plot(V_per_osc,y1, 'g')
test_wH = np.zeros((pnts_per_CPDpix, deg+1))
# Tests plotting a few random CPDS from some subset of pixels
# This does not test for actual number of rows/columns available
rows = [1*num_cols+14, 44*num_cols+16, 32*num_cols+67]  
fig,a = plt.subplots(nrows=1, figsize=(8,6))
a.set_xlabel('Time (s)')
a.set_ylabel('CPD (V)')
a.set_title('Random CPD pixels')
# For testing CPD fits; these are pixel values for "light on" and "light off" time
p_on = int(light_on_time[0]*1e-3 * pnts_per_CPDpix/pxl_time) 
p_off = int(light_on_time[1]*1e-3 * pnts_per_CPDpix/pxl_time) 
time = np.linspace(0, pxl_time, pnts_per_CPDpix)
time_on = time[p_on:p_off]
time_off = time[p_off:]   
# Curve fitting limits. 
bds_on = ([-10, (1e-5), -5, time_on[0]-1e-10], 
          [10, (1e-1), 5, time_on[0]+1e-10])  
bds_off = ([-10, (1e-5), -5, time_off[0]-1e-10], 
           [10, (1e-1), 5, time_off[0]+1e-10])  
plot_fits = True
for row in rows:
    for p in range(pnts_per_CPDpix-min(1,remainder)):
    
        if PCA_post_reconstruction_clean == False:
            resp = np.float32(h5_F3Rresh[row][decimation*p:decimation*(p+1)])
        else:
            resp = np.float32(PCA_clean_data_postrecon[row][decimation*p:decimation*(p+1)])
                
        resp = (resp-np.mean(resp))
        V_per_osc = pixel_ex_wfm[decimation*p:decimation*(p+1)]
        popt, _ = npPoly.polyfit(V_per_osc, resp, deg, full=True)
        test_wH[p] = popt
    
    # if using complete periods approach, then last point will be cycle+leftover
    if remainder > 0:
        if PCA_post_reconstruction_clean == False:
            resp = np.float32(h5_F3Rresh[row][(pnts_per_CPDpix-1)*decimation:])
        else:
            resp = np.float32(PCA_clean_data_postrecon[row][(pnts_per_CPDpix-1)*decimation:])
       
        resp = (resp-np.mean(resp))
        V_per_osc = pixel_ex_wfm[(pnts_per_CPDpix-1)*decimation:]
        popt, _ = npPoly.polyfit(V_per_osc, resp, deg, full=True)
        
        test_wH[-1,:] = popt
    
    # CPD for this pixel from the parabola vertices (needed in both branches below)
    test_CPD = -0.5 * test_wH[:,1]/test_wH[:,2]
    if plot_fits == True:
        [cuton, cutoff] = [test_CPD[p_on:p_off] - test_CPD[0], test_CPD[p_off:] - test_CPD[0]]
        popt1, _ = curve_fit(fitexp, time_on, cuton, bounds=bds_on)
        popt2, _ = curve_fit(fitexp, time_off, cutoff, bounds=bds_off)
        a.plot(tx, test_CPD-test_CPD[0], time_on, fitexp(time_on, *popt1), time_off, fitexp(time_off, *popt2))
        print(popt1[1]*1e3,' ms for ON')
        print(popt2[1]*1e3,' ms for OFF')
    else:
        a.plot(tx, test_CPD-test_CPD[0])
# Save results
prerecon = 'PrePCA' if PCA_pre_reconstruction_clean == True else ''
if PCA_post_reconstruction_clean == True:
    fig.savefig(output_filepath+'\RandomCPDs_'+prerecon+'PCA'+str(clean_components)
                +'_'+str(periods)+'periods.tif', format='tiff')
else:
    fig.savefig(output_filepath+'\RandomCPDs_'+prerecon+'noPCA_'+str(periods)+'periods.tif', format='tiff')
<jupyter_output>Time resolution: 6.159398496240601e-05
Not post-filtered
0.22934478334924224  V for CPD
0.5737753964968406  ms for ON
0.36553380146931913  ms for OFF
0.5716113060396533  ms for ON
0.3466770399313745  ms for OFF
0.573556949948167  ms for ON
0.31714398247758735  ms for OFF
<jupyter_text>### Repeat on the full dataset
This is very slow! Probably 2-4x as long as generating F3R<jupyter_code># Uses the periods settings up above. Uncomment the following lines if you want to jump right here.
#periods = 4
#complete_periods = True
 
num_periods_per_sample = int(np.floor(num_periods / periods))
pnts_per_sample = int(np.floor(pnts_per_period * periods))
if complete_periods == False:
    # new approach since it's base-2 samples and can curve-fit to less than full cycle
    decimation = 2**int(np.floor(np.log2(pnts_per_sample)))
    pnts_per_CPDpix = int(N_points_per_pixel/decimation)
    remainder = 0
else:
    # old approach, but add section for missing period at the end
    decimation = int(np.floor(pnts_per_sample))
    pnts_per_CPDpix = int(N_points_per_pixel/decimation)
    remainder = N_points_per_pixel - pnts_per_CPDpix*decimation
# time scale for plotting
tx = np.linspace(0, pxl_time, pnts_per_CPDpix) 
deg = 2 #parabola
wHfit3 = np.zeros((num_rows*num_cols, pnts_per_CPDpix, deg+1))
print('#### Generating CPD from F3R ####')
for n in range((num_rows*num_cols)):
    # Just for updating you this is in progress..
    if n%1000 == 0:
        print('Pixel: ', n)
        
    for p in range(pnts_per_CPDpix-min(1,remainder)): 
        if PCA_post_reconstruction_clean == False:
            resp = np.float32(h5_F3Rresh[n][decimation*p:decimation*(p+1)])
        else:
            resp = np.float32(PCA_clean_data_postrecon[n][decimation*p:decimation*(p+1)])
                
        resp = resp-np.mean(resp)
        V_per_osc = pixel_ex_wfm[decimation*p:decimation*(p+1)]
        popt, _ = npPoly.polyfit(V_per_osc, resp, deg, full=True)
        wHfit3[n,p,:] = popt
        
    # if using complete periods approach, then last point will be cycle+leftover
    if remainder > 0:
        if PCA_post_reconstruction_clean == False:
            resp = np.float32(h5_F3Rresh[n][(pnts_per_CPDpix-1)*decimation:])
        else:
            resp = np.float32(PCA_clean_data_postrecon[n][(pnts_per_CPDpix-1)*decimation:])
       
        resp = (resp-np.mean(resp))
        V_per_osc = pixel_ex_wfm[(pnts_per_CPDpix-1)*decimation:]
        popt, _ = npPoly.polyfit(V_per_osc, resp, deg, full=True)
        
        wHfit3[n,-1,:] = popt
    
# polyfit returns a + bx + cx^2 coefficients
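# Note (added): for coefficients (a, b, c) of y = a + b*V + c*V**2, the vertex sits at V* = -b/(2c), which is
# taken as the CPD (hence the -0.5*coef[1]/coef[2] expressions below); the curvature c tracks the capacitance
# gradient ("cap") and a is the constant offset.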
        
    
# lets us debug further; cap is just capacitance (curvature), CPD is from peak of parabola
if PCA_post_reconstruction_clean == True:
    CPD_PCA = -0.5*np.divide(wHfit3[:,:,1],wHfit3[:,:,2]) # vertex of parabola
    CPD_PCA_cap = wHfit3[:,:,2]
    CPD = np.copy(CPD_PCA[:,:])
    CPD_grad = np.copy(CPD_PCA_cap[:,:])
    CPD_PCA_offset = wHfit3[:,:,0]
    CPD_offset = np.copy(CPD_PCA_offset)
    
else:
    
    CPD_raw = -0.5*np.divide(wHfit3[:,:,1],wHfit3[:,:,2])
    CPD_raw_cap = wHfit3[:,:,2]
    CPD = np.copy(CPD_raw[:,:])
    CPD_grad = np.copy(CPD_raw_cap[:,:])
    CPD_raw_offset = wHfit3[:,:,0]
    CPD_offset = np.copy(CPD_raw_offset)<jupyter_output><empty_output><jupyter_text>## Store to H5<jupyter_code># Saves the CPD to the H5 file
e = h5_main.parent.name + '/' + 'Raw_Data-CPD'
if e in hdf.file:
    print('Overwriting CPD dataset')
    grp_name = hdf.file[e]
    del grp_name['CPD']
    grp_name['CPD'] = CPD[:,:]
    
    print(np.allclose(grp_name['CPD'].value, CPD))
    
else:    
    print('Creating new dataset')
    grp_CPD = usid.hdf_utils.create_indexed_group(h5_main.parent, 'CPD')
    CPD_spec_dims = [usid.write_utils.Dimension('Time', 's', tx)]
    h5_CPD = usid.hdf_utils.write_main_dataset(grp_CPD,
                                             CPD,
                                             'CPD',
                                             'CPD',
                                             'V',
                                             pos_dims,
                                             CPD_spec_dims)
# Writes the parabola to H5 file    
try:
    dset = hdf.file.create_dataset("parafit_main", shape=wHfit3.shape, dtype=np.float32)
    dset[...] = wHfit3
except:
    print('Overwriting Parabola Fit Save')
    dset = hdf.file['parafit_main']
    dset[...] = wHfit3  # write into the existing dataset; rebinding the Python name would not update the file
    
hdf.file.flush()<jupyter_output><empty_output><jupyter_text>#### (Optional) Reconstruct CPD from parabola fit data (when loading an old) file<jupyter_code>#%% Reconstruct CPD from parafit these data
reconstruct = False
# dset is NxP, N = num_pixels total, P= number of points per CPD trace (8192=8.192 ms)
if reconstruct:
    
    CPD_recon = np.zeros([num_rows*num_cols, dset.shape[1]])
    CPD_grad_recon = np.zeros([num_rows*num_cols, dset.shape[1]])
    CPD_offset_recon = np.zeros([num_rows*num_cols, dset.shape[1]])
        
    CPD_recon[:,:] = -0.5*np.divide(dset[:,:,1],dset[:,:,2]) # vertex of parabola
    CPD_grad_recon[:,:] = dset[:,:,2]
    CPD_offset_recon[:,:] = dset[:,:,0]<jupyter_output><empty_output><jupyter_text>## Data Visualization 
#### CPD vs Time<jupyter_code># Set up some variables
CPD_off = CPD[:,:]
CPD_on = CPD[:,:]
time = np.linspace(0.0, pxl_time, CPD.shape[1])
dtCPD = pxl_time/CPD.shape[1] #dt for the CPD since not same length as raw data
p_on = int(light_on_time[0]*1e-3 / dtCPD) 
p_off = int(light_on_time[1]*1e-3 / dtCPD) 
time_on = time[p_on:p_off]
time_off = time[p_off:]   # last point is sometimes NaN for some reason
bds_on = ([-10, (1e-5), -5, time_on[0]-1e-10], 
       [10, (1e-1), 5, time_on[0]+1e-10])
p0on = [-0.025, 1e-3, 0, time_on[0]]
bds_off = ([-10, (1e-5), -5, time_off[0]-1e-10], 
           [10, (1e-1), 5, time_off[0]+1e-10])
p0off = [.025, 1e-3, 0, time_off[0]]
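# Note (added): bounds and initial guesses follow the fitexp argument order [A, tau, y0, x0];
# tau is constrained to 10 us - 100 ms and x0 is pinned (within +/-1e-10 s) to the start of each fit window.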
# This will overwrite CPD_on_avg, etc that are loaded from the H5 file originally. Normally that is okay.
# Make CPD on and off, reshape into images by takign averages
CPD_on = CPD[:, p_on:p_off]
CPD_off = CPD[:, p_off:]
CPD_grad_on = CPD_grad[:, p_on:p_off]
CPD_grad_off = CPD_grad[:, p_off:]
CPD_offset_on = CPD_offset[:, p_on:p_off]
CPD_offset_off = CPD_offset[:, p_off:]
CPD_on_avg = np.zeros((num_rows, num_cols))
CPD_off_avg = np.zeros((num_rows, num_cols))
CPD_grad_on_avg = np.zeros((num_rows, num_cols))
CPD_grad_off_avg = np.zeros((num_rows, num_cols))
CPD_offset_on_avg = np.zeros((num_rows, num_cols))
CPD_offset_off_avg = np.zeros((num_rows, num_cols))
CPD_on_time = np.zeros((num_rows, num_cols))
CPD_off_time = np.zeros((num_rows, num_cols))
CPD_bion_time = np.zeros((num_rows, num_cols))
CPD_bioff_time_fast = np.zeros((num_rows, num_cols))
CPD_bioff_time_slow = np.zeros((num_rows, num_cols))
CPD_on_mag = np.zeros((num_rows, num_cols))
CPD_off_mag  = np.zeros((num_rows, num_cols))<jupyter_output><empty_output><jupyter_text>#### Test a single CPD Dataset<jupyter_code># Tests a single random CPD and shows the expected fits.
# This plots them separately if you wish to save for debugging purposes.
# random pixel
r = 32
c = 40
test = CPD[r*num_cols+c,:]
plt.figure(figsize=(8,6))
plt.plot(time,test)
plt.xlabel('Time (ms)', fontsize=16)
plt.ylabel('CPD (V)', fontsize=16)
#plt.savefig(output_filepath+'\CPD_sample.tif', format='tiff')
# Fit bounds and initial guesses
bds = ([-10, (1e-5), -5, time_on[0]-1e-10], 
       [10, (1e-1), 5, time_on[0]+1e-10])
p0s = [-0.025, 1e-3, 0, time_on[0]]
cut = CPD_on[r*num_cols + c, :] - CPD_on[r*num_cols + c, 0]
popt1, _ = curve_fit(fitexp, time_on, cut, bounds=bds, p0=p0s)
print(popt1[1]*1e3, ' ms CPD on tau')
plt.figure(figsize=(8,6))
plt.plot(time_on, cut)
plt.plot(time_on, fitexp(time_on, *popt1), 'g--')
#plt.savefig(output_filepath+'\CPD_on_fitting_example.tif', format='tiff')
bds = ([-10, (1e-5), -5, time_off[0]-1e-10], 
       [10, (1e-1), 5, time_off[0]+1e-10])
cut = CPD_off[r*num_cols + c, :] - CPD_off[r*num_cols + c, 0]
popt1, _ = curve_fit(fitexp, time_off, cut, bounds=bds )
print(popt1[1]*1e3, ' ms CPD off tau')
plt.figure(figsize=(8,6))
plt.plot(time_off, cut)
plt.plot(time_off, fitexp(time_off, *popt1), 'r--')
#plt.savefig(output_filepath+'\CPD_off_fitting_example.tif', format='tiff')
# The following are to test biexponential fits, but this hasn't proven useful to date. It's there for interest's sake
'''
bds_bion = ([1e-15,     1e-5,   1e-15,     1e-5,   -5, time_on[0]-1e-10], 
       [1,         1e-1,   5,      500,   5,  time_on[0]+1e-10])
cut = CPD_on[r*num_cols + c, :] - CPD_on[r*num_cols + c, 0]
popt1, _ = curve_fit(fitbiexp, time_on, cut, bounds=bds_bion)
print(popt1[1]*1e3, ' ms CPD on tau', popt1[3]*1e3,' ms CPD on tau2')
plt.figure(figsize=(8,6))
plt.plot(time_on, cut)
plt.plot(time_on, fitbiexp(time_on, *popt1), 'g--')
#plt.savefig(output_filepath+'\CPD_on_fitting_example-biexponential.tif', format='tiff')
bds_bioff = ([-5,     1e-5,   -5,     1e-5,   -5, time_off[0]-1e-10], 
       [-1e-15, 1e-1,   -1e-15,      500,   5,  time_off[0]+1e-10])
cut = CPD_off[r*num_cols + c, :] - CPD_off[r*num_cols + c, 0]
popt1, _ = curve_fit(fitbiexp, time_off, cut, bounds=bds_bioff)
print(popt1[1]*1e3, ' ms CPD off tau', popt1[3]*1e3,' ms CPD on tau2')
plt.figure(figsize=(8,6))
plt.plot(time_off, cut)
plt.plot(time_off, fitbiexp(time_off, *popt1), 'g--')
#plt.savefig(output_filepath+'\CPD_off_fitting_example-biexponential.tif', format='tiff')
'''
#%% Generate CPD
# Biexponential fitting makes this VERY VERY slow! Warning!
doBiexp_fit = False  # note: bds_bion / bds_bioff are only defined in the commented-out test block above; define them before enabling this
print('#### Generating CPD rate images ####')
for r in np.arange(CPD_on_avg.shape[0]):
    if r%10 == 1:
        print('Row: ', r)
        print('Average CPD on = ', np.mean(CPD_on_time[r-1, :])*1e3,'ms')
        print('Average CPD off = ', np.mean(CPD_off_time[r-1, :])*1e3,'ms')
    for c in np.arange(CPD_on_avg.shape[1]):
        
        CPD_on_avg[r][c] = np.mean(CPD_on[r*num_cols + c,:])
        CPD_grad_on_avg[r][c] = np.mean(CPD_grad_on[r*num_cols + c,:])
        CPD_offset_on_avg[r][c] = np.mean(CPD_offset_on[r*num_cols + c,:])
        cut = CPD_on[r*num_cols + c, :] - CPD_on[r*num_cols + c, 0]
        try:
            popt, _ = curve_fit(fitexp, time_on, cut, 
                                bounds=bds_on, p0=p0on)
            CPD_on_time[r][c] = popt[1]
            CPD_on_mag[r][c] = popt[0]
            
            if doBiexp_fit == True:
                #biexponential
                popt, _ = curve_fit(fitbiexp, time_on, cut, bounds=bds_bion)
                CPD_bion_time[r][c] = popt[1]   # takes tau1, the "fast" part
        except:
            CPD_on_time[r][c] = CPD_on_time[r][c-1] # blur bad pixels
            CPD_bion_time[r][c] = CPD_bion_time[r][c-1] #blur bad pixels
            print( 'error_on')
            print(r, ' ', c)
        CPD_off_avg[r][c] = np.mean(CPD_off[r*num_cols + c,:])
        CPD_grad_off_avg[r][c] = np.mean(CPD_grad_off[r*num_cols + c,:])
        CPD_offset_off_avg[r][c] = np.mean(CPD_offset_off[r*num_cols + c,:])
        cut = CPD_off[r*num_cols + c, :] - CPD_off[r*num_cols + c, 0]
        try:
            popt, _ = curve_fit(fitexp, time_off, cut, bounds=bds_off)
            CPD_off_time[r][c] = popt[1]
            CPD_off_mag[r][c] = popt[0]
            
            if doBiexp_fit == True:
                #biexponential
                popt, _ = curve_fit(fitbiexp, time_off, cut, bounds=bds_bioff)
                CPD_bioff_time_fast[r][c] = popt[1]   # takes tau1, the "fast" part
                CPD_bioff_time_slow[r][c] = popt[3]   # takes tau2, the "slow" part
        except:
            CPD_off_time[r][c] = CPD_off_time[r][c-1] #blur bad pixels
            CPD_bioff_time_fast[r][c] = CPD_bioff_time_fast[r][c-1] #blur bad pixels
            CPD_bioff_time_slow[r][c] = CPD_bioff_time_slow[r][c-1] #blur bad pixels
            print( 'error')
            print(r, ' ', c)
SPV = CPD_on_avg - CPD_off_avg<jupyter_output>#### Generating CPD rate images ####
Row:  1
Average CPD on =  0.5626397806826171 ms
Average CPD off =  1.1129513062921026 ms
Row:  11
Average CPD on =  0.5580557545799163 ms
Average CPD off =  1.1075938234136375 ms
Row:  21
Average CPD on =  0.560796391155097 ms
Average CPD off =  1.107962397899583 ms
Row:  31
Average CPD on =  0.5611954055570394 ms
Average CPD off =  1.1106591870646572 ms
Row:  41
Average CPD on =  0.5614417712841862 ms
Average CPD off =  1.1106237828239378 ms
Row:  51
Average CPD on =  0.5589220737596197 ms
Average CPD off =  1.1146689949055026 ms
Row:  61
Average CPD on =  0.5617961854902702 ms
Average CPD off =  1.114589895458611 ms
<jupyter_text>#### Saves CPD data to H5<jupyter_code># Saves text files to disk. You can also do this manually by finding the dataset in the HDF5 file
prefix = '' if PCA_post_reconstruction_clean else 'no'  # match the 'PCApost' / 'noPCApost' figure naming used below
np.savetxt(output_filepath+r'\CPD_on_'+prefix+'PCApost.txt', CPD_on_avg, delimiter=' ')
np.savetxt(output_filepath+r'\CPD_off_'+prefix+'PCApost.txt', CPD_off_avg, delimiter=' ')
np.savetxt(output_filepath+r'\CPD_on_time_'+prefix+'PCApost.txt', CPD_on_time, delimiter=' ')
np.savetxt(output_filepath+r'\CPD_off_time_'+prefix+'PCApost.txt', CPD_off_time, delimiter=' ')
np.savetxt(output_filepath+r'\CPDGrad_on_'+prefix+'PCApost.txt', CPD_grad_on_avg, delimiter=' ')
np.savetxt(output_filepath+r'\CPDGrad_off_'+prefix+'PCApost.txt', CPD_grad_off_avg, delimiter=' ')
np.savetxt(output_filepath+r'\CPDOffset_on_'+prefix+'PCApost.txt', CPD_offset_on_avg, delimiter=' ')
np.savetxt(output_filepath+r'\CPDOffset_off_'+prefix+'PCApost.txt', CPD_offset_off_avg, delimiter=' ')
np.savetxt(output_filepath+r'\SPV_'+prefix+'PCApost.txt', SPV, delimiter=' ')
# Save CPD to the H5 file
grp_name = h5_CPD.name
CPD_exists = usid.hdf_utils.find_dataset(h5_file[grp_name].parent, 'CPD_on_time')
if CPD_exists: 
    dset = usid.hdf_utils.find_dataset(h5_file[grp_name].parent, 'CPD_on_time')[0]
    dset[:, :] = CPD_on_time[:, :]

    dset = usid.hdf_utils.find_dataset(h5_file[grp_name].parent, 'CPD_off_time')[0]
    dset[:, :] = CPD_off_time[:, :]

    dset = usid.hdf_utils.find_dataset(h5_file[grp_name].parent, 'SPV')[0]
    dset[:, :] = SPV[:, :]
    
    print('Overwriting CPD Data!')
else:
    print('Creating new Datasets')
    
    # write the data directly to the CPD folder
    # in the future will need to make this a unique subfolder for multiple attempts
    dset = h5_file.create_dataset(h5_CPD.parent.name + '/CPD_on_time', CPD_on_time.shape)
    dset[:,:] = CPD_on_time[:,:]
    
    dset = h5_file.create_dataset(h5_CPD.parent.name + '/SPV', SPV.shape)
    dset[:,:] = SPV[:,:]
    
    dset = h5_file.create_dataset(h5_CPD.parent.name + '/CPD_off_time', CPD_off_time.shape)
    dset[:,:] = CPD_off_time[:,:]
    <jupyter_output>Creating new Datasets
<jupyter_text>### Visualize the CPD Data<jupyter_code># Plotting
#1e3 to put in mV
mx = np.max([np.max(CPD_on_avg), np.max(CPD_off_avg)])*1e3
mn = np.min([np.min(CPD_on_avg), np.min(CPD_off_avg)])*1e3
xv = np.linspace(0,img_length*1e6, num_cols)
yv = np.linspace(0,img_height*1e6, num_rows)
# Plots the CPD averages
fig, a = plt.subplots(nrows=2, figsize=(13, 6))
_, cbar = usid.plot_utils.plot_map(a[0], CPD_off_avg*1e3, cmap='inferno', aspect=aspect, 
                       x_vec=xv, y_vec = yv, stdevs = 2,
                       cbar_label='CPD (mV)')
cbar.set_label('CPD (mV)', rotation=270, labelpad=16)
a[0].set_title('CPD Off Average', fontsize=12)
_, cbar = usid.plot_utils.plot_map(a[1], CPD_on_avg*1e3, cmap='inferno', aspect=aspect, 
                       x_vec=xv, y_vec = yv, stdevs = 2,
                       cbar_label='CPD (mV)')
cbar.set_label('CPD (mV)', rotation=270, labelpad=16)
a[1].set_title('CPD On Average', fontsize=12)
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\CPDon_vs_off_PCApost.eps', format='eps')
        fig.savefig(output_filepath+'\CPDon_vs_off_PCApost_'+str(clean_components)+'.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\CPDon_vs_off_noPCApost.eps', format='eps')
        fig.savefig(output_filepath+'\CPDon_vs_off_noPCApost.tif', format='tiff')
    
# some clean-up for plotting to remove curve-fit errors; only for setting color range
from scipy import signal
testC = signal.medfilt(CPD_on_time, kernel_size=[3,3])
testD = signal.medfilt(CPD_off_time, kernel_size=[3,3])
mnC = (np.mean(testC) - 2*np.std(testC))*1e3
mxC = (np.mean(testC) + 2*np.std(testC))*1e3
mnD = (np.mean(testD) - 2*np.std(testD))*1e3
mxD = (np.mean(testD) + 2*np.std(testD))*1e3
mn = np.min([mnC, mnD])
mx = np.max([mxC, mxD])
# Plots the CPD Time Constants
fig = plt.figure(figsize=(13,6))
a = fig.add_subplot(211)
a.set_axis_off()
a.set_title('CPD Off Time', fontsize=12)
a.imshow(CPD_off_time*1e3, cmap='inferno', vmin=mn, vmax=mx, aspect=aspect)
a = fig.add_subplot(212)
a.set_axis_off()
a.set_title('CPD On Time', fontsize=12)
im = a.imshow(CPD_on_time*1e3, cmap='inferno', vmin=mn, vmax=mx, aspect=aspect)
cx = fig.add_axes([0.86, 0.11, 0.02, 0.77])
cbar = fig.colorbar(im, cax=cx)
cbar.set_label('Time Constant (ms)', rotation=270, labelpad=16)
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\CPD_times_PCA_'+str(clean_components)+'.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\CPD_times_noPCA.tif', format='tiff')
# Plots the CPD time constants separately for auto-contrasting purposes
fig, a = plt.subplots(nrows=1, figsize=(13, 3))
_, cbar = usid.plot_utils.plot_map(a, CPD_off_time*1e3, cmap='inferno', aspect=aspect, 
                       x_vec=xv, y_vec = yv, stdevs = 2,
                       cbar_label='Time Constant (ms)')
cbar.set_label('Time Constant (ms)', rotation=270, labelpad=16)
a.set_title('CPD Off Time', fontsize=12)
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\CPDoff_times_PCA-Alone_'+str(clean_components)+'.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\CPDoff_times_noPCA-Alone.tif', format='tiff')    
fig, a = plt.subplots(nrows=1, figsize=(13, 3))
_, cbar = usid.plot_utils.plot_map(a, CPD_on_time*1e3, cmap='inferno', aspect=aspect, 
                       x_vec=xv, y_vec = yv, stdevs = 2,
                       cbar_label='Time Constant (ms)')
cbar.set_label('Time Constant (ms)', rotation=270, labelpad=16)
a.set_title('CPD On Time', fontsize=12)
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\CPDon_times_PCA-Alone_'+str(clean_components)+'.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\CPDon_times_noPCA-Alone.tif', format='tiff')    
# Optionally displays the CPD gradient
'''
fig, a = plt.subplots(nrows=1, figsize=(13, 3))
_, cbar = px.plot_utils.plot_map(a, CPD_grad_resh_on_avg, cmap='inferno', aspect=aspect, 
                       x_vec=xv, y_vec = yv, stdevs = 2,
                       cbar_label='Gradient (a.u.)')
cbar.set_label('Time Constant (ms)', rotation=270, labelpad=16)
a.set_title('Capacitive Gradient', fontsize=12)
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\CPD_gradient_PCA-Alone_'+str(clean_components)+'.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\CPD_gradient_noPCA-Alone.tif', format='tiff') 
'''<jupyter_output><empty_output><jupyter_text>#### Visualize SPV<jupyter_code># SPV plotting
# 1e3 to put in mV
fig, a = plt.subplots(nrows=1, figsize=(13, 3))
_, cbar = usid.plot_utils.plot_map(a, SPV*1e3, cmap='inferno', aspect=aspect, 
                       x_vec=xv, y_vec=yv, stdevs = 2,
                       cbar_label='SPV (mV)')
cbar.set_label('SPV (mV)', rotation=270, labelpad=16)
a.set_title('SPV (mV)', fontsize=12)
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\SPV_PCApost.eps', format='eps')
        fig.savefig(output_filepath+'\SPV_PCApost_'+str(clean_components)+'.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\SPV_noPCApost.eps', format='eps')
        fig.savefig(output_filepath+'\SPV_noPCApost.tif', format='tiff')<jupyter_output><empty_output><jupyter_text>#### Visualize the CPD at several random pixels<jupyter_code># points for CPD slices
# Input the coordinates in unit length that you wish to plot
# these are in x:y format (column:row)
indices = {.1:1,
           4.1:1,
           4.2:6,
           7.8:.9,
           12:3,
           17.2:4.7,
           24:7,
           30:3
           }
img_length = parms_dict['FastScanSize']
img_height = parms_dict['SlowScanSize']
# Random indices to check the CPD
useRandomPixels = True
if useRandomPixels:
    
    keys = np.random.randint(0, num_cols, size=[5])*img_length*1e6/num_cols
    vals = np.random.randint(0, num_rows, size=[5])*img_height*1e6/num_rows
    indices = dict(zip(keys, vals))
        
cptslabels = [k for k in indices] #column points, row points
rptslabels = [k for k in indices.values()]
cpts = [int(i) for i in np.round(np.array(cptslabels) * (1e-6/ img_length) * num_cols)]
rpts = [int(i) for i in np.round(np.array(rptslabels) * (1e-6/ img_height) * num_rows)]
#cpts = [69, 76] #column points, row points
#rpts = [4, 4]
linecoords = np.arange(rpts[0]*num_cols + cpts[0], rpts[0]*num_cols + cpts[1])
fig, a = plt.subplots(nrows=2, figsize=(13, 10), facecolor='white')
im0 = usid.plot_utils.plot_map(a[0], CPD_on_avg*1e3, cmap='inferno', origin='lower', aspect=0.5,
                             x_vec=xv, y_vec=yv, num_ticks=9,
                             cbar_label='CPD (mV)')
time = np.linspace(0.0, pxl_time, CPD.shape[1])
dtCPD = pxl_time/CPD.shape[1] #dt for the CPD since not same length as raw data
colors = ['C'+str(i) for i in np.arange(0,len(cpts))]
markers =  ['C'+str(i)+'s' for i in np.arange(0,len(cpts))]
for k,j,m,c in zip(cpts, rpts, markers, colors):
    a[0].plot(k, j, m, markersize=10, mec='white', mew=1)
    a[1].plot(time, CPD[j*num_cols + k,:], c)
    a[1].set_xlabel('Time (s)')
    a[1].set_ylabel('CPD (V)')
    print(CPD_off_time[j, k], ' s CPD off time at ', j, ' ', k)
    print(CPD_on_time[j, k], ' s CPD on time at ', j, ' ', k)
    
if save_figure == True:
    if PCA_post_reconstruction_clean == True:
        fig.savefig(output_filepath+'\CPD_slices_PCApost.tif', format='tiff')
    else:
        fig.savefig(output_filepath+'\CPD_slices_noPCApost.tif', format='tiff')<jupyter_output>0.00034227643393861505  s CPD off time at  35   17
0.0005737001492532013  s CPD on time at  35   17
0.00036425991522076264  s CPD off time at  49   12
0.000561602435476554  s CPD on time at  49   12
0.0003121276309530868  s CPD off time at  4   55
0.000582722136580472  s CPD on time at  4   55
0.000318708703850456  s CPD off time at  32   77
0.0005528844675072474  s CPD on time at  32   77
0.0002790795464359325  s CPD off time at  28   75
0.0005732392998353735  s CPD on time at  28   75
<jupyter_text>#### PCA of the CPD data<jupyter_code># Shows where most of the information in the CPD is, in terms of principal components
from sklearn.utils.extmath import randomized_svd
U, S, V = randomized_svd(CPD[:,:-1], 256, n_iter=3)
# Since the two spatial dimensions (x, y) have been collapsed to one, we need to reshape the abundance maps:
abun_maps = np.reshape(U[:,:25], (num_rows, num_cols,-1))
# Visualize the variance / statistical importance of each component:
fig, axes = usid.plot_utils.plot_scree(S, title='Scree plot')
if save_figure == True:
    fig.savefig(output_filepath+'\CPDtotal_Skree.tif', format='tiff')
# Visualize the eigenvectors:
first_evecs = V[:6, :]
fig, axes = usid.plot_utils.plot_curves(time[:-1]*1E+3, first_evecs, x_label='Time (ms)', y_label='CPD Eig (a.u.)', num_plots=9,
                         subtitle_prefix='Component', title='SVD Eigenvectors (F3R)', evenly_spaced=False)
if save_figure == True:
    fig.savefig(output_filepath+'\CPDtotal_Eig.tif', format='tiff')
# Visualize the abundance maps:
fig, axes = usid.plot_utils.plot_map_stack(abun_maps, num_comps=9, title='SVD Abundance Maps',
                             color_bar_mode='single', cmap='inferno', reverse_dims=True)
if save_figure == True:
    fig.savefig(output_filepath+'\CPDtotal_Loadings.tif', format='tiff')<jupyter_output><empty_output><jupyter_text>#### Visualize the CPD over time<jupyter_code>#%% CPD Time Slices
timeslice = np.floor(np.arange(0.5, 8, .5) *1e-3/dtCPD)
# find correct mn and mx for color scale
# you can manually change mn and mx after running this block to maximize the contrast
CPD_mn = np.reshape(CPD[:, p_on+int((p_off-p_on)/2)], [64, 128])
mn = np.mean(CPD_mn) - 2.5*np.std(CPD_mn)
CPD_mx = np.reshape(CPD[:, p_off+int((CPD.shape[1]-p_off)/2)], [64, 128])
mx = np.mean(CPD_mx) + 3*np.std(CPD_mx)
#mn = -.13
#mx = -.0600
for k in timeslice:
    fig = plt.figure(figsize=(13,3))
    a = fig.add_subplot(111)
    CPD_rs = np.reshape(CPD[:, int(k)], [64, 128])
    im = a.imshow(CPD_rs, cmap='inferno', vmin=mn, vmax=mx, aspect=aspect)
    a.set_axis_off()
    tl = '{0:.2f}'.format(k*dtCPD/1e-3)
    plt.title('At '+ tl + ' ms', fontsize=12)
    cx = fig.add_axes([0.9, 0.11, 0.02, 0.77])
    cbar = fig.colorbar(im, cax=cx)
    cbar.set_label('CPD (mV)', rotation=270, labelpad=16)
    #fig.savefig(output_filepath+'\CPDslice_' + tl + '_ms.eps', format='eps')
    fig.savefig(output_filepath+'\CPDslice_' + tl + '_ms.tif', format='tiff')<jupyter_output><empty_output><jupyter_text>#### Visualize the CPD spatially across a subset of the image<jupyter_code>#%% Cross-sectional animation, setup
'''
Set the coordinates to draw a line across here. 
1) Note these are in length units (in microns here)! 
2) Rpts must have the same start and finish. This segment only allows horizontal lines at this time. Sorry!
'''
cptslabels = [16, 20] #column points, row points
rptslabels = [3  , 3]
cpts = [int(i) for i in np.array(cptslabels) * (1e-6/ img_length) * num_cols]
rpts = [int(i) for i in np.array(rptslabels) * (1e-6/ img_height) * num_rows]
#cpts = [69, 76] #column points, row points
#rpts = [4, 4]
linecoords = np.arange(rpts[0]*num_cols + cpts[0], rpts[0]*num_cols + cpts[1])
clen = cpts[1] - cpts[0]
rlen = rpts[1] - rpts[0]
pxl_size = img_length/num_cols #meter length of a pixel
pxl_ht = img_height/num_rows #meter height of a pixel
dtCPD = pxl_time/CPD.shape[1] #dt for the CPD since not same length as raw data
p_on = int(light_on_time[0]*1e-3 / dtCPD) 
p_off = int(light_on_time[1]*1e-3 / dtCPD) 
ccoords = np.arange(cpts[0],cpts[1])
rcoords = np.arange(rpts[0],rpts[1])
time = np.linspace(0.0, pxl_time, CPD.shape[1])
xax = ccoords*pxl_size*1e6
fig, a = plt.subplots(nrows=3, figsize=(13, 10), facecolor='white')
im0 = a[0].imshow(CPD_on_avg, cmap='inferno', origin='lower',
                    extent=[0, img_length*1e6, 0, img_height*1e6])
cbar = plt.colorbar(im0, ax=a[0], orientation='vertical',
                    fraction=0.046, pad=0.01, use_gridspec=True)
cbar.set_label('CPD (V)', rotation=270, labelpad = 20)
a[0].plot(ccoords*pxl_size*1e6, rpts[0]*pxl_ht*1e6*np.ones(len(ccoords)), 'w')
ims = []
a[1].set_ylabel('Normalized CPD (a.u.)')
a[2].set_xlabel('Distance (um)')
a[2].set_ylabel('CPD (mV)')
txtcoord = np.max(CPD[linecoords,0])*1e3
#Colorscale
# Here you should change this to best accentuate the contrast in the image. 
CPD_mn = np.reshape(CPD[:, p_on+int((p_off-p_on)/2)], [64, 128])
mn = np.mean(CPD_mn) - 3*np.std(CPD_mn)
CPD_mx = np.reshape(CPD[:, p_off+int((CPD.shape[1]-p_off)/2)], [64, 128])
mx = np.mean(CPD_mx) + 3*np.std(CPD_mx)
displays = np.array([1, p_on, int(p_on+(p_off-p_on)/2), p_off, pnts_per_CPDpix-5])
markers = ['^-','o-','s-','D-', 'v-']
labels = ['{0:.2f}'.format(i*dtCPD/1e-3)+ ' ms' for i in displays]
for k in range(len(displays)):
    CPD_rs = np.reshape(CPD[:, displays[k]], [64, 128])
    sectn = CPD[linecoords,displays[k]]
    a[1].plot(xax, (sectn-np.min(sectn))/(np.max(sectn)-np.min(sectn)), markers[k], label=labels[k]) 
    a[2].plot(xax, sectn*1e3, markers[k], markersize=8, label=labels[k]) 
length_labels = str(cptslabels[0])+'-'+str(cptslabels[1])+'um_at_'+str(rptslabels[0])+'_um'
fig.savefig(output_filepath+'\CPD_composite_'+length_labels+'.tif', format='tif')
a[1].legend(fontsize='12')
fig.savefig(output_filepath+'\CPD_composite_'+length_labels+'_legend.tif', format='tif')<jupyter_output><empty_output><jupyter_text>#### Animate the above graphic over time! <jupyter_code>'''
Change the following line to use the FFMPeg .Exe file on your local drive.
This animation will be saved on your local folder, it will not display live in Jupyter. It takes a few minutes to process
'''
import matplotlib.animation as animation
plt.rcParams['animation.ffmpeg_path'] = r'C:\Users\Raj\Downloads\ffmpeg-20180124-1948b76-win64-static\bin\ffmpeg.exe'
fig, a = plt.subplots(nrows=3, figsize=(13, 10), facecolor='white')
im0 = a[0].imshow(CPD_on_avg, cmap='inferno', origin='lower',
                    extent=[0, img_length*1e6, 0, img_height*1e6])
cbar = plt.colorbar(im0, ax=a[0], orientation='vertical',
                    fraction=0.046, pad=0.01, use_gridspec=True)
cbar.set_label('CPD (V)', rotation=270, labelpad = 20)
a[0].plot(ccoords*pxl_size*1e6, rpts[0]*pxl_ht*1e6*np.ones(len(ccoords)), 'w')
ims = []
a[1].set_ylabel('Normalized CPD (a.u.)')
a[2].set_xlabel('Distance (um)')
a[2].set_ylabel('CPD (mV)')
txtcoord = np.max(CPD[linecoords,0])*1e3
for k in np.arange(time.shape[0]):
    
    CPD_rs = np.reshape(CPD[:, int(k)], [64, 128])
    im0 = a[0].imshow(CPD_rs, cmap='inferno', origin='lower',
                    extent=[0, img_length*1e6, 0, img_height*1e6], vmin=mn, vmax=mx)
    
    a[0].plot(ccoords*pxl_size*1e6, rpts[0]*pxl_ht*1e6*np.ones(len(ccoords)), 'w')
    
    if k in np.arange(p_on,p_off):
        tl0 = a[0].text(img_length/2*1e6 - 1, img_height*1e6+0.1, 'LIGHT ON', color='blue', weight='bold')
    else:
        tl0 = a[0].text(img_length/2*1e6 - 1, img_height*1e6+0.1, 'LIGHT OFF', color='black', weight='regular')
    sectn = CPD[linecoords,k]
    im1, = a[1].plot(xax, (sectn-np.min(sectn))/(np.max(sectn)-np.min(sectn)), 'r^-')   #comma unpacks into a list to add titles
    htitle = 'At '+ '{0:.2f}'.format(k*dtCPD/1e-3)+ ' ms'
    tl1 = a[1].text(xax[int(xax.shape[0]/2)], 1.05, htitle)
    
    im2, = a[2].plot(xax, sectn*1e3, 'bo-')   #comma unpacks into a list to add titles
    htitle = 'At '+ '{0:.2f}'.format(k*dtCPD/1e-3)+ ' ms'
    #tl2 = a[2].text(xax[int(xax.shape[0]/2)], txtcoord+5, htitle)
    ims.append([im0, tl0, im1, tl1, im2])
ani = animation.ArtistAnimation(fig, ims, interval=60,repeat_delay=10)
ani.save(output_filepath+'\CPD_graph_'+length_labels+'.mp4')<jupyter_output><empty_output><jupyter_text>#### Visualize via k-means Clustering<jupyter_code>from ffta.utils import dist_cluster, mask_utils
import badpixels
# Specify number of clusters here
clusters=5
img_length = parms_dict['FastScanSize']
img_height = parms_dict['SlowScanSize']
group = h5_main.name + '-CPD'
mask_path = r'G:\Team Drives\201805_BAPI_paper_source_data\GSKPM\Masks\Mask_BAPI20_0008.txt'
mask = mask_utils.load_mask_txt(mask_path, flip=False)
CPD_file = px.hdf_utils.find_dataset(h5_file, 'CPD')[-1]
if CPD_file.name.split('/')[-1] != 'CPD':
    CPD_file = px.hdf_utils.find_dataset(CPD_file.parent, 'CPD')[0]
for k in parms_dict:
    CPD_file.parent.attrs[k] = parms_dict[k]
    CPD_file.parent.attrs['trigger'] = 0 # since kmeans designed for fftrEFM code, this is a fake flag
CPD_clust = dist_cluster.dist_cluster(CPD_file,data_avg='CPD_on_avg', mask=mask, isCPD=True) 
#                                       imgsize=[img_length, img_height], light_on=light_on_time)
# destreak image
fa, bpl = badpixels.find_bad_pixels(CPD_clust.data_avg, 1)
fa = badpixels.remove_bad_pixels(CPD_clust.data_avg, fa, bpl)
CPD_clust.data_avg = fa[:,:]
CPD_clust.analyze()
#_, _, fig = CPD_clust.kmeans(CPD_clust.CPD_scatter, show_results=True,clusters=clusters)
CPD_clust.kmeans(clusters=clusters)
fig, ax = CPD_clust.plot_img()
if save_figure == True:
    fig.savefig(output_filepath+'\masked_image-'+'.tif', format='tiff')
fig, _ = CPD_clust.plot_kmeans()
if save_figure == True:
    fig.savefig(output_filepath+'\k_means_vs_grain_distance_numclusters-'+str(CPD_clust.results.cluster_centers_.shape[0])+'.tif', format='tiff')
fig, _ = CPD_clust.heat_map()
if save_figure == True:
    fig.savefig(output_filepath+'\k_means_vs_grain_distance_heat_map_numclusters-'+str(CPD_clust.results.cluster_centers_.shape[0])+'.tif', format='tiff')
CPD_clust.segment_maps()
CPD_clust.plot_segment_maps(ax)
if save_figure == True:
    fig.savefig(output_filepath+'\clustered_CPD-'+str(CPD_clust.results.cluster_centers_.shape[0])+'.tif', format='tiff')
fig, ax = CPD_clust.plot_centers()
ax.set_ylabel('CPD (V)')
ax.set_xlabel('Time (s)')
if save_figure == True:
    fig.savefig(output_filepath+'\CPD_cluster_centers-'+str(CPD_clust.results.cluster_centers_.shape[0])+'.tif', format='tiff')
fig, ax = CPD_clust.plot_img()
hdf.close()<jupyter_output><empty_output> | 
	no_license | 
	/Notebooks/G-Mode F3R-v2-Copy1.ipynb | 
	rajgiriUW/GKPFM | 43 | 
| 
	<jupyter_start><jupyter_text>Segmenting and Clustering Neighborhoods in Toronto_Part 2## Problem part 2:<jupyter_code>import pandas as pd
import requests
from bs4 import BeautifulSoup
List_url = "https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M"
source = requests.get(List_url).text
soup = BeautifulSoup(source, 'xml')
table=soup.find('table')
column_names=['Postalcode','Borough','Neighbourhood']
df = pd.DataFrame(columns=column_names)
df
for tr_cell in table.find_all('tr'):
    row_data=[]
    #print(tr_cell)
    for td_cell in tr_cell.find_all('td'):
        td_data=[]
        td_data.append(td_cell.find('b').text.strip())
        td_data.append('Not assigned')
        td_data.append('Not assigned')
        td_data_idx = 1
        for td_ele in td_cell.find_all('a'):
            td_data[td_data_idx] = td_ele.text.strip()
            td_data_idx += 1
            if td_data_idx > 2:
                break
        #print(td_data)
        df.loc[len(df)] = td_data
        #df
df.head()
df=df[df['Borough']!='Not assigned']
df
# Note: assigning to `row` inside iterrows() does not modify df; use .loc instead
df.loc[df['Neighbourhood'] == 'Not assigned', 'Neighbourhood'] = df['Borough']
    
#df[df[]=='Not assigned']=df['Borough']
df.head()
temp_df=df.groupby('Postalcode')['Neighbourhood'].apply(lambda x: "%s" % ', '.join(x))
temp_df=temp_df.reset_index(drop=False)
temp_df.rename(columns={'Neighbourhood':'Neighbourhood_joined'},inplace=True)
df_merge = pd.merge(df, temp_df, on='Postalcode')
df_merge.drop(['Neighbourhood'],axis=1,inplace=True)
df_merge.drop_duplicates(inplace=True)
df_merge.rename(columns={'Neighbourhood_joined':'Neighbourhood'},inplace=True)
df_merge.head()
df_merge.shape
import geocoder  # used by get_geocode below; not imported in the earlier cells shown

def get_geocode(postal_code):
    # initialize your variable to None
    lat_lng_coords = None
    while(lat_lng_coords is None):
        g = geocoder.google('{}, Toronto, Ontario'.format(postal_code))
        lat_lng_coords = g.latlng
    latitude = lat_lng_coords[0]
    longitude = lat_lng_coords[1]
    return latitude,longitude
#geo_df = pd.read_csv('http://cocl.us/Geospatial_data') # 308 Permanent Redirect to -->
geo_df = pd.read_csv('https://ibm.box.com/shared/static/9afzr83pps4pwf2smjjcf1y5mvgb18rr.csv')
geo_df.head()
geo_df.rename(columns={'Postal Code':'Postalcode'},inplace=True)
geo_merged = pd.merge(geo_df, df_merge, on='Postalcode')
geo_data=geo_merged[['Postalcode','Borough','Neighbourhood','Latitude','Longitude']]
geo_data.head()<jupyter_output><empty_output> | 
	no_license | 
	/My Neighborhood in Toronto-02.ipynb | 
	innovish/Neighborhoods-in-Toronto | 1 | 
| 
	<jupyter_start><jupyter_text>Given the dtypes, there is no possibility of negative values in the dataset. <jupyter_code>%matplotlib inline
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import numpy as np
from glob import glob
import matplotlib.cm as cm
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler, RobustScaler
smp_sz = 5000
label_to_id_dict = {v:i for i,v in enumerate(np.unique(y_train[:smp_sz]))}
id_to_label_dict = {v: k for k, v in label_to_id_dict.items()}
def visualize_scatter(data_2d, label_ids, figsize=(20,20)):
    plt.figure(figsize=figsize)
    plt.grid()
    
    nb_classes = len(np.unique(label_ids))
    
    for label_id in np.unique(label_ids):
        plt.scatter(data_2d[np.where(label_ids == label_id), 0],
                    data_2d[np.where(label_ids == label_id), 1],
                    marker='o',
                    color= plt.cm.Set1(label_id / float(nb_classes)),
                    linewidth=1,
                    alpha=0.8,
                    label=id_to_label_dict[label_id])
    plt.legend(loc='best')
tsne = TSNE(n_components=2, perplexity=40.0)
tsne_result = tsne.fit_transform(X_train[:smp_sz])
visualize_scatter(tsne_result, y_train[:smp_sz])
tsne_result_scaled = StandardScaler().fit_transform(tsne_result)
visualize_scatter(tsne_result_scaled, y_train[:smp_sz])<jupyter_output><empty_output> | 
	no_license | 
	/.ipynb_checkpoints/EDA - Fashion Mnist-checkpoint.ipynb | 
	azaelmsousa/LogisticRegression-ANN | 1 | 
| 
	<jupyter_start><jupyter_text># LINEAR REGRESSION<jupyter_code>x = df['sqft_living'].values.reshape(-1,1)
y = df['price'].values
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.23, random_state= 19)
model= LinearRegression()
model.fit(x_train,y_train)
predicted = model.predict(x_test)
print("r:",metrics.r2_score(y_test,predicted))
sns.regplot(y_test,predicted, ci= None)<jupyter_output>C:\Users\pcjl\anaconda3\ANACONDA\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
  warnings.warn(
<jupyter_text># MULTI LINEAR REGRESSIONprice {bathrooms,sqft_living,grade,sqft_above,sqft_living15}<jupyter_code>x = df[["grade","bathrooms","sqft_living","sqft_above","sqft_living15"]].values
y = df['price'].values
x_train,x_test,y_train,y_test = train_test_split(x,y , test_size = 0.23, random_state = 40 )
model = LinearRegression()
model.fit(x_train , y_train)
predicted = model.predict(x_test)
print("MSE:",mean_squared_error(y_test,predicted))
print('r:',metrics.r2_score(y_test,predicted))
import seaborn as sns
ax = sns.regplot(x=y_test, y=predicted, ci=None)
ax.set(xlabel='actual price', ylabel='predicted price')
<jupyter_output>C:\Users\pcjl\anaconda3\ANACONDA\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
  warnings.warn(
<jupyter_text># POLY REGRESSION<jupyter_code>x= df[["grade","bathrooms","sqft_living","sqft_above","sqft_living15"]]
y= df["price"].values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.35, random_state=40)  #splitting data
lg=LinearRegression()
poly=PolynomialFeatures(degree=3)
x_train_fit = poly.fit_transform(x_train) #transforming our input data
lg.fit(x_train_fit, y_train)
x_test_ = poly.fit_transform(x_test)
predicted = lg.predict(x_test_)
print("MSE: ", metrics.mean_squared_error(y_test, predicted))
print("R squared: ", metrics.r2_score(y_test,predicted))
poly = PolynomialFeatures(degree = 2) 
x_poly = poly.fit_transform(x) 
poly.fit(x_poly, y) 
lg=LinearRegression()
lg.fit(x_poly, y) 
plt.scatter(x, y, color="r")
plt.title("Linear regression")
plt.ylabel("price")
plt.xlabel("features")
plt.plot(x, lg.predict(poly.fit_transform(x)), color="k") 
<jupyter_output><empty_output> | 
	no_license | 
	/FIRST STEP INTO M.L (chekpoint).ipynb | 
	amaterstu/Formation-AI | 3 | 
| 
	<jupyter_start><jupyter_text>## 338. Counting Bits
Given a non negative integer number num. For every numbers i in the range 0 ≤ i ≤ num calculate the number of 1's in their binary representation and return them as an array.
Example 1:
```
Input: 2
Output: [0,1,1]
```
Example 2:
```
Input: 5
Output: [0,1,1,2,1,2]
```
Follow up:
```
It is very easy to come up with a solution with run time O(n*sizeof(integer)). But can you do it in linear time O(n) /possibly in a single pass?
Space complexity should be O(n).
Can you do it like a boss? Do it without using any builtin function like __builtin_popcount in c++ or in any other language.
```<jupyter_code># similar to Hamming distance
def countBits(num):
    dp = [0] * (num+1)
    
    for n in range(1, num+1):
        # n & (n-1) clears the lowest set bit of n, so n has exactly one more
        # set bit than n & (n-1), whose count is already stored in dp.
        dp[n] = dp[n & (n-1)] + 1
        
    return dp
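# An equivalent O(n) recurrence, added here for comparison (not part of the original
# solution): i >> 1 drops the last bit of i, and (i & 1) adds it back if it was set.
def countBits_shift(num):
    dp = [0] * (num + 1)
    for i in range(1, num + 1):
        dp[i] = dp[i >> 1] + (i & 1)
    return dp

assert countBits_shift(10) == countBits(10)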
print(countBits(2))
print(countBits(5))
print(countBits(10))<jupyter_output>[0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2]
 | 
	permissive | 
	/leetcode/questions/338-CountingBits.ipynb | 
	subramp-prep/pyLeetcode | 1 | 
| 
	<jupyter_start><jupyter_text># Introduction
Maps allow us to transform data in a `DataFrame` or `Series` one value at a time for an entire column. However, often we want to group our data, and then do something specific to the group the data is in. We do this with the `groupby` operation.
In these exercises we'll apply groupwise analysis to our dataset.
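As a quick, self-contained illustration of the idea (a toy frame made up for this note, not the wine dataset used below), `groupby` splits the rows by a key column and then applies an aggregation to each group:
```
import pandas as pd

toy = pd.DataFrame({'country': ['US', 'US', 'Italy'],
                    'points': [90, 84, 88]})

# mean points per group -> Italy: 88.0, US: 87.0
print(toy.groupby('country').points.mean())
```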
# Useful Resources
- [**Grouping Reference and Examples**](https://www.kaggle.com/residentmario/grouping-and-sorting-reference).  
- [Pandas cheat sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)# Set Up
Run the code cell below to load the data before running the exercises.<jupyter_code>import pandas as pd
from learntools.advanced_pandas.grouping_and_sorting import *
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
pd.set_option("display.max_rows", 5)<jupyter_output><empty_output><jupyter_text># Checking Your Answers
**Check your answers in each exercise using the  `check_qN` function** (replacing `N` with the number of the exercise). For example here's how you would check an incorrect answer to exercise 1:<jupyter_code>check_q1(pd.DataFrame())<jupyter_output><empty_output><jupyter_text>If you get stuck, **use the `answer_qN` function to see the code with the correct answer.**
For the first set of questions, running the `check_qN` on the correct answer returns `True`.
For the second set of questions, using this function to check a correct answer will present an informative graph!# Exercises**Exercise 1**: Who are the most common wine reviewers in the dataset? Create a `Series` whose index is the `taster_twitter_handle` category from the dataset, and whose values count how many reviews each person wrote.<jupyter_code># Your code here
# common_wine_reviewers = reviews.groupby('taster_twitter_handle').taster_twitter_handle.count()
# check_q1(common_wine_reviewers)<jupyter_output><empty_output><jupyter_text>**Exercise 2**: What is the best wine I can buy for a given amount of money? Create a `Series` whose index is wine prices and whose values is the maximum number of points a wine costing that much was given in a review. Sort the valeus by price, ascending (so that `4.0` dollars is at the top and `3300.0` dollars is at the bottom).<jupyter_code># Your code here
# best_wine = reviews.groupby('price').points.max().sort_index()
# check_q2(best_wine)<jupyter_output><empty_output><jupyter_text>**Exercise 3**: What are the minimum and maximum prices for each `variety` of wine? Create a `DataFrame` whose index is the `variety` category from the dataset and whose values are the `min` and `max` values thereof.<jupyter_code># Your code here
# wine_price_extremes = reviews.groupby('variety').price.agg([min, max])
# check_q3(wine_price_extremes)<jupyter_output><empty_output><jupyter_text>The rest of the exercises are visual.
**Exercise 4**: Are there significant differences in the average scores assigned by the various reviewers? Create a `Series` whose index is reviewers and whose values is the average review score given out by that reviewer. Hint: you will need the `taster_name` and `points` columns.<jupyter_code># Your code here
# reviewer_mean_rating = reviews.groupby('taster_name').points.mean()
# check_q4(reviewer_mean_rating)<jupyter_output><empty_output><jupyter_text>**Exercise 5**: What are the rarest, most expensive wine varieties? Create a `DataFrame` whose index is wine varieties and whose values are columns with the `min` and the `max` price of wines of this variety. Sort in descending order based on `min` first, `max` second.<jupyter_code># Your code here
# wine_price_range = reviews.groupby('variety').price.agg([min, max]).sort_values(by=['min', 'max'], ascending=False)
# check_q5(wine_price_range)<jupyter_output><empty_output><jupyter_text>**Exercise 6**: What combination of countries and varieties are most common? Create a `Series` whose index is a `MultiIndex`of `{country, variety}` pairs. For example, a pinot noir produced in the US should map to `{"US", "Pinot Noir"}`. Sort the values in the `Series` in descending order based on wine count.
Hint: first run `reviews['n'] = 0`. Then `groupby` the dataset and run something on the column `n`. You won't need `reset_index`.<jupyter_code># Your code here
# country_variety_pairs = reviews.groupby(['country', 'variety']).size().sort_values(ascending=False)
# check_q6(country_variety_pairs)<jupyter_output><empty_output> | 
	no_license | 
	/datasets/wine-reviews/kernels/X---ostaski---grouping-and-sorting.ipynb | 
	mindis/GDS | 8 | 
| 
	<jupyter_start><jupyter_text># enzymeBayes 
Final project for Alp Kucukelbir's Machine Learning Probabilistic Programming (COMS6998) by Jiayu Zhang and Kiran Gauthier. ### Familiarizing ourselves with the data All data in this analysis has been graciously provided by Prof. Jennifer Ross, Mengqi Xu, and their collaborators from their paper [Direct Single Molecule Imaging of Enhanced Enzyme Diffusion](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.123.128101) and is available upon request.
Shown below are representative trajectories from the buffer dataset (i.e. no urea).<jupyter_code>dir_ = 'mengqi_buffer/'
x, y, t, track_info, lookup, track_id = utils.loadRawMinData(dir_, min_length=5, isDx=False)
x, y, t, dx, dy, dt, lookup, track_info = utils.removeAllOutliers(x, y, t, track_info)
sel_ind = [206, 444, 613, 386, 328]
utils.manyplots_real(1, 5, sel_ind, track_info, [x, y, t])<jupyter_output><empty_output><jupyter_text>## Box's Loop 
### Simple Brownian Diffusion model We begin our first iteration of Box's loop, shown below. 
 
#### Modeling 
In the modeling stage, we assume that the data can be analyzed by the simplest possible model, *Brownian diffusion* (BD). Derivations for this and the other models are given in the helper notebook. 
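As a rough sketch of what this model looks like (our notation here; the exact parameterization is derived in the helper notebook), each observed displacement along a coordinate is treated as an independent Gaussian increment,

$$\Delta x_i \mid D \sim \mathcal{N}\!\left(0,\; 2 D \,\Delta t_i\right),$$

so the likelihood depends on $D$ only through a sum of squared displacements. This is what makes the inverse gamma prior used in the Inference step below conjugate: an $\text{Inv-Gamma}(\alpha, \beta)$ prior updates analytically to $\text{Inv-Gamma}\!\left(\alpha + \tfrac{n}{2},\; \beta + \sum_i \tfrac{\Delta x_i^2}{4\,\Delta t_i}\right)$.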
#### Inference
Assuming an inverse gamma prior for the diffusion coefficient, one can derive the posterior analytically and the posterior results are plotted below for all 813 trajectories. <jupyter_code>utils.draw_vertical_posterior_bm([dx, dy, dt], 0.25, 0.2, track_info)<jupyter_output><empty_output><jupyter_text>#### Criticism  
From the analytic inference of the proposed model, there are two points we would like to address in the criticism stage, 
1. there is a series of very short trajectories ($len \sim 5$) which are very uncertain, as marked by the wide posterior distributions in the left plot 
2. plotting the posterior mean of the diffusive trajectories in logspace, the distribution seems bimodal, implying that there is a slowly diffusive population and a more quickly diffusive population 
See above for the slow, confined trajectories which appear blob like, and the larger diffusive trajectories for which a more certain estimate of the diffusion coefficient can be derived. <jupyter_code># track of interest
sx, sy, st = utils.loadSelectTraj(x, y, t, track_info, 613, False)
sx, sy, st = utils.removeOutLiar(sx, sy, st)
sdx, sdy, sdt = sx[1:]-sx[:-1], sy[1:]-sy[:-1], st[1:]-st[:-1]
utils.PPCs([sx, sy, st, sdx, sdy, sdt], 'bm') <jupyter_output><empty_output><jupyter_text>###### Posterior predictive checks (PPC) 
To criticise the model from a more quantitative standpoint, we will use PPC as a tool to generate statistics on two relevant metrics for the enzyme trajectories. Firstly, we consider the standard deviation of the motion of simulated enzymes in $1D$ and secondly, we consider the autocorrelation of the simulated enzymes along this same axis (see below for further discussion on autocorrelation in enzyme trajectories). 
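Since the helper `utils.PPCs` hides the mechanics, here is a minimal sketch of what such a check could look like for the $1D$ standard deviation under the BD model; the names (`dx_obs`, `D_posterior_samples`, etc.) are placeholders for this sketch, not objects defined in this notebook:
```
import numpy as np

def ppc_std_1d(dx_obs, dt, D_posterior_samples, n_rep=500, seed=0):
    """Toy PPC: compare the observed 1D displacement std to replicates simulated
    from posterior draws of the diffusion coefficient D."""
    rng = np.random.default_rng(seed)
    obs_stat = np.std(dx_obs)
    sim_stats = []
    for _ in range(n_rep):
        D = rng.choice(D_posterior_samples)                          # one posterior draw
        dx_sim = rng.normal(0.0, np.sqrt(2 * D * dt), len(dx_obs))   # simulated replicate
        sim_stats.append(np.std(dx_sim))
    # the observed statistic should not sit far out in the tail of sim_stats
    return obs_stat, np.asarray(sim_stats)
```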
In the case of the simple BD model, we see that it fails both of the PPC checks on this confined trajectory, indicating that it is very unlikely that this data was generated from this model. From this initial iteration of Box's loop, we propose two new models through which we can analyze the observed data. 
1. **Stuck Enzyme Model**: the enzyme is stuck on the surface, and all apparent motion is noise due to the camera.
2. **Brownian Motion in a Harmonic Potential**: BM subject to a harmonic potential well (polymer spring)### Stuck Enzyme / Harmonic Potential Well models 
#### Modeling (Stuck) 
This model assumes that confined trajectories can be described by a single measurement noise parameter, $M_e$, which is common across all trajectories. See below for examples of a stuck enzyme with varying values of the measurement noise. 
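Under this assumption (our notation), every recorded position of a stuck enzyme is simply camera noise about a fixed true position, $x_t \sim \mathcal{N}(x_0, M_e^2)$ independently for each frame.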
#### Modeling (HPW) 
Although initially not considered for the BD model, we now also consider the measurement noise associated with tracking an individual enzyme. In this, we include three parameters for the harmonic potential well model, the diffusion coefficient, $D$, the potential well strength, $\lambda$, and the measurement noise, $M_e$. 
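One common way to write this kind of model (our notation here; the exact parameterization used for inference may differ) is as an Ornstein–Uhlenbeck process observed with noise: between frames the true position relaxes toward the well center,

$$x_{t+\Delta t} \mid x_t \sim \mathcal{N}\!\left(x_t\, e^{-\lambda \Delta t},\; \frac{D}{\lambda}\left(1 - e^{-2\lambda \Delta t}\right)\right),$$

and the recorded position adds independent camera noise with variance $M_e^2$. As $\lambda \to 0$ this recovers the Brownian diffusion increments, while for large $\lambda \Delta t$ each frame is effectively an independent draw from the stationary well, which is why the stuck-like limit appears in the simulations below.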
<jupyter_code>me = [0.6, 0.3, 0.1]
lambda_ = [1e-06, 0.01, 1]
n_times = 423 
utils.simulateData(me, lambda_, n_times)<jupyter_output><empty_output><jupyter_text>#### Inference 
##### Inference on 1-parameter Stuck model ($M_e$) 
Inference for this model was very straightforward and the results of the inference are embedded in the **Criticism** stage of Box's loop below. 
##### Inference on 3-parameter HPW model ($D$, $\lambda$, $M_e$)  
We slightly rewrote the inferential HPW model to express the likelihood of a trajectory in the absence of a potential well, (i.e. tracks for which $\lambda \sim 0$) and identified 10 tracks which were diffusive (i.e. non-confined) to sample over $D$ and $M_e$ alone. Five of the ten are plotted below. <jupyter_code>sel_ind = [268, 411, 286, 402, 374]
utils.manyplots_real(1, 5, sel_ind, track_info, [x, y, t])
utils.plot_DMeAnalysis([dx, dy, dt, track_info])  <jupyter_output><empty_output><jupyter_text>We note that there is only a small likelhood that the measurement noise is on the scale of a pixel, as judged by the posterior density, with much larger probability mass on the subpixel scale. Due to the fact that the vast majority of our trajectories diffuse further than a pixel per timestep, we eliminate $M_e$ from the analysis entirely. #### Criticism 
##### Autocorrelation analysis We quickly realized that in order to deconvolve the final two parameters in the HPW model, the diffusion coefficient, $D$, and the potential well strength, $\lambda$, we must importantly consider their relative values to the time between sequential data points in a trajectory. For the analysis of this dataset where the framerate is $\sim 12 fps$ or, $\sim 0.08$ seconds per frame, the HPW model finds it difficult to converge when $\lambda$ is large relative to $D$, this point is illustrated by the autocorrelation analysis on simulated data below: <jupyter_code>D = 0.175
t_ = np.linspace(0, 8*3, 101*3)
utils.plotAutoCorr(D, t_)<jupyter_output><empty_output><jupyter_text>From a high level view, when $\lambda$ is small relative to a fixed $D$, the track is largely diffusive, for which each sequential point in the trajectory shares large correlation with the previous point. This fits well with *Model 1: Brownian Diffusion*. However, we see that with increasing values of $\lambda$, after say $\lambda \sim 20$, the autocorrelation is effectively zero, indicating that the well is so strong, each sequential position is simply sampled from a normal distribution, producing trajectories similar to the "stuck" case above which moreso fits *Model 2: Harmonic Potential Well*. 
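For the Ornstein–Uhlenbeck parameterization sketched above, this behaviour can be summarized by the stationary position autocovariance (our notation),

$$\langle x(t)\, x(t+\tau) \rangle = \frac{D}{\lambda}\, e^{-\lambda |\tau|},$$

so once $\lambda$ is large compared to the inverse frame time, sequential points are essentially uncorrelated.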
Plotting the $MAP$ estimate of $D$ relative to the true $D$, we note there will be certain tracks in our dataset with highly confined behavior which our HPW model is not able to recover a confident estimation for D and $\lambda$##### Posterior predictive checks 
Finally, we turn back to PPCs to examine the Stuck model from a quantitative standpoint, again examining the standard deviation and autocorrelation of simulated enzyme trajectories in $1D$. In this case, we fail the $1D$ autocorrelation PPC but pass the $1D$ standard deviation PPC. <jupyter_code>utils.PPCs([sx, sy, st, sdx, sdy, sdt], 'me') <jupyter_output><empty_output><jupyter_text>However, when considering the 2-parameter HPW model over $D$, $\lambda$, we pass both PPC checks, giving further creedence to this model being our best candidate for explaining the true enzyme trajectories. <jupyter_code>utils.PPCs([sx, sy, st, sdx, sdy, sdt], 'hpw') <jupyter_output><empty_output><jupyter_text># Conclusions##### Criticism 
Ultimately, we were interested in seeing if we would recover a similar estimate of the population level diffusion coefficient $D_{pop}$ as was arrived at in the paper. In this work, the population level diffusion coefficient was derived by a mean squared displacement (MSD) analysis for which trajectories were rejected if they fell under a Pearson $R^2$ coefficient of 0.9. We note that the total number of trajectories considered after this filter is $N = 141$. 
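For reference (a standard result, stated in our notation), a pure Brownian trajectory in two dimensions has a mean squared displacement that grows linearly with lag time,

$$\mathrm{MSD}(\tau) = \left\langle \left|\mathbf{r}(t+\tau) - \mathbf{r}(t)\right|^2 \right\rangle = 4 D \tau,$$

which is the linear relationship an MSD analysis of this kind fits.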
Incorporating the mean and standard deviation statistics for each individual trajectory in the buffer dataset (for which $N = 813$) relative to the estimate from the paper, we see that we are able to arrive at a similar estimate in terms of the mean, albeit with larger uncertainty. We look to extend the results of this analysis to prove or disprove whether enzymes do in fact diffuse faster in the presence of their substrate by performing this analysis for enzyme trajectories with non-zero urea concentrations. <jupyter_code>utils.plot_finalResults(track_info, min_length=None) 
utils.plot_finalResults(track_info, min_length=10) <jupyter_output>LOOK AT ME 813 395
 | 
	no_license | 
	/final-project/final-notebook.ipynb | 
	KiranGauthier/enzymeBayes | 9 | 
| 
	<jupyter_start><jupyter_text># Now You Code 4: Shopping List
Write a simple shopping list program. Use a Python `list` as a shopping list. Functions definitions have been written for you, so all you need to do is complete the code inside the function. 
The main program loop has a menu allowing you to 1) add to the list 2) remove an item from the list or 3) print the list
Example Run
```
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: a
Enter Item to Add: eggs
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: a
Enter Item to Add: eggs
eggs is already in the list!
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: p
Your List: ['eggs']
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: r
Enter Item to Remove from List: peas
peas is not in the list!
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: r
Enter Item to Remove from List: eggs
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: p
Your List: []
A - Add Item to Shopping List
P - Print Shopping List
R - Remove Item from Shopping List
Q - Quit Shopping List Program
Enter choice: q
```
Most of the code has been written for you fill in the areas below.
<jupyter_code># TODO: Write Todo lists for
# #1 Add item to list if it does not exist
# #2 Remove item from list when it does not exist
# Write code here
def print_menu():
    print("A - Add Item to Shopping List")
    print("P - Print Shopping List")
    print("R - Remove Item from Shopping List")
    print("Q - Quit Shopping List Program")
    return
# prompts for a new item using input() then adds it to the shopping list
# only when it does not exist
# input: shopping list
# output: shopping list
def add_item(shopping_list):
    item = input("Enter Item to Add: ")
    if item in shopping_list:
        print(item, "is already in the list!")
    else:
        shopping_list.append(item)
    return shopping_list

# sorts then prints the shopping list
# input: shopping list
# output: shopping list
def print_list(shopping_list):
    shopping_list.sort()
    print("Your List:", shopping_list)
    return shopping_list
# prompts for an item then removes it from the shopping list
# only when it exists
# input: shopping list
# output: shopping list
def remove_item(shopping_list):
    item = input("Enter Item to Remove from List: ")
    if item in shopping_list:
        shopping_list.remove(item)
    else:
        print(item, "is not in the list!")
    return shopping_list
## Main Program Written For You
shopping_list = []
while True:
    print_menu()
    choice = input("Enter choice: ").upper()
    if choice == 'A':
        shopping_list = add_item(shopping_list)
    elif choice == 'P':
        shopping_list = print_list(shopping_list)
    elif choice == 'R':
        shopping_list = remove_item(shopping_list)
    elif choice == "Q":
        break
    else:
        print('ERROR:', choice,'is not a menu option')
<jupyter_output><empty_output> | 
	no_license | 
	/content/lessons/09/Now-You-Code/.ipynb_checkpoints/NYC4-ShoppingList-checkpoint.ipynb | 
	Learn2Code-SummerSyr/2019learn2code-auramnar | 1 | 
| 
	<jupyter_start><jupyter_text> Academic and Employability Factors influencing placementCampus placement or campus recruiting is a program conducted within universities or other educational institutions to provide jobs to students nearing completion of their studies. In this type of program, the educational institutions partner with corporations who wish to recruit from the student population.
# About Dataset
This data set consists of placement data of students on an XYZ campus. It includes secondary and higher secondary school percentages and specializations. It also includes degree specialization, degree type, work experience, and the salary offered to placed students.
link - https://www.kaggle.com/benroshan/factors-affecting-campus-placement# Objective 
In this kernel, my main motive is to find out what factors influenced a candidate in getting placed: does percentage matter for getting placed, and which degree specializations are most in demand by corporates? I will try to find out the answers to these questions by storytelling with the data. So let's get started. Please Upvote my kernel if you like my work.# Table of contents
- 1. Exploring Data 
- 2. How much dependency between MBA percentage and Salary 
- 3. How much dependency between MBA percentage and Salary with specialisation   
- 4. Avg. % marks of mba by specialisation 
- 5. Placement % of mba in each specialisation by gender 
- 6. MBA percentage distribution 
- 7. MBA percentage distribution by specialisation 
- 8. Salary distribution  
- 9. Salary distribution of MBA in each specialisation 
- 10. Relationship between college test percentage for interview and degree percentage
- 11. Degree percentage distribution 
- 12. Etest percentage distribution 
- 13. Ratio of students in each field of degree education 
- 14. Percentage distribution in each field of degree education 
- 15. How much dependency between Degree percentage and Salary? 
- 16. How much dependency between Degree percentage and E test percentage?
- 17. Placement % of degree in each field by work experience 
- 18. Salary distribution in each field of degree 
- 19. Employability test percentage ( conducted by college) vs salary 
- 20. How are ssc % ,hsc % and degree % related to each other for placed and non placed candidates?
- 21. How are the percentage scores spread in 3D for Commerce & Science graduates?
- 22. How are ssc%,hsc% and degree% related to each other for placed candidates?
- 23. Average % marks of hsc and ssc gender wise 
- 24. Percentage distribution of different streams in hsc 
- 25. Percentage distribution of different boards in hsc and ssc #  1. Exploring Data<jupyter_code># Imports assumed for this notebook (no earlier setup cell is shown in this excerpt)
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
from pandas_profiling import ProfileReport

data=pd.read_csv('../input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv')
ProfileReport(data)
data.head()<jupyter_output><empty_output><jupyter_text>#  2. How much dependency between MBA percentage and Salary<jupyter_code>fig = px.scatter(data, x='mba_p', y='salary')
fig.update_layout(title='MBA percentage vs salary',xaxis_title="MBA % ",yaxis_title="Salary")
fig.show()<jupyter_output><empty_output><jupyter_text>#  3. How much dependency between MBA percentage and Salary with specialisation <jupyter_code>fig = px.scatter(data, x='mba_p', y='salary',color='specialisation')
fig.update_layout(title='MBA percentage vs salary',xaxis_title="MBA % ",yaxis_title="Salary")
fig.show()<jupyter_output><empty_output><jupyter_text>#  4. Avg. % marks of mba by specialisation <jupyter_code>df=data.groupby('specialisation')['mba_p'].mean()
df=pd.DataFrame(df).rename(columns={'mba_p': 'avg. mba %'}).reset_index()
df
fig = go.Figure([go.Pie(labels=df['specialisation'], values=df['avg. mba %'])])
fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=15,insidetextorientation='radial')
fig.update_layout(title="Avg. %  marks of mba by specialisation",title_x=0.5)
fig.show()
<jupyter_output><empty_output><jupyter_text>#  5. Placement % of mba in each specialisation by gender <jupyter_code>df=pd.DataFrame(data.groupby(['gender','specialisation','status'])['sl_no'].count()).rename(columns={'sl_no': 'no. of students'}).reset_index()
fig = px.sunburst(df, path=['gender','status','specialisation'], values='no. of students')
fig.update_layout(title="Placement % of mba in each specialisation by gender ",title_x=0.5)
fig.show()<jupyter_output><empty_output><jupyter_text>#  6. MBA percentage distribution <jupyter_code>mba_percentage=data['mba_p'].values
fig = go.Figure(go.Box(y=mba_percentage,name="MBA %"))
fig.update_layout(title="MBA percentage distribution")
fig.show()<jupyter_output><empty_output><jupyter_text>#  7. MBA percentage distribution by specialisation <jupyter_code>mba_p_1=data[data['specialisation']=="Mkt&Fin"]['mba_p']
mba_p_2=data[data['specialisation']=="Mkt&HR"]['mba_p']    
fig = go.Figure()
fig.add_trace(go.Box(y=mba_p_1,
                     marker_color="blue",
                     name="Mkt&Fn %"))
fig.add_trace(go.Box(y=mba_p_2,
                     marker_color="red",
                     name="Mkt&HR %"))
fig.update_layout(title="Distribution of percentage marks for specialisation -Mkt%Fn &  MKt&HR ")
fig.show()<jupyter_output><empty_output><jupyter_text>#  8. Salary distribution <jupyter_code>fig = go.Figure(data=[go.Histogram(x=data['salary'],  # To get Horizontal plot ,change axis - y=campus_computer
                                  marker_color="chocolate",
                      xbins=dict(
                      start=200000, #start range of bin
                      end=1000000,  #end range of bin
                      size=10000    #size of bin
                      ))])
fig.update_layout(title="Distribution of Salary",xaxis_title="Salary",yaxis_title="Counts")
fig.show()
mba_sal=data['salary'].values
fig = go.Figure(go.Box(y=mba_sal,name="salary"))
fig.update_layout(title="Salary distribution")
fig.show()<jupyter_output><empty_output><jupyter_text>#  9. Salary distribution of MBA in each specialisation <jupyter_code>mba_sal_1=data[data['specialisation']=="Mkt&Fin"]['salary']
mba_sal_2=data[data['specialisation']=="Mkt&HR"]['salary']    
fig = go.Figure()
fig.add_trace(go.Box(y=mba_sal_1,
                     marker_color="blue",
                     name="Salary of Mkt&Fn"))
fig.add_trace(go.Box(y=mba_sal_2,
                     marker_color="red",
                     name="Salary of Mkt&HR"))
fig.update_layout(title="Distribution of salary for specialisation : Mkt&Fn &  MKt&HR ")
fig.show()
fig = go.Figure()
fig.add_trace(go.Histogram(x=mba_sal_1,marker_color="green",name="Mkt&Fn"))
fig.add_trace(go.Histogram(x=mba_sal_2,marker_color="orange",name="MKt&HR"))
# Overlay both histograms
fig.update_layout(barmode='overlay')
# Reduce opacity to see both histograms
fig.update_traces(opacity=0.75)
fig.update_layout(title="Distribution of salary for specialisation : Mkt&Fn &  MKt&HR",xaxis_title="salary",yaxis_title="Counts")
fig.show()<jupyter_output><empty_output><jupyter_text>#  10. Relationship between college test percentage for interview and degree percentage <jupyter_code>fig = go.Figure(go.Histogram2d(
        x=data['etest_p'],
        y=data['degree_p']
    ))
fig.update_layout(title='Density of Interview Test & Degree Percentage',xaxis_title="Test Percentage",yaxis_title="Degree Percentage")
fig.show()<jupyter_output><empty_output><jupyter_text>> #  11. Degree percentage distribution <jupyter_code>degree_percentage=data['degree_p'].values
fig = go.Figure(go.Box(y=degree_percentage,name="Degree %"))
fig.update_layout(title="Degree percentage distribution")
fig.show()
fig = go.Figure(data=[go.Histogram(x=data['degree_p'],marker_color="chocolate")])
fig.update_layout(title="Distribution of degree %",xaxis_title="degree %")
fig.show()<jupyter_output><empty_output><jupyter_text> #  12. Etest percentage distribution <jupyter_code>etest_percentage=data['etest_p'].values
fig = go.Figure(go.Box(y=etest_percentage,name="etest %"))
fig.update_layout(title="E-test percentage distribution")
fig.show()
fig = go.Figure(data=[go.Histogram(x=data['etest_p'],marker_color="blue")])
fig.update_layout(title="Distribution of etest %",xaxis_title="etest %")
fig.show()<jupyter_output><empty_output><jupyter_text> #  13. Ratio of students in each field of degree education<jupyter_code>fig = go.Figure([go.Pie(labels=data['degree_t'].unique(), values=data['degree_t'].value_counts())])
fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=15,insidetextorientation='radial')
fig.update_layout(title=" Ratio of students in each field of degree education",title_x=0.5)
fig.show()
<jupyter_output><empty_output><jupyter_text> #  14. percentage distribution in each field of degree education<jupyter_code>sci_tech=data[data['degree_t']=='Sci&Tech']['degree_p']
comm_mgmt=data[data['degree_t']=='Comm&Mgmt']['degree_p']
others=data[data['degree_t']=='Others']['degree_p']
fig = go.Figure()
fig.add_trace(go.Box(y=sci_tech,
                     marker_color="blue",
                     name="science & tech"))
fig.add_trace(go.Box(y=comm_mgmt,
                     marker_color="red",
                     name="commerce and management"))
fig.add_trace(go.Box(y=others,
                     marker_color="green",
                     name="others"))
fig.update_layout(title="percentage distribution in each field of degree education")
fig.show()
hist_data = [sci_tech,comm_mgmt,others] # Added more distplot
group_labels = ['science and tech',"Commerce and management","others"]
colors=['blue',"green","orange"]
fig = ff.create_distplot(hist_data, group_labels,show_hist=False, # Set False to hide histogram bars
                         colors=colors,bin_size=[10000,10000,10000])
fig.update_layout(title="percentage distribution in each field of degree education")
fig.show()<jupyter_output><empty_output><jupyter_text>#  15. How much dependency between Degree percentage and Salary? <jupyter_code>fig = px.scatter(data, x='degree_p', y='salary')
fig.update_layout(title='Degree percentage vs salary',xaxis_title="Degree % ",yaxis_title="Salary")
fig.show()<jupyter_output><empty_output><jupyter_text>#  16. How much dependency between Degree percentage and E test percentage? <jupyter_code>fig = px.scatter(data, x='mba_p', y='etest_p')
fig.update_layout(title='Degree % vs etest % ',xaxis_title="Degree % ",yaxis_title="Etest %")
fig.show()<jupyter_output><empty_output><jupyter_text>#  17. Placement % of degree in each field  by work experience <jupyter_code>df=pd.DataFrame(data.groupby(['workex','degree_t','status'])['sl_no'].count()).rename(columns={'sl_no': 'no. of students'}).reset_index()
fig = px.sunburst(df, path=['workex','status','degree_t'], values='no. of students')
fig.update_layout(title="Placement % of degree in each field  by work experience ",title_x=0.5)
fig.show()<jupyter_output><empty_output><jupyter_text>> The not-placed ratio is very low for students having work experience #  18. Salary distribution in each field of degree  <jupyter_code>data['salary'] = data['salary'].fillna(0)
sci_tech=data[data['degree_t']=='Sci&Tech']['salary']
comm_mgmt=data[data['degree_t']=='Comm&Mgmt']['salary']
others=data[data['degree_t']=='Others']['salary']
hist_data = [sci_tech,comm_mgmt,others] # data series for the distplot
group_labels = ['science and tech',"Commerce and management","others"]
colors=['blue',"green","orange"]
fig = ff.create_distplot(hist_data, group_labels,show_hist=False, # Set False to hide histogram bars
                         colors=colors,bin_size=[10000,10000,10000])
fig.update_layout(title="salary distribution in each field of degree education")
fig.show()
degree=round(data['degree_p'].mean(),2)
fig = go.Figure(go.Indicator(
    mode = "gauge+number",
    gauge = {
       'axis': {'range': [None, 100]}},
    value = degree,
    title = {'text': "Average degree %"},
    domain = {'x': [0, 1], 'y': [0, 1]}
))
fig.show()
degree=round(data['mba_p'].mean(),2)
fig = go.Figure(go.Indicator(
    mode = "gauge+number",
    gauge = {
       'axis': {'range': [None, 100]}},
    value = degree,
    title = {'text': "Average mba %"},
    domain = {'x': [0, 1], 'y': [0, 1]}
))
fig.show()<jupyter_output><empty_output><jupyter_text>#  19. Employability test percentage ( conducted by college) vs salary <jupyter_code>fig = px.scatter(data, x='etest_p', y='salary')
fig.update_layout(title=' Etest vs salary ',xaxis_title="Etest % ",yaxis_title="salary")
fig.show()<jupyter_output><empty_output><jupyter_text>#  20. How are ssc % ,hsc % and degree % related to each other for placed and non placed candidates?<jupyter_code>fig = px.scatter_3d(data, x='ssc_p', y='hsc_p', z='degree_p',
              color='etest_p', size='etest_p', size_max=18,
              symbol='status', opacity=0.7)
fig.update_layout(margin=dict(l=0, r=0, b=0, t=0))<jupyter_output><empty_output><jupyter_text>#  21. How are the percentage scores spread in 3D for Commerce & Science graduates? <jupyter_code>com=data[data['degree_t']=='Comm&Mgmt']
sci=data[data['degree_t']=='Sci&Tech']
fig = make_subplots(
    rows=1, cols=2,
    specs=[[{'type': 'scatter3d'}, {'type': 'scatter3d'}]])
fig.add_trace(
    go.Scatter3d(x=com['ssc_p'], y=com['hsc_p'], z=com['degree_p'],name="Commerce"),
    row=1, col=1)
fig.add_trace(
    go.Scatter3d(x=sci['ssc_p'], y=sci['hsc_p'], z=sci['degree_p'],name="Science"),
    row=1, col=2)
fig.update_layout(
    title_text='Percentage scores of Commerce & Science graduates',title_x=0.5)
fig.show()<jupyter_output><empty_output><jupyter_text>#  22. How are ssc%,hsc% and degree% related to each other for placed candidates? <jupyter_code>placed=data[data['status']=='Placed']
fig = go.Figure(data=[go.Mesh3d(x=placed['ssc_p'], y=placed['hsc_p'], z=placed['degree_p'], color='lightblue', opacity=0.50)])
fig.show()<jupyter_output><empty_output><jupyter_text>#  23. Average % marks of hsc and ssc gender wise <jupyter_code>df=data.groupby('gender')[['hsc_p','ssc_p']].mean().reset_index()  # select columns with a list; groupby already returns a DataFrame
fig = go.Figure([go.Pie(labels=df['gender'], values=df['hsc_p'])])
fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=15,insidetextorientation='radial')
fig.update_layout(title="Avg. %  marks in hsc by gender",title_x=0.5)
fig.show()
fig = go.Figure([go.Pie(labels=df['gender'], values=df['ssc_p'])])
fig.update_traces(hoverinfo='label+percent', textinfo='value', textfont_size=15,insidetextorientation='radial')
fig.update_layout(title="Avg. %  marks of ssc by gender",title_x=0.5)
fig.show()
<jupyter_output><empty_output><jupyter_text># 24. Percentage distribution of different streams in hsc <jupyter_code>comm=data[data['hsc_s']=='Commerce']['hsc_p']
sci=data[data['hsc_s']=='Science']['hsc_p']
arts=data[data['hsc_s']=='Arts']['hsc_p']
fig = go.Figure()
fig.add_trace(go.Box(y=comm,
                     marker_color="blue",
                     name="commerce"))
fig.add_trace(go.Box(y=sci,
                     marker_color="red",
                     name="science"))
fig.add_trace(go.Box(y=arts,
                     marker_color="green",
                     name="arts"))
fig.update_layout(title="percentage distribution of different streams in hsc")
fig.show()<jupyter_output><empty_output><jupyter_text># 25. Percentage distribution of different boards in hsc  and ssc<jupyter_code>others=data[data['hsc_b']=='Others']['hsc_p']
central=data[data['hsc_b']=='Central']['hsc_p']
fig = go.Figure()
fig.add_trace(go.Box(y=others,
                     marker_color="blue",
                     name="others"))
fig.add_trace(go.Box(y=central,
                     marker_color="red",
                     name="central"))
fig.update_layout(title="percentage distribution of different boards in hsc")
fig.show()
others=data[data['ssc_b']=='Others']['ssc_p']
central=data[data['ssc_b']=='Central']['ssc_p']
fig = go.Figure()
fig.add_trace(go.Box(y=others,
                     marker_color="blue",
                     name="others"))
fig.add_trace(go.Box(y=central,
                     marker_color="red",
                     name="central"))
fig.update_layout(title="percentage distribution of different boards in ssc")
fig.show()<jupyter_output><empty_output> | 
	no_license | 
	/notebooks/yashvi/campus-recruitment-analysis.ipynb | 
	Sayem-Mohammad-Imtiaz/kaggle-notebooks | 25 | 
| 
	<jupyter_start><jupyter_text># MNIST Dataset Test
> Example adapted from https://www.tensorflow.org/tutorials/quickstart/beginner## Preparation- Import the required packages<jupyter_code>from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random as rdm
from os import path, curdir, makedirs<jupyter_output><empty_output><jupyter_text>- Define functions to save and load model weights<jupyter_code># Save model weights
def save_weights(model: tf.keras.Model, model_path: str, model_file='model') -> None:
    model_path = path.abspath(path.join(curdir, model_path))
    if not path.exists(model_path):
        makedirs(model_path, exist_ok=True)
    model_path = path.join(model_path, model_file)
    model.save_weights(model_path)
# Load model weights
def load_weights(model: tf.keras.Model, model_path: str, model_file='model') -> bool:
    model_path = path.abspath(path.join(curdir, model_path))
    if not path.exists(model_path):
        return False
    model_path = path.join(model_path, model_file)
    if not path.exists('{}.index'.format(model_path)):
        return False
    model.load_weights(model_path)
    return True<jupyter_output><empty_output><jupyter_text>- Define a helper function to display an image<jupyter_code>def show_image(img: np.ndarray, *, figsize=(1, 1), cmap='gray') -> None:
    plt.figure(figsize=figsize)
    if len(img.shape) > 2:
        img = img.reshape(img.shape[1:])
    plt.imshow(img, cmap=cmap)
    plt.show()<jupyter_output><empty_output><jupyter_text>## Handwritten Digit Recognition### Load the dataset<jupyter_code>handwriting_mnist = tf.keras.datasets.mnist
(x_train_hw, y_train_hw), (x_test_hw, y_test_hw) = handwriting_mnist.load_data()
x_train_hw, x_test_hw = x_train_hw / 255.0, x_test_hw / 255.0  # normalize to [0, 1]
print('* train and test data loaded, shape is: {}'.format(x_train_hw.shape))<jupyter_output><empty_output><jupyter_text>### Define a prediction function<jupyter_code>def handwriting_predict(model: tf.keras.Model) -> None:
    index = rdm.randint(0, len(x_test_hw))
    print('* random index is {}'.format(index))
    img = np.array([x_test_hw[index]])
    show_image(img.reshape(28, 28))
    result = tf.argmax(model.predict(img)[0])
    print('* result number is: {}'.format(result))<jupyter_output><empty_output><jupyter_text>### A softmax-only network- Define the model<jupyter_code>tf.keras.backend.clear_session()
model_hw_softmax = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),  # input layer: flatten the 28x28 image
    tf.keras.layers.Dropout(0.2),  # dropout regularization
    tf.keras.layers.Dense(10, activation='softmax')  # 10-unit softmax output layer
])
print(model_hw_softmax.summary())
# Compile the model: specify the optimizer, loss function and metrics
model_hw_softmax.compile(optimizer='adam',
                         loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])
print('* compile finished')<jupyter_output><empty_output><jupyter_text>- Train the model<jupyter_code>model_path = 'models/mnist_num_softmax'
if not load_weights(model_hw_softmax, model_path=model_path):
    model_hw_softmax.fit(x_train_hw, y_train_hw, epochs=5)
    save_weights(model_hw_softmax, model_path=model_path)
    
print('* training completed')<jupyter_output><empty_output><jupyter_text>- Evaluate the model<jupyter_code>loss, accuracy = model_hw_softmax.evaluate(x_test_hw, y_test_hw, verbose=2)
print('* test finished, loss is {}, accuracy is {}'.format(loss, accuracy))<jupyter_output><empty_output><jupyter_text>- Apply the model<jupyter_code>handwriting_predict(model_hw_softmax)<jupyter_output><empty_output><jupyter_text>### Adding a hidden layer- Define the model<jupyter_code>tf.keras.backend.clear_session()
model_hw_relu = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),  # input layer: flatten the 28x28 image
    tf.keras.layers.Dense(128, activation='relu'),  # fully connected layer with 128 units
    tf.keras.layers.Dropout(0.2),  # dropout regularization
    tf.keras.layers.Dense(10, activation='softmax')  # 10-unit softmax output layer
])
print(model_hw_relu.summary())
# Compile the model
model_hw_relu.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
print('* compile finished')<jupyter_output><empty_output><jupyter_text>- Train the model<jupyter_code>model_path = 'models/mnist_num_relu'
if not load_weights(model_hw_relu, model_path=model_path):
    model_hw_relu.fit(x_train_hw, y_train_hw, epochs=5)
    save_weights(model_hw_relu, model_path=model_path)
    
print('* training completed')<jupyter_output><empty_output><jupyter_text>- Evaluate the model<jupyter_code>loss, accuracy = model_hw_relu.evaluate(x_test_hw, y_test_hw, verbose=2)
print('* test finished, loss is {}, accuracy is {}'.format(loss, accuracy))<jupyter_output><empty_output><jupyter_text>- Apply the model<jupyter_code>handwriting_predict(model_hw_relu)<jupyter_output><empty_output><jupyter_text>## Clothing Classification
> Example adapted from https://www.tensorflow.org/tutorials/keras/classification- Define the clothing class names<jupyter_code>fashion_class_names = [
    'T-shirt/top',
    'Trouser',
    'Pullover',
    'Dress',
    'Coat',
    'Sandal',
    'Shirt',
    'Sneaker',
    'Bag',
    'Ankle boot',
]<jupyter_output><empty_output><jupyter_text>### Load the dataset<jupyter_code>fashion_mnist = tf.keras.datasets.fashion_mnist
(x_train_f, y_train_f), (x_test_f, y_test_f) = fashion_mnist.load_data()
x_train_f, x_test_f = x_train_f / 255.0, x_test_f / 255.0
print('* train and test data loaded, shape is: {}'.format(x_train_f.shape))<jupyter_output><empty_output><jupyter_text>### Define a prediction function<jupyter_code>def fashion_predict(model: tf.keras.Model, dataset=x_test_f) -> None:
    index = rdm.randint(0, len(dataset))
    print('* random index is {}'.format(index))
    img = np.array([dataset[index]])  # select random image from test list
    show_image(img.reshape((28, 28)))
    result = np.argmax(model.predict(img)[0])
    class_name = fashion_class_names[result]
    print('* result is "{}"'.format(class_name))<jupyter_output><empty_output><jupyter_text>### A network with a hidden layer and softmax- Define the model<jupyter_code>tf.keras.backend.clear_session()
model_f_relu = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),  # input layer: flatten the 28x28 image
    tf.keras.layers.Dense(128, activation='relu'),  # fully connected layer with 128 units
    tf.keras.layers.Dropout(0.2),  # dropout regularization
    tf.keras.layers.Dense(10, activation='softmax')  # 10-unit softmax output layer
])
print(model_f_relu.summary())
model_f_relu.compile(optimizer='adam',
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])
print('* compile finished')<jupyter_output><empty_output><jupyter_text>- Train the model<jupyter_code>model_path = 'models/mnist_fashion_dense'
if not load_weights(model_f_relu, model_path=model_path):
    model_f_relu.fit(x_train_f, y_train_f, epochs=5)
    save_weights(model_f_relu, model_path)
    
print('* training completed')<jupyter_output><empty_output><jupyter_text>- Evaluate the model<jupyter_code>loss, accuracy = model_f_relu.evaluate(x_test_f, y_test_f, verbose=2)
print('* test finished, loss is {}, accuracy is {}'.format(loss, accuracy))<jupyter_output><empty_output><jupyter_text>- Apply the model<jupyter_code>fashion_predict(model_f_relu)<jupyter_output><empty_output><jupyter_text>### Building the network with a CNN- Reshape the training data<jupyter_code># Add a channel dimension (single grayscale channel)
x_train_fcnn, x_test_fcnn = x_train_f.reshape(-1, 28, 28, 1), x_test_f.reshape(-1, 28, 28, 1)<jupyter_output><empty_output><jupyter_text>- Define the model<jupyter_code>tf.keras.backend.clear_session()
model_f_cnn = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),  # input layer: 28x28 single-channel images
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    tf.keras.layers.Dropout(0.25),  # dropout regularization
    tf.keras.layers.Flatten(),  # flatten to a vector
    tf.keras.layers.Dense(128, activation='relu'),  # fully connected layer with 128 units
    tf.keras.layers.Dropout(0.5),  # dropout regularization
    tf.keras.layers.Dense(10, activation='softmax')  # 10-unit softmax output layer
])
print(model_f_cnn.summary())
model_f_cnn.compile(optimizer='adam',
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
print('* compile finished')<jupyter_output><empty_output><jupyter_text>- Train the model<jupyter_code>model_path = 'models/mnist_fashion_cnn'
if not load_weights(model_f_cnn, model_path=model_path):
    model_f_cnn.fit(x_train_fcnn, y_train_f, epochs=5)
    save_weights(model_f_cnn, model_path=model_path)<jupyter_output><empty_output><jupyter_text>- Evaluate the model<jupyter_code>loss, accuracy = model_f_cnn.evaluate(x_test_fcnn, y_test_f, verbose=2)
print('* test finished, loss is {}, accuracy is {}'.format(loss, accuracy))<jupyter_output><empty_output><jupyter_text>- Apply the model<jupyter_code>fashion_predict(model_f_cnn, dataset=x_test_fcnn)<jupyter_output><empty_output> | 
	non_permissive | 
	/python/tf/mnist.ipynb | 
	alvin-qh/study-ai | 25 | 
| 
	<jupyter_start><jupyter_text># Fully-Connected Neural Nets
In the previous homework you implemented a fully-connected two-layer neural network on CIFAR-10. The implementation was simple but not very modular since the loss and gradient were computed in a single monolithic function. This is manageable for a simple two-layer network, but would become impractical as we move to bigger models. Ideally we want to build networks using a more modular design so that we can implement different layer types in isolation and then snap them together into models with different architectures.In this exercise we will implement fully-connected networks using a more modular approach. For each layer we will implement a `forward` and a `backward` function. The `forward` function will receive inputs, weights, and other parameters and will return both an output and a `cache` object storing data needed for the backward pass, like this:
```python
def layer_forward(x, w):
  """ Receive inputs x and weights w """
  # Do some computations ...
  z = # ... some intermediate value
  # Do some more computations ...
  out = # the output
   
  cache = (x, w, z, out) # Values we need to compute gradients
   
  return out, cache
```
The backward pass will receive upstream derivatives and the `cache` object, and will return gradients with respect to the inputs and weights, like this:
```python
def layer_backward(dout, cache):
  """
  Receive dout (derivative of loss with respect to outputs) and cache,
  and compute derivative with respect to inputs.
  """
  # Unpack cache values
  x, w, z, out = cache
  
  # Use values in cache to compute derivatives
  dx = # Derivative of loss with respect to x
  dw = # Derivative of loss with respect to w
  
  return dx, dw
```
After implementing a bunch of layers this way, we will be able to easily combine them to build classifiers with different architectures.
In addition to implementing fully-connected networks of arbitrary depth, we will also explore different update rules for optimization, and introduce Dropout as a regularizer and Batch/Layer Normalization as a tool to more efficiently optimize deep networks.
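As a concrete illustration of this layer API, an affine (fully-connected) layer could be written as follows. This is only a sketch; the graded implementations belong in `cs231n/layers.py`.

```python
import numpy as np

def affine_forward_sketch(x, w, b):
    """Forward pass: flatten each input example and apply a linear transform."""
    out = x.reshape(x.shape[0], -1).dot(w) + b
    cache = (x, w, b)                     # keep what the backward pass needs
    return out, cache

def affine_backward_sketch(dout, cache):
    """Backward pass: gradients with respect to inputs, weights, and biases."""
    x, w, b = cache
    dx = dout.dot(w.T).reshape(x.shape)   # back to the original input shape
    dw = x.reshape(x.shape[0], -1).T.dot(dout)
    db = dout.sum(axis=0)
    return dx, dw, db
```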
  <jupyter_code># As usual, a bit of setup
from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
  """ returns relative error """
  return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in list(data.items()):
  print(('%s: ' % k, v.shape))<jupyter_output>('X_train: ', (49000, 3, 32, 32))
('y_train: ', (49000,))
('X_val: ', (1000, 3, 32, 32))
('y_val: ', (1000,))
('X_test: ', (1000, 3, 32, 32))
('y_test: ', (1000,))
<jupyter_text># Affine layer: forward
Open the file `cs231n/layers.py` and implement the `affine_forward` function.
Once you are done you can test your implementation by running the following:<jupyter_code># Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967,  1.70660132,  1.91485297],
                        [ 3.25553199,  3.5141327,   3.77273342]])
# Compare your output with ours. The error should be around e-9 or less.
print('Testing affine_forward function:')
print('difference: ', rel_error(out, correct_out))<jupyter_output>Testing affine_forward function:
difference:  9.769849468192957e-10
<jupyter_text># Affine layer: backward
Now implement the `affine_backward` function and test your implementation using numeric gradient checking.<jupyter_code># Test the affine_backward function
np.random.seed(231)
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around e-10 or less
print('Testing affine_backward function:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))<jupyter_output>Testing affine_backward function:
dx error:  5.399100368651805e-11
dw error:  9.904211865398145e-11
db error:  2.4122867568119087e-11
<jupyter_text># ReLU activation: forward
Implement the forward pass for the ReLU activation function in the `relu_forward` function and test your implementation using the following:<jupyter_code># Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0.,          0.,          0.,          0.,        ],
                        [ 0.,          0.,          0.04545455,  0.13636364,],
                        [ 0.22727273,  0.31818182,  0.40909091,  0.5,       ]])
# Compare your output with ours. The error should be on the order of e-8
print('Testing relu_forward function:')
print('difference: ', rel_error(out, correct_out))<jupyter_output>Testing relu_forward function:
difference:  4.999999798022158e-08
<jupyter_text># ReLU activation: backward
Now implement the backward pass for the ReLU activation function in the `relu_backward` function and test your implementation using numeric gradient checking:<jupyter_code>np.random.seed(231)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be on the order of e-12
print('Testing relu_backward function:')
print('dx error: ', rel_error(dx_num, dx))<jupyter_output>Testing relu_backward function:
dx error:  3.2756349136310288e-12
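The ReLU pair is much simpler than the affine layer; a minimal version consistent with the checks above might look like this (a sketch, the graded versions belong in `cs231n/layers.py`):

```python
def relu_forward_sketch(x):
    out = np.maximum(0, x)   # zero out negative activations
    return out, x            # cache the input for the backward pass

def relu_backward_sketch(dout, cache):
    x = cache
    return dout * (x > 0)    # gradient flows only where the input was positive
```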
<jupyter_text>## Inline Question 1: 
We've only asked you to implement ReLU, but there are a number of different activation functions that one could use in neural networks, each with its pros and cons. In particular, an issue commonly seen with activation functions is getting zero (or close to zero) gradient flow during backpropagation. Which of the following activation functions have this problem? If you consider these functions in the one dimensional case, what types of input would lead to this behaviour?
1. Sigmoid
2. ReLU
3. Leaky ReLU
## Answer:
Sigmoid has this problem: for inputs of large magnitude (very positive or very negative) it saturates, so its local gradient is nearly zero. ReLU has it too: for any negative input the output is zero and the gradient is exactly zero. Leaky ReLU avoids the issue because its slope is nonzero everywhere.
# "Sandwich" layers
There are some common patterns of layers that are frequently used in neural nets. For example, affine layers are frequently followed by a ReLU nonlinearity. To make these common patterns easy, we define several convenience layers in the file `cs231n/layer_utils.py`.
For now take a look at the `affine_relu_forward` and `affine_relu_backward` functions, and run the following to numerically gradient check the backward pass:<jupyter_code>from cs231n.layer_utils import affine_relu_forward, affine_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
# Relative error should be around e-10 or less
print('Testing affine_relu_forward and affine_relu_backward:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))<jupyter_output>Testing affine_relu_forward and affine_relu_backward:
dx error:  2.299579177309368e-11
dw error:  8.162011105764925e-11
db error:  7.826724021458994e-12
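For illustration, such a sandwich layer is just a composition of the two primitives, with their caches bundled together (a sketch mirroring the pattern described above):

```python
def affine_relu_forward_sketch(x, w, b):
    a, fc_cache = affine_forward(x, w, b)      # linear transform
    out, relu_cache = relu_forward(a)          # nonlinearity
    return out, (fc_cache, relu_cache)

def affine_relu_backward_sketch(dout, cache):
    fc_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)       # backprop through ReLU first
    return affine_backward(da, fc_cache)       # then through the affine layer
```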
<jupyter_text># Loss layers: Softmax and SVM
You implemented these loss functions in the last assignment, so we'll give them to you for free here. You should still make sure you understand how they work by looking at the implementations in `cs231n/layers.py`.
You can make sure that the implementations are correct by running the following:<jupyter_code>np.random.seed(231)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be around the order of e-9
print('Testing svm_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be close to 2.3 and dx error should be around e-8
print('\nTesting softmax_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))<jupyter_output>Testing svm_loss:
loss:  8.999602749096233
dx error:  1.4021566006651672e-09
Testing softmax_loss:
loss:  2.302545844500738
dx error:  9.384673161989355e-09
<jupyter_text># Two-layer network
In the previous assignment you implemented a two-layer neural network in a single monolithic class. Now that you have implemented modular versions of the necessary layers, you will reimplement the two layer network using these modular implementations.
Open the file `cs231n/classifiers/fc_net.py` and complete the implementation of the `TwoLayerNet` class. This class will serve as a model for the other networks you will implement in this assignment, so read through it to make sure you understand the API. You can run the cell below to test your implementation.<jupyter_code>np.random.seed(231)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-3
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)
print('Testing initialization ... ')
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'
print('Testing test-time forward pass ... ')
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)
correct_scores = np.asarray(
  [[11.53165108,  12.2917344,   13.05181771,  13.81190102,  14.57198434, 15.33206765,  16.09215096],
   [12.05769098,  12.74614105,  13.43459113,  14.1230412,   14.81149128, 15.49994135,  16.18839143],
   [12.58373087,  13.20054771,  13.81736455,  14.43418138,  15.05099822, 15.66781506,  16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print('Testing training loss (no regularization)')
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
print(loss,correct_loss)
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
# Errors should be around e-7 or less
for reg in [0.0, 0.7]:
  print('Running numeric gradient check with reg = ', reg)
  model.reg = reg
  loss, grads = model.loss(X, y)
  for name in sorted(grads):
    f = lambda _: model.loss(X, y)[0]
    grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
    print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))<jupyter_output>Testing initialization ... 
Testing test-time forward pass ... 
Testing training loss (no regularization)
26.594842695238583 26.5948426952
Running numeric gradient check with reg =  0.0
W1 relative error: 1.83e-08
W2 relative error: 3.12e-10
b1 relative error: 9.83e-09
b2 relative error: 4.33e-10
Running numeric gradient check with reg =  0.7
W1 relative error: 2.53e-07
W2 relative error: 2.85e-08
b1 relative error: 1.00e+00
b2 relative error: 1.00e+00
<jupyter_text># Solver
In the previous assignment, the logic for training models was coupled to the models themselves. Following a more modular design, for this assignment we have split the logic for training models into a separate class.
Open the file `cs231n/solver.py` and read through it to familiarize yourself with the API. After doing so, use a `Solver` instance to train a `TwoLayerNet` that achieves at least `50%` accuracy on the validation set.<jupyter_code># model = TwoLayerNet(reg=1)
# # solver = None
# ##############################################################################
# # TODO: Use a Solver instance to train a TwoLayerNet that achieves at least  #
# # 50% accuracy on the validation set.                                        #
# ##############################################################################
# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# solver = Solver(model, data)
# solver.train()
# # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# ##############################################################################
# #                             END OF YOUR CODE                               #
# ##############################################################################
# def generate_random_hyperparams(lr_min, lr_max, reg_min, reg_max, h_min, h_max):
#     lr = 10**np.random.uniform(lr_min,lr_max)
#     reg = 10**np.random.uniform(reg_min,reg_max)
#     hidden = np.random.randint(h_min, h_max)
#     return lr, reg, hidden
    
lr, reg, hidden_size = 5e-4, 0.05, 100  # example values chosen by hand; the random search above is commented out
print(lr, reg, hidden_size)
model = TwoLayerNet(hidden_dim=hidden_size, reg=reg)
solver = Solver(model, data, update_rule='sgd', optim_config={'learning_rate': lr},
                print_every=-1, verbose=False)
solver.train()
# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()<jupyter_output><empty_output><jupyter_text># Multilayer network
Next you will implement a fully-connected network with an arbitrary number of hidden layers.
Read through the `FullyConnectedNet` class in the file `cs231n/classifiers/fc_net.py`.
Implement the initialization, the forward pass, and the backward pass. For the moment don't worry about implementing dropout or batch/layer normalization; we will add those features soon.## Initial loss and gradient check
As a sanity check, run the following to check the initial loss and to gradient check the network both with and without regularization. Do the initial losses seem reasonable?
For gradient checking, you should expect to see errors around 1e-7 or less.<jupyter_code>np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
  print('Running check with reg = ', reg)
  model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                            reg=reg, weight_scale=5e-2, dtype=np.float64)
  loss, grads = model.loss(X, y)
  print('Initial loss: ', loss)
  
  # Most of the errors should be on the order of e-7 or smaller.   
  # NOTE: It is fine however to see an error for W2 on the order of e-5
  # for the check when reg = 0.0
  for name in sorted(grads):
    f = lambda _: model.loss(X, y)[0]
    grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
    print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))<jupyter_output>Running check with reg =  0
Initial loss:  2.3004790897684924
W1 relative error: 1.48e-07
W2 relative error: 2.21e-05
W3 relative error: 3.53e-07
b1 relative error: 5.38e-09
b2 relative error: 2.09e-09
b3 relative error: 5.80e-11
Running check with reg =  3.14
Initial loss:  7.052114776533016
W1 relative error: 6.86e-09
W2 relative error: 3.52e-08
W3 relative error: 1.32e-08
b1 relative error: 1.48e-08
b2 relative error: 1.72e-09
b3 relative error: 1.80e-10
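To make the structure concrete, the forward pass of such an L-layer net is just a loop over sandwich layers followed by a final affine layer. A sketch (illustration only; `params` holds 'W1', 'b1', ... as described above, and the backward pass would walk the caches in reverse):

```python
def fc_net_forward_sketch(X, params, num_layers):
    caches = []
    out = X
    for i in range(1, num_layers):                       # hidden layers: affine -> ReLU
        out, cache = affine_relu_forward(out, params['W%d' % i], params['b%d' % i])
        caches.append(cache)
    # the final scoring layer has no nonlinearity
    scores, cache = affine_forward(out, params['W%d' % num_layers], params['b%d' % num_layers])
    caches.append(cache)
    return scores, caches
```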
<jupyter_text>As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. In the following cell, tweak the **learning rate** and **weight initialization scale** to overfit and achieve 100% training accuracy within 20 epochs.<jupyter_code># TODO: Use a three-layer Net to overfit 50 training examples by 
# tweaking just the learning rate and initialization scale.
num_train = 50
small_data = {
  'X_train': data['X_train'][:num_train],
  'y_train': data['y_train'][:num_train],
  'X_val': data['X_val'],
  'y_val': data['y_val'],
}
weight_scale = 1e-2   # Experiment with this!
learning_rate = 1e-2  # Experiment with this!
model = FullyConnectedNet([100, 100],
              weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
                print_every=10, num_epochs=20, batch_size=25,
                update_rule='sgd',
                optim_config={
                  'learning_rate': learning_rate,
                }
         )
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()<jupyter_output>(Iteration 1 / 40) loss: 2.363364
(Epoch 0 / 20) train acc: 0.180000; val_acc: 0.108000
(Epoch 1 / 20) train acc: 0.320000; val_acc: 0.127000
(Epoch 2 / 20) train acc: 0.440000; val_acc: 0.172000
(Epoch 3 / 20) train acc: 0.500000; val_acc: 0.184000
(Epoch 4 / 20) train acc: 0.540000; val_acc: 0.181000
(Epoch 5 / 20) train acc: 0.740000; val_acc: 0.190000
(Iteration 11 / 40) loss: 0.839976
(Epoch 6 / 20) train acc: 0.740000; val_acc: 0.187000
(Epoch 7 / 20) train acc: 0.740000; val_acc: 0.183000
(Epoch 8 / 20) train acc: 0.820000; val_acc: 0.177000
(Epoch 9 / 20) train acc: 0.860000; val_acc: 0.200000
(Epoch 10 / 20) train acc: 0.920000; val_acc: 0.191000
(Iteration 21 / 40) loss: 0.337174
(Epoch 11 / 20) train acc: 0.960000; val_acc: 0.189000
(Epoch 12 / 20) train acc: 0.940000; val_acc: 0.180000
(Epoch 13 / 20) train acc: 1.000000; val_acc: 0.199000
(Epoch 14 / 20) train acc: 1.000000; val_acc: 0.199000
(Epoch 15 / 20) train acc: 1.000000; val_acc: 0.195000
(Iteration 31 / 40) loss: [...]<jupyter_text>Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again, you will have to adjust the learning rate and weight initialization scale, but you should be able to achieve 100% training accuracy within 20 epochs.<jupyter_code># TODO: Use a five-layer Net to overfit 50 training examples by 
# tweaking just the learning rate and initialization scale.
num_train = 50
small_data = {
  'X_train': data['X_train'][:num_train],
  'y_train': data['y_train'][:num_train],
  'X_val': data['X_val'],
  'y_val': data['y_val'],
}
learning_rate = 2e-3  # Experiment with this!
weight_scale = 1e-1   # Experiment with this!
model = FullyConnectedNet([100, 100, 100, 100],
                weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
                print_every=10, num_epochs=20, batch_size=25,
                update_rule='sgd',
                optim_config={
                  'learning_rate': learning_rate,
                }
         )
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()<jupyter_output>(Iteration 1 / 40) loss: 166.501707
(Epoch 0 / 20) train acc: 0.100000; val_acc: 0.107000
(Epoch 1 / 20) train acc: 0.320000; val_acc: 0.101000
(Epoch 2 / 20) train acc: 0.160000; val_acc: 0.122000
(Epoch 3 / 20) train acc: 0.380000; val_acc: 0.106000
(Epoch 4 / 20) train acc: 0.520000; val_acc: 0.111000
(Epoch 5 / 20) train acc: 0.760000; val_acc: 0.113000
(Iteration 11 / 40) loss: 3.343141
(Epoch 6 / 20) train acc: 0.840000; val_acc: 0.122000
(Epoch 7 / 20) train acc: 0.920000; val_acc: 0.113000
(Epoch 8 / 20) train acc: 0.940000; val_acc: 0.125000
(Epoch 9 / 20) train acc: 0.960000; val_acc: 0.125000
(Epoch 10 / 20) train acc: 0.980000; val_acc: 0.121000
(Iteration 21 / 40) loss: 0.039138
(Epoch 11 / 20) train acc: 0.980000; val_acc: 0.123000
(Epoch 12 / 20) train acc: 1.000000; val_acc: 0.121000
(Epoch 13 / 20) train acc: 1.000000; val_acc: 0.121000
(Epoch 14 / 20) train acc: 1.000000; val_acc: 0.121000
(Epoch 15 / 20) train acc: 1.000000; val_acc: 0.121000
(Iteration 31 / 40) loss[...]<jupyter_text>## Inline Question 2: 
Did you notice anything about the comparative difficulty of training the three-layer net vs training the five layer net? In particular, based on your experience, which network seemed more sensitive to the initialization scale? Why do you think that is the case?
## Answer:
The five-layer net is noticeably harder to train and is much more sensitive to the weight initialization scale. With more layers, the scale of the activations and gradients gets multiplied by the weight scale at every layer, so a scale that is slightly too small makes the signal vanish and one that is slightly too large makes it explode; the shallower three-layer net tolerates a much wider range of scales.
# Update rules
So far we have used vanilla stochastic gradient descent (SGD) as our update rule. More sophisticated update rules can make it easier to train deep networks. We will implement a few of the most commonly used update rules and compare them to vanilla SGD.# SGD+Momentum
Stochastic gradient descent with momentum is a widely used update rule that tends to make deep networks converge faster than vanilla stochastic gradient descent. See the Momentum Update section at http://cs231n.github.io/neural-networks-3/#sgd for more information.
Open the file `cs231n/optim.py` and read the documentation at the top of the file to make sure you understand the API. Implement the SGD+momentum update rule in the function `sgd_momentum` and run the following to check your implementation. You should see errors less than e-8.<jupyter_code>from cs231n.optim import sgd_momentum
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)
expected_next_w = np.asarray([
  [ 0.1406,      0.20738947,  0.27417895,  0.34096842,  0.40775789],
  [ 0.47454737,  0.54133684,  0.60812632,  0.67491579,  0.74170526],
  [ 0.80849474,  0.87528421,  0.94207368,  1.00886316,  1.07565263],
  [ 1.14244211,  1.20923158,  1.27602105,  1.34281053,  1.4096    ]])
expected_velocity = np.asarray([
  [ 0.5406,      0.55475789,  0.56891579, 0.58307368,  0.59723158],
  [ 0.61138947,  0.62554737,  0.63970526,  0.65386316,  0.66802105],
  [ 0.68217895,  0.69633684,  0.71049474,  0.72465263,  0.73881053],
  [ 0.75296842,  0.76712632,  0.78128421,  0.79544211,  0.8096    ]])
# Should see relative errors around e-8 or less
print('next_w error: ', rel_error(next_w, expected_next_w))
print('velocity error: ', rel_error(expected_velocity, config['velocity']))<jupyter_output>next_w error:  8.882347033505819e-09
velocity error:  4.269287743278663e-09
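For reference, one common formulation of the momentum update that matches the check above (a sketch; the defaults here are assumptions and the graded version belongs in `cs231n/optim.py`):

```python
def sgd_momentum_sketch(w, dw, config=None):
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w))

    v = config['momentum'] * v - config['learning_rate'] * dw  # accumulate a velocity
    next_w = w + v                                             # step in the velocity direction

    config['velocity'] = v
    return next_w, config
```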
<jupyter_text>Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.<jupyter_code>num_train = 4000
small_data = {
  'X_train': data['X_train'][:num_train],
  'y_train': data['y_train'][:num_train],
  'X_val': data['X_val'],
  'y_val': data['y_val'],
}
solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
  print('running with ', update_rule)
  model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
  solver = Solver(model, small_data,
                  num_epochs=5, batch_size=100,
                  update_rule=update_rule,
                  optim_config={
                    'learning_rate': 5e-3,
                  },
                  verbose=True)
  solvers[update_rule] = solver
  solver.train()
  print()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.items():
  plt.subplot(3, 1, 1)
  plt.plot(solver.loss_history, 'o', label="loss_%s" % update_rule)
  
  plt.subplot(3, 1, 2)
  plt.plot(solver.train_acc_history, '-o', label="train_acc_%s" % update_rule)
  plt.subplot(3, 1, 3)
  plt.plot(solver.val_acc_history, '-o', label="val_acc_%s" % update_rule)
  
for i in [1, 2, 3]:
  plt.subplot(3, 1, i)
  plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()<jupyter_output>running with  sgd
(Iteration 1 / 200) loss: 2.559978
(Epoch 0 / 5) train acc: 0.104000; val_acc: 0.107000
(Iteration 11 / 200) loss: 2.356069
(Iteration 21 / 200) loss: 2.214091
(Iteration 31 / 200) loss: 2.205928
(Epoch 1 / 5) train acc: 0.225000; val_acc: 0.193000
(Iteration 41 / 200) loss: 2.132095
(Iteration 51 / 200) loss: 2.118950
(Iteration 61 / 200) loss: 2.116443
(Iteration 71 / 200) loss: 2.132549
(Epoch 2 / 5) train acc: 0.298000; val_acc: 0.260000
(Iteration 81 / 200) loss: 1.977227
(Iteration 91 / 200) loss: 2.007528
(Iteration 101 / 200) loss: 2.004762
(Iteration 111 / 200) loss: 1.885342
(Epoch 3 / 5) train acc: 0.343000; val_acc: 0.287000
(Iteration 121 / 200) loss: 1.891517
(Iteration 131 / 200) loss: 1.923677
(Iteration 141 / 200) loss: 1.957744
(Iteration 151 / 200) loss: 1.966736
(Epoch 4 / 5) train acc: 0.322000; val_acc: 0.305000
(Iteration 161 / 200) loss: 1.801483
(Iteration 171 / 200) loss: 1.973779
(Iteration 181 / 200) loss: 1.666572
(Iteration 191 / 200) los[...]<jupyter_text># RMSProp and Adam
RMSProp [1] and Adam [2] are update rules that set per-parameter learning rates by using a running average of the second moments of gradients.
In the file `cs231n/optim.py`, implement the RMSProp update rule in the `rmsprop` function and implement the Adam update rule in the `adam` function, and check your implementations using the tests below.
**NOTE:** Please implement the _complete_ Adam update rule (with the bias correction mechanism), not the first simplified version mentioned in the course notes. 
[1] Tijmen Tieleman and Geoffrey Hinton. "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude." COURSERA: Neural Networks for Machine Learning 4 (2012).
[2] Diederik Kingma and Jimmy Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015.<jupyter_code># Test RMSProp implementation
from cs231n.optim import rmsprop
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)
expected_next_w = np.asarray([
  [-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
  [-0.132737,   -0.08078555, -0.02881884,  0.02316247,  0.07515774],
  [ 0.12716641,  0.17918792,  0.23122175,  0.28326742,  0.33532447],
  [ 0.38739248,  0.43947102,  0.49155973,  0.54365823,  0.59576619]])
expected_cache = np.asarray([
  [ 0.5976,      0.6126277,   0.6277108,   0.64284931,  0.65804321],
  [ 0.67329252,  0.68859723,  0.70395734,  0.71937285,  0.73484377],
  [ 0.75037008,  0.7659518,   0.78158892,  0.79728144,  0.81302936],
  [ 0.82883269,  0.84469141,  0.86060554,  0.87657507,  0.8926    ]])
# You should see relative errors around e-7 or less
print('next_w error: ', rel_error(expected_next_w, next_w))
print('cache error: ', rel_error(expected_cache, config['cache']))
# Test Adam implementation
from cs231n.optim import adam
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)
expected_next_w = np.asarray([
  [-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
  [-0.1380274,  -0.08544591, -0.03286534,  0.01971428,  0.0722929],
  [ 0.1248705,   0.17744702,  0.23002243,  0.28259667,  0.33516969],
  [ 0.38774145,  0.44031188,  0.49288093,  0.54544852,  0.59801459]])
expected_v = np.asarray([
  [ 0.69966,     0.68908382,  0.67851319,  0.66794809,  0.65738853,],
  [ 0.64683452,  0.63628604,  0.6257431,   0.61520571,  0.60467385,],
  [ 0.59414753,  0.58362676,  0.57311152,  0.56260183,  0.55209767,],
  [ 0.54159906,  0.53110598,  0.52061845,  0.51013645,  0.49966,   ]])
expected_m = np.asarray([
  [ 0.48,        0.49947368,  0.51894737,  0.53842105,  0.55789474],
  [ 0.57736842,  0.59684211,  0.61631579,  0.63578947,  0.65526316],
  [ 0.67473684,  0.69421053,  0.71368421,  0.73315789,  0.75263158],
  [ 0.77210526,  0.79157895,  0.81105263,  0.83052632,  0.85      ]])
# You should see relative errors around e-7 or less
print('next_w error: ', rel_error(expected_next_w, next_w))
print('v error: ', rel_error(expected_v, config['v']))
print('m error: ', rel_error(expected_m, config['m']))<jupyter_output>next_w error:  1.1395691798535431e-07
v error:  4.208314038113071e-09
m error:  4.214963193114416e-09
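For reference, a sketch of the complete Adam update with bias correction, consistent with the numbers checked above (the graded version belongs in `cs231n/optim.py`; the default hyperparameters are the usual published ones and are assumptions here):

```python
def adam_sketch(w, dw, config):
    beta1 = config.get('beta1', 0.9)
    beta2 = config.get('beta2', 0.999)
    lr = config['learning_rate']
    eps = config.get('epsilon', 1e-8)
    config.setdefault('m', np.zeros_like(w))
    config.setdefault('v', np.zeros_like(w))
    config.setdefault('t', 0)

    config['t'] += 1
    config['m'] = beta1 * config['m'] + (1 - beta1) * dw          # first moment estimate
    config['v'] = beta2 * config['v'] + (1 - beta2) * (dw ** 2)   # second moment estimate
    m_hat = config['m'] / (1 - beta1 ** config['t'])              # bias-corrected moments
    v_hat = config['v'] / (1 - beta2 ** config['t'])

    next_w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return next_w, config
```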
<jupyter_text>Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules:<jupyter_code>learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
  print('running with ', update_rule)
  model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
  solver = Solver(model, small_data,
                  num_epochs=5, batch_size=100,
                  update_rule=update_rule,
                  optim_config={
                    'learning_rate': learning_rates[update_rule]
                  },
                  verbose=True)
  solvers[update_rule] = solver
  solver.train()
  print()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in list(solvers.items()):
  plt.subplot(3, 1, 1)
  plt.plot(solver.loss_history, 'o', label=update_rule)
  
  plt.subplot(3, 1, 2)
  plt.plot(solver.train_acc_history, '-o', label=update_rule)
  plt.subplot(3, 1, 3)
  plt.plot(solver.val_acc_history, '-o', label=update_rule)
  
for i in [1, 2, 3]:
  plt.subplot(3, 1, i)
  plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()<jupyter_output>running with  adam
(Iteration 1 / 200) loss: 3.476928
(Epoch 0 / 5) train acc: 0.126000; val_acc: 0.110000
(Iteration 11 / 200) loss: 2.027712
(Iteration 21 / 200) loss: 2.183358
(Iteration 31 / 200) loss: 1.744257
(Epoch 1 / 5) train acc: 0.363000; val_acc: 0.330000
(Iteration 41 / 200) loss: 1.707951
(Iteration 51 / 200) loss: 1.703835
(Iteration 61 / 200) loss: 2.094758
(Iteration 71 / 200) loss: 1.505558
(Epoch 2 / 5) train acc: 0.419000; val_acc: 0.362000
(Iteration 81 / 200) loss: 1.594431
(Iteration 91 / 200) loss: 1.511452
(Iteration 101 / 200) loss: 1.389230
(Iteration 111 / 200) loss: 1.465176
(Epoch 3 / 5) train acc: 0.506000; val_acc: 0.379000
(Iteration 121 / 200) loss: 1.226219
(Iteration 131 / 200) loss: 1.475588
(Iteration 141 / 200) loss: 1.397937
(Iteration 151 / 200) loss: 1.279100
(Epoch 4 / 5) train acc: 0.543000; val_acc: 0.356000
(Iteration 161 / 200) loss: 1.370917
(Iteration 171 / 200) loss: 1.291703
(Iteration 181 / 200) loss: 1.156816
(Iteration 191 / 200) lo[...]<jupyter_text>## Inline Question 3:
AdaGrad, like Adam, is a per-parameter optimization method that uses the following update rule:
```
cache += dw**2
w += - learning_rate * dw / (np.sqrt(cache) + eps)
```
John notices that when he was training a network with AdaGrad that the updates became very small, and that his network was learning slowly. Using your knowledge of the AdaGrad update rule, why do you think the updates would become very small? Would Adam have the same issue?
## Answer: 
In AdaGrad the cache accumulates the squared gradients and never shrinks, so np.sqrt(cache) grows monotonically and the effective step size decays toward zero over the course of training, which makes learning slow down and eventually stall. Adam would not have the same issue: it keeps an exponentially decaying moving average of the squared gradients (with bias correction) instead of an ever-growing sum, so the denominator stays bounded.# Train a good model!
Train the best fully-connected model that you can on CIFAR-10, storing your best model in the `best_model` variable. We require you to get at least 50% accuracy on the validation set using a fully-connected net.
If you are careful it should be possible to get accuracies above 55%, but we don't require it for this part and won't assign extra credit for doing so. Later in the assignment we will ask you to train the best convolutional network that you can on CIFAR-10, and we would prefer that you spend your effort working on convolutional nets rather than fully-connected nets.
You might find it useful to complete the `BatchNormalization.ipynb` and `Dropout.ipynb` notebooks before completing this part, since those techniques can help you train powerful models.<jupyter_code>best_model = None
################################################################################
# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might   #
# find batch/layer normalization and dropout useful. Store your best model in  #
# the best_model variable.                                                     #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# A sketch of one reasonable configuration (not the only valid answer); the
# hyperparameters below may need further tuning to clear the 50% validation
# accuracy target.
best_model = FullyConnectedNet([100, 100, 100], weight_scale=5e-2, reg=1e-2)
best_solver = Solver(best_model, data,
                     num_epochs=10, batch_size=100,
                     update_rule='adam',
                     optim_config={'learning_rate': 1e-3},
                     print_every=200, verbose=True)
best_solver.train()
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                              END OF YOUR CODE                                #
################################################################################<jupyter_output><empty_output><jupyter_text># Test your model!
Run your best model on the validation and test sets. You should achieve above 50% accuracy on the validation set.<jupyter_code>y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1)
y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1)
print('Validation set accuracy: ', (y_val_pred == data['y_val']).mean())
print('Test set accuracy: ', (y_test_pred == data['y_test']).mean())<jupyter_output><empty_output> | 
	no_license | 
	/assignment2/.ipynb_checkpoints/FullyConnectedNets-checkpoint.ipynb | 
	willbryk720/cs231_solutions | 18 | 
| 
	<jupyter_start><jupyter_text>Guided Activity 1: Optimization Algorithms
David Boñar
https://colab.research.google.com/drive/12Uogb_wSXuY_pVLANr7NY6gxIXHkIvSr
https://github.com/davidbonar1/03MAIR_Algoritmos_de_Optimizacion_2019
Problem 1: Write a function that solves the Towers of Hanoi problem.<jupyter_code>def torres_hanoy(N,desde,hasta):
  if N==1:
    print("Lleva la ficha desde {} hasta {}.".format(desde,hasta))
  else:
    torres_hanoy(N-1,desde,6 -desde - hasta) # 6 - desde - hasta always yields the peg not involved in this move
    print("Lleva la ficha desde {} hasta {}.".format(desde,hasta))
    torres_hanoy(N-1,6 -hasta - desde,hasta)
torres_hanoy(4,1,3)<jupyter_output>Move the disc from 1 to 2.
Move the disc from 1 to 3.
Move the disc from 2 to 3.
Move the disc from 1 to 2.
Move the disc from 3 to 1.
Move the disc from 3 to 2.
Move the disc from 1 to 2.
Move the disc from 1 to 3.
Move the disc from 2 to 3.
Move the disc from 2 to 1.
Move the disc from 3 to 1.
Move the disc from 2 to 3.
Move the disc from 1 to 2.
Move the disc from 1 to 3.
Move the disc from 2 to 3.
<jupyter_text>Problem 2: Implement a system that, given an amount and the values of the coins available for change, returns the requested amount using the smallest possible number of coins<jupyter_code>def cambio_monedas(cantidad,sistema_monetario):
  solucion = []
  valor_acumulado = 0
  if len(sistema_monetario) != 0:
    for i in range(len(sistema_monetario)):
        monedas = int((cantidad - valor_acumulado)/sistema_monetario[i])
        solucion.append(monedas)
        valor_acumulado += monedas * sistema_monetario[i]
        if valor_acumulado == cantidad:
          return solucion
    if valor_acumulado != cantidad:
      print("Es imposible realizar todo el cambio con el sistema elegido. El cambio más cercano es el siguiente: ".format(solucion))
      return solucion
sol = cambio_monedas(21,[25,10,5,1])
print(sol)<jupyter_output>[0, 2, 0, 1]
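Note that this greedy strategy is only guaranteed to be optimal for canonical coin systems such as [25, 10, 5, 1]; a quick illustrative counterexample:

```python
# With coins [4, 3, 1] and amount 6, the greedy function returns [1, 0, 2]
# (4 + 1 + 1, three coins), while the optimum is two coins (3 + 3).
print(cambio_monedas(6, [4, 3, 1]))
```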
<jupyter_text>Problem 3: Write a function that solves the N-queens problem. On an NxN board there are N queens; place them so that no queen is under attack from another.<jupyter_code>def reinas(N, solucion,etapa):
  
  if N < etapa: 
    return False
  for i in range(1,N+1):
    solucion[etapa] = i
    if es_prometedora(solucion,etapa):
        if etapa == N-1:
          print("\nLa solución es: {}\n".format(solucion))
        else:
          reinas(N,solucion,etapa+1)
    else:
      None
    solucion[etapa] = 0
def es_prometedora(solucion,etapa):
  for i in range(etapa+1): # check that no two queens share the same column
    if solucion.count(solucion[i])>1:
      return False
    for j in range(i+1,etapa + 1): # check that no two queens share a diagonal
      if abs(i-j) == abs(solucion[i]-solucion[j]):
        return False
  return True
  
reinas(4,[0,0,0,0],0)<jupyter_output>
The solution is: [2, 4, 1, 3]
The solution is: [3, 1, 4, 2]
<jupyter_text>Problem 4: Find the closest pair of points in 1D, 2D and 3D.
For 1D use brute force; for 2D and higher, use divide and conquer.<jupyter_code>def puntos_cercanos_1d(lista):
  distancia_menor = 1000000
  numeros_cercanos = []
  for i in range(len(lista)):
    for j in range(i+1,len(lista)):
      distancia = abs(lista[i]-lista[j])
      if distancia < distancia_menor:
        numeros_cercanos = []
        distancia_menor = distancia
        numeros_cercanos.append(lista[i])
        numeros_cercanos.append(lista[j])
  return distancia_menor, numeros_cercanos
import random
import math
LISTA_1D = [random.randrange(1,1000) for x in range(7)]
LISTA_2D = [(random.randrange(1,1000),random.randrange(1,1000)) for x in range(3)]
distancia_menor, numeros_cercanos = puntos_cercanos_1d(LISTA_1D)
print(LISTA_1D)
print("Los numeros más cercanos son: " + str(numeros_cercanos) + ", cuya distancia es "+ str(distancia_menor))<jupyter_output>[3, 13, 446, 635, 586, 188, 373]
Los numeros más cercanos son: [3, 13], cuya distancia es 10
<jupyter_text>For one dimension, the brute-force approach has quadratic complexity, O(N^2); a divide-and-conquer sketch for the 2D case is included in the cell below.<jupyter_code>print(LISTA_2D)
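# Illustrative divide-and-conquer closest-pair sketch for 2D (an assumption: the
# notebook's own distancia_n routine is not included in this excerpt, so this
# helper uses its own name and works on a list of (x, y) tuples).
def closest_pair_2d(points):
    def dist(a, b):
        return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)

    def brute(pts):
        best = (float("inf"), None)
        for i in range(len(pts)):
            for j in range(i + 1, len(pts)):
                d = dist(pts[i], pts[j])
                if d < best[0]:
                    best = (d, (pts[i], pts[j]))
        return best

    def rec(pts_x):  # pts_x is sorted by x-coordinate
        if len(pts_x) <= 3:
            return brute(pts_x)
        mid = len(pts_x) // 2
        mid_x = pts_x[mid][0]
        best = min(rec(pts_x[:mid]), rec(pts_x[mid:]), key=lambda t: t[0])
        # Only points within best-distance of the dividing line can improve on it.
        strip = sorted((p for p in pts_x if abs(p[0] - mid_x) < best[0]),
                       key=lambda p: p[1])
        for i in range(len(strip)):
            for j in range(i + 1, min(i + 8, len(strip))):  # checking the next 7 neighbours in y-order suffices
                d = dist(strip[i], strip[j])
                if d < best[0]:
                    best = (d, (strip[i], strip[j]))
        return best

    return rec(sorted(points))

print(closest_pair_2d(LISTA_2D))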
distancia_n(LISTA_2D,2,0)<jupyter_output><empty_output> | 
	no_license | 
	/David_Boñar_AG1.ipynb | 
	davidbonar1/03MAIR_Algoritmos_de_Optimizacion_2019 | 5 | 
| 
	<jupyter_start><jupyter_text># Exit Survey Analysis
Organizations would typically want to understand why its employees resign. This information is usually gathered by using exit surveys that resigning employees are asked to take.
In this project, we'll work with exit surveys from employees of the Department of Education, Training and Employment (DETE) and the Technical and Further Education (TAFE) institute in Queensland, Australia. 
Data sources:
* [TAFE exit survey](https://data.gov.au/dataset/ds-qld-89970a3b-182b-41ea-aea2-6f9f17b5907e/details?q=exit%20survey)
* [DETE survey](https://data.gov.au/dataset/ds-qld-fe96ff30-d157-4a81-851d-215f2a0fe26d/details?q=exit%20survey)
Our main questions in this analysis are:
-> Are employees who only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been there longer?
-> Are younger employees resigning due to some kind of dissatisfaction? What about older employees?
<jupyter_code>#import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
#load in the datasets
dete_survey = pd.read_csv('dete_survey.csv')
tafe_survey = pd.read_csv('tafe_survey.csv')
dete_survey.head()
dete_survey.shape
dete_survey.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 822 entries, 0 to 821
Data columns (total 56 columns):
ID                                     822 non-null int64
SeparationType                         822 non-null object
Cease Date                             822 non-null object
DETE Start Date                        822 non-null object
Role Start Date                        822 non-null object
Position                               817 non-null object
Classification                         455 non-null object
Region                                 822 non-null object
Business Unit                          126 non-null object
Employment Status                      817 non-null object
Career move to public sector           822 non-null bool
Career move to private sector          822 non-null bool
Interpersonal conflicts                822 non-null bool
Job dissatisfaction                    822 non-null bool
Dissatisfaction with the department    822 non-null bool
Physical work environ[...]<jupyter_text>Some observations about the DETE survey:
* dete_survey has mostly string columns, followed by boolean columns, and just one integer column, the ID column
* dete_survey seems to have quite a few missing values; most striking are the 'aboriginal', 'torres strait', 'south sea', 'disability' and 'NESB' columns, which mostly contain null values
* there are values recorded as 'Not Stated'; these could actually be treated as null values instead of strings
* there are lots of columns here that we don't need to answer our questions
* there are multiple columns that indicate dissatisfaction<jupyter_code>tafe_survey.head()
tafe_survey.shape
tafe_survey.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 702 entries, 0 to 701
Data columns (total 72 columns):
Record ID                                                                                                                                                        702 non-null float64
Institute                                                                                                                                                        702 non-null object
WorkArea                                                                                                                                                         702 non-null object
CESSATION YEAR                                                                                                                                                   695 non-null float64
Reason for ceasing employment                                                                                                                                    701 non-[...]<jupyter_text>Some observations about the TAFE dataset: 
* tafe_survey only has 2 float columns with the rest of its columns being string columns
* similarly, tafe_survey also has a lot of missing values, but even its sparsest column still has far more non-null values than the mostly null columns in dete_survey
* there are lots of columns here that we don't need to answer our questions
* there are multiple columns that indicate dissatisfaction<jupyter_code>#convert 'not stated' to nulls when read in
dete_survey = pd.read_csv('dete_survey.csv', na_values='Not Stated')
#remove unrelated cols in dete_survey (??)
dropcol_dete = dete_survey.columns[28:49]
dete_survey_updated = dete_survey.drop(dropcol_dete, axis=1)
#remove unrelated cols in tafe_survey (positive sentiments)
dropcol_tafe = tafe_survey.columns[17:66]
tafe_survey_updated = tafe_survey.drop(dropcol_tafe, axis = 1)
newdetecols = dete_survey_updated.columns
newdetecols
#make all colnames lowercase, no trailing whitespace, and turn all other
#whitespaces into _
newdetecols = dete_survey_updated.columns
newdetecols = (newdetecols.str.lower().str.strip()
                 .str.replace(' ', '_'))
dete_survey_updated.columns = newdetecols
dete_survey_updated.columns
#update colnames in tafe_survey_updated to make it cleaner
mapping = {'Record ID': 'id',
           'CESSATION YEAR': 'cease_date',
           'Reason for ceasing employment': 'separationtype',
           'Gender. What is your Gender': 'gender',
           'CurrentAge. Current Age': 'age',
           'Employment Type. Employment Type': 'employment_status',
           'Classification. Classification': 'position',
           'LengthofServiceOverall. Overall Length of Service at Institute (in years)': 'institute_service',
           'LengthofServiceCurrent. Length of Service at current workplace (in years)': 'role_'
          }
tafe_survey_updated = tafe_survey_updated.rename(columns=mapping)
dete_survey_updated.columns
dete_survey_updated.head()
tafe_survey_updated.head()<jupyter_output><empty_output><jupyter_text>At this point, we have renamed the columns to follow the same format (no trailing whitespaces, _ instead of spaces, converted to lowercase). Also, we changed some names in the tafe_survey_updated dataset to set up easier concatenation later.
In both datasets, we can spot the observations that have something to do with resignation through the 'separationtype' column, which lists the reason why a given observation left their institute.
Let's now have a look at the reasons employees leave for both surveys:<jupyter_code>dete_survey_updated['separationtype'].value_counts()
tafe_survey_updated['separationtype'].value_counts()<jupyter_output><empty_output><jupyter_text>We can see that the reasons for leaving are not just resignations. Let's filter out the rows not related to resignations<jupyter_code>#get only resignation-related rows from dete
resiglist = ['Resignation-Other reasons',
             'Resignation-Other employer',
             'Resignation-Move overseas/interstate'
             ]
dete_resignations = (dete_survey_updated[dete_survey_updated['separationtype']
                                        .isin(resiglist)])
#note that df.isin is how you index a df using a list 
#of possible matches
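# A minimal sketch (toy data, not the survey) of the boolean mask that .isin builds:
toy = pd.Series(['Resignation-Other reasons', 'Retirement', 'Resignation-Other employer'])
print(toy.isin(resiglist))   # True, False, True -> only the True rows are kept above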
#get only resignation-related rows from tafe
tafe_resignations = tafe_survey_updated[tafe_survey_updated['separationtype']=='Resignation']
tafe_resignations
#we index this df normally, with just one match (vs. a list above)
<jupyter_output><empty_output><jupyter_text>Let's check the year tally for resignations <jupyter_code>dete_resignations['cease_date'].value_counts()<jupyter_output><empty_output><jupyter_text>There's some cleaning to be done with these values to turn them into proper year values. For this, we will use the extract function with RegEx to get the year values from each string value.<jupyter_code>dete_resignations['cease_date'].value_counts()
detepat = r"(?P<Year>[1-2][0-9]{3})"
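# Quick illustration on a few made-up strings (an assumption about the formats present)
# of what the named-group pattern pulls out of a mixed date column:
print(pd.Series(['05/2012', '2013', '09/2010']).str.extract(detepat))   # Year: 2012, 2013, 2010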
cease2 = dete_resignations['cease_date'].str.extract(detepat)
cease2 = cease2.astype(float)
dete_resignations['cease2'] = cease2
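# The assignment above is what triggers the SettingWithCopyWarning shown below, because
# dete_resignations is a slice of dete_survey_updated. A common warning-free alternative
# (shown on a throwaway copy, not part of the original analysis) is to copy first:
dete_resignations_copy_demo = dete_resignations.copy()
dete_resignations_copy_demo['cease2'] = cease2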
<jupyter_output>/dataquest/system/env/python3/lib/python3.4/site-packages/ipykernel/__main__.py:5: FutureWarning: currently extract(expand=None) means expand=False (return Index/Series/DataFrame) but in a future version of pandas this will be changed to expand=True (return DataFrame)
/dataquest/system/env/python3/lib/python3.4/site-packages/ipykernel/__main__.py:8: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
<jupyter_text>Let's now get the year tally again<jupyter_code>dete_resignations['cease2'].value_counts(ascending=False)
#sanitycheck if it's extracting what it's supposed to
dete_resignations[['cease2', 'cease_date']]<jupyter_output><empty_output><jupyter_text>Let's visualize this range of years with both matplotlib and seaborn<jupyter_code>ax = dete_resignations['cease2'].plot(kind='box')
ax.margins(x=0, y=0.15)
import seaborn as sns
sns.set(style="white")
ax = sns.boxplot(dete_resignations['cease2'])
ax.set_xlim(2005,2015)
for key, spine in ax.spines.items():
    spine.set_visible(False)
ax.tick_params(bottom="off", left="off", top="off", right="off")<jupyter_output>/dataquest/system/env/python3/lib/python3.4/site-packages/seaborn/categorical.py:454: FutureWarning:
remove_na is deprecated and is a private function. Do not use.
<jupyter_text>We'll do the same thing for the start dates in dete_resignations<jupyter_code>ax = dete_resignations['dete_start_date'].plot(kind='box')
ax.margins(x=0, y=0.15)
ax = sns.boxplot(dete_resignations['dete_start_date'])
ax.set_xlim(1950,2015)
for key, spine in ax.spines.items():
    spine.set_visible(False)
ax.tick_params(bottom="off", left="off", top="off", right="off")<jupyter_output>/dataquest/system/env/python3/lib/python3.4/site-packages/seaborn/categorical.py:454: FutureWarning:
remove_na is deprecated and is a private function. Do not use.
<jupyter_text>Let's move on to their TAFE counterparts<jupyter_code>ax = tafe_resignations['cease_date'].plot(kind='box')
ax.set_ylim(2005, 2015)
ax = sns.boxplot(tafe_resignations['cease_date'])
ax.set_xlim(2005,2015)
for key, spine in ax.spines.items():
    spine.set_visible(False)
ax.tick_params(bottom="off", left="off", top="off", right="off")<jupyter_output>/dataquest/system/env/python3/lib/python3.4/site-packages/seaborn/categorical.py:454: FutureWarning:
remove_na is deprecated and is a private function. Do not use.
<jupyter_text>From what we see, there are no major issues with the years in the columns we're interested in. However, the years in each column do not seem to cover the same time frame.
Now we'll calculate the service period of the resigned employees from the dete_resignations.<jupyter_code>newcol = (dete_resignations['cease2'] 
          - dete_resignations['dete_start_date'])
dete_resignations['institute_service'] = newcol #settingwithcopy warn
#check if calculation makes sense
dete_resignations[['cease2', 'dete_start_date', 'institute_service']]<jupyter_output><empty_output><jupyter_text>Next, we have to identify the employees who resigned because they were dissatisfied. From the two dataframes, we can identify the columns that we can use to flag these particular employees. 
TAFE 
* Contributing Factors, Dissatisfaction
* Contributing Factors, Job Dissatisfaction
DETE
* job_dissatisfaction
* dissatisfaction_with_the_department
* physical_work_environment
* lack_of_recognition
* lack_of_job_security
* work_location
* employment_conditions
* work_life_balance
* workload
Due to the way the dataset is organized, we'll need to check the values across rows to see if our columns of interest are marked. If they are, then we'll consider that employee dissatisfied. First we'll check what we're working with by tallying the number of observations we expect to have a "dissatisfied" value of True.<jupyter_code>tafe_resignations['Contributing Factors. Dissatisfaction'].value_counts(dropna=False)
tafe_resignations['Contributing Factors. Job Dissatisfaction'].value_counts(dropna=False)
# sp = tafe_resignations['Contributing Factors. Job Dissatisfaction'][17]
# tafe_resignations['Contributing Factors. Dissatisfaction'] = tafe_resignations['Contributing Factors. Dissatisfaction'].str.strip()
# dictD= {'Contributing Factors. Dissatisfaction': 'True',
#         '-': 'False'
#         }
# dictJD= {sp: 'True',
#           '-': 'False'
#          }
# x = tafe_resignations['Contributing Factors. Dissatisfaction'].map(dictD)
# y = tafe_resignations['Contributing Factors. Job Dissatisfaction'].map(dictJD)
# x.value_counts(dropna=False)
# y.value_counts(dropna=False)
# tafe_resignations['Contributing Factors. Dissatisfaction'] = x
# tafe_resignations['Contributing Factors. Job Dissatisfaction'] = y
# tafe_resignations[['Contributing Factors. Dissatisfaction', 'Contributing Factors. Job Dissatisfaction']].apply(pd.value_counts, dropna=False)
#function to be applied element-wise on the different columns 
#to check if they have marked it or not.
def update_vals(val):
    if pd.isnull(val):   #this is how to check if NaN
        return np.nan
    elif val == '-':
        return False
    else:
        return True
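# Quick sanity check of update_vals on the three kinds of values it handles
# (NaN stays NaN, '-' means the factor was not ticked, anything else means it was):
print(update_vals(np.nan), update_vals('-'), update_vals('Job Dissatisfaction'))   # nan False True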
#apply the function on all the columns of interest
tafe_resignations['Contributing Factors. Dissatisfaction'] = \
(tafe_resignations['Contributing Factors. Dissatisfaction']
                     .apply(update_vals))
tafe_resignations['Contributing Factors. Job Dissatisfaction'] = \
(tafe_resignations['Contributing Factors. Job Dissatisfaction']
                     .apply(update_vals))
#for every obs, check if any of the columns of interest are True
#then return True if that condition holds, False otherwise
dete_select_disat = ['job_dissatisfaction',
                     'dissatisfaction_with_the_department',
                     'physical_work_environment',
                     'lack_of_recognition',
                     'lack_of_job_security',
                     'work_location',
                     'employment_conditions',
                     'work_life_balance',
                     'workload'
                    ]
tafe_select_disat = ['Contributing Factors. Dissatisfaction',
                     'Contributing Factors. Job Dissatisfaction'
                    ]
dete_resignations['dissatisfied'] = (dete_resignations[dete_select_disat]
                                     .any(axis=1, skipna=False))
tafe_resignations['dissatisfied'] = (tafe_resignations[tafe_select_disat]
                                     .any(axis=1, skipna=False))
tafe_resignations['Contributing Factors. Job Dissatisfaction'].value_counts(dropna=False)
tafe_resignations['Contributing Factors. Dissatisfaction'].value_counts(dropna=False)
dete_resignations_up = dete_resignations.copy()
tafe_resignations_up = tafe_resignations.copy()
<jupyter_output><empty_output><jupyter_text>Before we concatenate the datasets, let's also make an 'institute' column that will indicate which dataset the observation comes from<jupyter_code>dete_resignations_up['institute'] = 'DETE'
tafe_resignations_up['institute'] = 'TAFE'
dete_resignations_up
combined = pd.concat([dete_resignations_up, tafe_resignations_up])
combined<jupyter_output><empty_output><jupyter_text>Let's check if the concatenation went as planned. The shapes should add up<jupyter_code>dete_resignations_up.shape
tafe_resignations_up.shape
combined.shape<jupyter_output><empty_output><jupyter_text>Before proceeding, let's address the null values of this new combined dataset. Let's check for nulls<jupyter_code>combined.notnull().sum().sort_values()<jupyter_output><empty_output><jupyter_text>We can see that there are a lot of columns with null values. We can set a threshold of >500 non-nulls for a column to be retained. Although not all columns are needed for this analysis, we do this to retain a cleaner dataset with fewer null values for future work.
We also see that the columns that we need do have a significant amount of non-nulls, so our analysis can proceed<jupyter_code>#drop columns where non-nulls don't reach 500 obs
combined_updated = combined.dropna(axis=1, thresh=500)
combined_updated.shape<jupyter_output><empty_output><jupyter_text>We retained only columns with >500 non-null values. The result is a df with just 8 columns left.
Now, let's turn our attention to the age column. We want to see the rows without an age value <jupyter_code>combined_updated[combined_updated['age'].isnull()]<jupyter_output><empty_output><jupyter_text>Let's also check the unique values for the age and institute_service columns<jupyter_code>combined_updated['age']
combined_updated['institute_service'].value_counts()<jupyter_output><empty_output><jupyter_text>We see that there are different formats for the values that represent years of service. We need to clean this up as we place each value into a category using a custom function.
We will follow the institute_service categorization as follows:
* New: Less than 3 years at a company
* Experienced: 3-6 years at a company
* Established: 7-10 years at a company
* Veteran: 11 or more years at a company<jupyter_code>def categorize(val):
    if val in ['Less than 1 year', '1-2']:
        return 'New'
    elif val in ['3-4', '5-6']:
        return 'Experienced'
    elif val == '7-10':
        return 'Established'
    elif val in ['11-20', 'More than 20 years']:
        return 'Veteran'
    elif pd.isnull(val):
        return np.nan
    elif val < 3:
        return 'New'
    elif 3 <= val <= 6:
        return 'Experienced'
    elif 7 <= val <= 10:
        return 'Established'
    elif val > 10:
        return 'Veteran'
    
      
#test the function
categorize('11-20')
combined_updated['service_cat'] = combined_updated['institute_service'].apply(categorize)
#test to see if our 'service_cat' column is correct
combined_updated[['service_cat', 'institute_service']]
combined_updated['service_cat'].value_counts(dropna=False)
#review the dissatisfied column
combined_updated['dissatisfied'].value_counts(dropna=False)<jupyter_output><empty_output><jupyter_text>We'll make a judgment on how to deal with the null values here. Since this is a boolean column and there are not a lot of null values, we will just use the mode (False) to replace the null values.<jupyter_code>#all NaNs converted to False, the mode of the column
combined_updated['dissatisfied'] = combined_updated['dissatisfied'].fillna(False)
#check data structure
combined_updated.info()<jupyter_output><class 'pandas.core.frame.DataFrame'>
Int64Index: 651 entries, 3 to 701
Data columns (total 9 columns):
age                  596 non-null object
cease_date           635 non-null object
dissatisfied         651 non-null bool
employment_status    597 non-null object
id                   651 non-null float64
institute            651 non-null object
institute_service    563 non-null object
separationtype       651 non-null object
service_cat          563 non-null object
dtypes: bool(1), float64(1), object(7)
memory usage: 66.4+ KB
<jupyter_text>With the nulls taken care of, we can finally aggregate the 'dissatisfied' column (a boolean column: 0 or 1) over the service categories ('service_cat') that we're interested in <jupyter_code>combined_updated<jupyter_output><empty_output><jupyter_text>Now we can finally see what proportion of resignations in each service category was due to job dissatisfaction. This is one of the questions we started out with<jupyter_code>#default pv aggfunc is np.mean, which is convenient since
#booleans can be averaged.
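#e.g. the mean of [True, False, False, True] is 0.5, so each pivot value below is the
#proportion of resignations in that service category that were marked as dissatisfied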
pv = combined_updated.pivot_table(values='dissatisfied', 
                             index='service_cat'
                            )
pv
import matplotlib.pyplot as plt
plt.figure(figsize=(12,7))
ax = pv.plot(kind='bar')
ax.set_title('proportion of resignations due to dissatisfaction')
for key, spine in ax.spines.items():
    spine.set_visible(False)
ax.set_title('proportion of resignations due to dissatisfaction')<jupyter_output><empty_output> | 
	no_license | 
	/exit_survey_analysis.ipynb | 
	nrabang/DQ-exit-survey-analysis | 23 | 
| 
	<jupyter_start><jupyter_text># Group 6 Mini Project 2
# 1. Term - Frequency Inverse Document Frequency 
1) Remove Stopwords (1 Mark)
2) Remove the punctuation and the special characters, and convert the text to lower case. (2 Marks)
3) Create bigrams and trigrams for the entire dataset and list down the 20 most frequent bigrams and the 20 most frequent trigrams. (3 Marks)
4) You have to implement the TF-IDF algorithm from scratch. (3 Marks)
5) Use the above-implemented algorithm and the values to calculate TF-IDF (using TF IDF formula) on the dataset and list down the top 10 words which have the highest TF-IDF Value. (2 Marks)<jupyter_code>#Imports
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# for integer encoding using sklearn
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import seaborn as sns
from matplotlib import style
import plotly.express as px
import warnings
warnings.filterwarnings('ignore')
import datetime as dt
from tkinter import *
from tkinter.font import Font
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import scale
from pyclustertend import hopkins
from sklearn.decomposition import PCA
from tqdm import tqdm 
from math import sqrt 
from time import gmtime, strftime
from sklearn.cluster import KMeans
import os
##print(os.listdir("../input"))
sns.set()
from sklearn import preprocessing as pp
from sklearn.cluster import KMeans
import random
import pylab as pl
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.spatial import distance_matrix
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import DBSCAN,AgglomerativeClustering, KMeans
from apyori import apriori
######## Capture the start time to check the run time of the whole notebook #########
startTime = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import unicodedata
import nltk
from nltk import word_tokenize
import re  #regular expression
from bs4 import BeautifulSoup
from gensim.parsing.preprocessing import remove_stopwords
import gensim.corpora as corpora
from pprint import pprint
import gensim
import pyLDAvis.gensim_models
import pickle 
import pyLDAvis
sw = stopwords.words('english')
stemmer = SnowballStemmer("english")
url_re = r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))'
#Extract and read text files
sentence = pd.DataFrame()
file1 = open('TF-IDF_dataset-Copy.txt', 'r',encoding="mbcs")
# Using readlines()
try:
    
    
    Lines = file1.readlines()
 
    count = 0
    # Strips the newline character
    chapter = 'Chapter 1'
    for line in Lines:
        count += 1
        
        if len(line.strip()) > 0 and line.strip().find('Chapter') == -1 :
            current_df = pd.DataFrame({'sentences': [ line.strip()] , 'chapter':[chapter]})
            sentence = sentence.append(current_df, ignore_index=True)
        else:
            if line.strip().find('Chapter') != -1:
                
                chapter = line.strip()
                #print(chapter)
            
    file1.close() 
except Exception as e:
    print('Exception occurred ', str(e))
    file1.close()
    
print(sentence)
#Sentence curation functions
def stopwords(text):
    '''a function for removing the stopword'''
    
    # removing the stop words and lowercasing the selected words
    #text = [word.lower() for word in text.split() if word.lower() not in sw]
    # joining the list of words with space separator
    text = remove_stopwords(text)
    text = [word.lower() for word in text.split() if word.lower() not in sw]
    #return(text)
    return " ".join(text)
def remove_punctuation(text):
    '''a function for removing punctuation'''
    import string
    # replacing the punctuations with no space, 
    # which in effect deletes the punctuation marks 
    translator = str.maketrans('', '', string.punctuation)
    # return the text stripped of punctuation marks
    return text.translate(translator)
def stemming(text):    
    
    '''a function which stems each word in the given text'''
    text = [stemmer.stem(word) for word in text.split()]
    return " ".join(text)     
def remove_html(text):
    '''
    remove the HTML tags and URLS from the tweets
    '''
    if text:
        # BeautifulSoup on content
        soup = BeautifulSoup(text, "html.parser")
        # Stripping all <code> tags with their content if any
        if soup.code:
            soup.code.decompose()
        # Get all the text out of the html
        text =  soup.get_text()
        # Returning text stripping out all uris
        return re.sub(url_re, "", text)
    else:
        return ""
    
def remove_emojis(text):
    
    emoji_pattern = re.compile("["
        r"\U0001F600-\U0001F64F"  # emoticons
        r"\U0001F300-\U0001F5FF"  # symbols & pictographs
        r"\U0001F680-\U0001F6FF"  # transport & map symbols
        r"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           "]+", flags=re.UNICODE)
    text = emoji_pattern.sub(r'', text)# no emoji 
  
    return text
def remove_specialChars(text):
    text = re.sub('[^a-zA-Z0-9.\d\s]', '', text)
    return text
## Remove unwanted words ##
sentence_duplicate = sentence.copy()
sentence['sentences'] = sentence['sentences'].apply(remove_html)
sentence['sentences'] = sentence['sentences'].apply(remove_emojis)
sentence['sentences'] = sentence['sentences'].apply(remove_punctuation)
sentence['sentences'] = sentence['sentences'].apply(stopwords)
sentence['sentences'] = sentence['sentences'].apply(remove_specialChars)
sentence_duplicate = sentence.copy()
#sentence['sentences'] = sentence['sentences'].apply(stemming)
print(sentence)
## Extract the BIGrams and the TRIGrams ##
tokenized_text = []
for index , row in sentence.iterrows():
    #print(sentence)
    sequence = word_tokenize(row["sentences"]) 
    tokenized_text.extend(sequence) 
#Words = sentence['sentences'].tolist()
#hf = ["".join(review) for review in sentence['sentences'].values]
#print(hf)
#print(tokenized_text)
print('20 most frequent bigrams \n',(pd.Series(nltk.ngrams(tokenized_text, 2)).value_counts())[:20])
print('\n20 most frequent trigrams \n',(pd.Series(nltk.ngrams(tokenized_text, 3)).value_counts())[:20])
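# nltk.ngrams simply slides a fixed-size window over the token list; tiny illustration:
print(list(nltk.ngrams(['the', 'quick', 'brown', 'fox'], 2)))
# -> [('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')]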
## Implement TF- IDF ##
# Create the dictionary of words
final_dictionary=set()
for index , row in sentence.iterrows():
    split_sentence = row['sentences'].split(" ")
    set_sentence =  set(split_sentence)
    final_dictionary = final_dictionary.union(set_sentence)
print('length of word dictionary', len(final_dictionary))
wordDict = []#pd.DataFrame()
for index , row in tqdm(sentence.iterrows()):
    split_sentence = row['sentences'].split(" ")
    wordDictA = dict.fromkeys(final_dictionary, 0)
    for word in split_sentence:
        wordDictA[word]+=1
    #print([wordDictA])
    wordDict.append(wordDictA.copy())
    #print(wordDict)
#print(len(wordDict))
#Create the Term Frequency for each sentence
def computeTF(wordDict, doc):
    tfDict = {}
    corpusCount = len(doc)
    for word, count in wordDict.items():
        tfDict[word] = count/float(corpusCount)
    return(tfDict)
wordDictTF = pd.DataFrame()
for index , row in tqdm(sentence.iterrows()):
    split_sentence = row['sentences'].split(" ")
    wordDictA = dict.fromkeys(final_dictionary, 0)
    for word in split_sentence:
        wordDictA[word]+=1
    #print([wordDictA])
    tfSentence = computeTF(wordDictA, split_sentence)
    wordDictTF = wordDictTF.append(tfSentence,ignore_index = True)
    #print(wordDict)
#print("TF word dictionary \n",wordDictTF)
#Create the IDF for the corpus
def computeIDF(docList):
    idfDict = {}
    N = len(docList)
    
    idfDict = dict.fromkeys(docList[0].keys(), 0)
    for word , val in idfDict.items():
        wordcount = 0
        for itemlist in docList:
            if itemlist[word] > 0:
                wordcount = wordcount+1
        #print("word is ",word)
        #print("val is ",wordcount )
        
        idfDict[word] = math.log10(N / (float(wordcount) + 1))
        
    return(idfDict)
idfs = computeIDF(wordDict)
#print(idfs)
# Calculate TF- IDF
FinalTFIDF = pd.DataFrame()
def computeTFIDF(tfBow, idfs):
    tfidf = {}
    for word, val in tfBow.items():
        tfidf[word] = val*idfs[word]
    return(tfidf)
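# Toy worked example (a made-up two-document corpus, not the project text) of the pieces above:
# doc1 = "the cat sat", doc2 = "the dog"; for the word "cat" in doc1:
#   TF     = 1/3                       (1 occurrence out of 3 tokens)
#   IDF    = log10(2 / (1 + 1)) = 0    (appears in 1 of 2 docs; +1 smoothing as in computeIDF)
#   TF-IDF = (1/3) * 0 = 0
print(math.log10(2 / (1 + 1)))   # 0.0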
for index , row in tqdm(wordDictTF.iterrows()):
    tfIDF = computeTFIDF(row.to_dict(), idfs)
    FinalTFIDF = FinalTFIDF.append(tfIDF,ignore_index = True)
# Print the TF IDF Vector final
print("Final TD IDF Vector: \n",FinalTFIDF)
# Top 10 words with highest TF-IDF value
print("Top 10 words with the highest TFIDF value:\n",FinalTFIDF.mean().sort_values(ascending=False)[:10])
## Extract TF-IDF Vector using library - Extra done to check the accuracy of the self implemented TF-IDF algorithm ##
# instantiate the vectorizer object
tfidfvectorizer = TfidfVectorizer(analyzer='word',stop_words= 'english')
# convert th sentences into a matrix
#hf = sentence['sentences'].values #[" ".join(review) for review in sentence['sentences'].values]
hf = [" ".join(review) for review in sentence['sentences'].values]
'''
for review in tqdm(sentence['sentences'].values):
    hf = hf + " " + review
'''
#print(hf)
tfidf_wm = tfidfvectorizer.fit_transform(sentence['sentences'])# because the TFIDF vectorizer takes texts and not lists
#retrieve the terms found in the corpora
tfidf_tokens = tfidfvectorizer.get_feature_names()
df_tfidfvect = pd.DataFrame(data = tfidf_wm.toarray(),columns = tfidf_tokens)#index = ['Doc1','Doc2'])
print("\nTD-IDF Vectorizer\n")
print(df_tfidfvect)
print("Top 10 words with the highest TFIDF value:\n",df_tfidfvect.mean().sort_values(ascending=False)[:10])<jupyter_output>
TD-IDF Vectorizer
     7th  able  absence  absent  abstruse  accompanied  accomplishment  \
0    0.0   0.0      0.0     0.0       0.0          0.0             0.0   
1    0.0   0.0      0.0     0.0       0.0          0.0             0.0   
2    0.0   0.0      0.0     0.0       0.0          0.0             0.0   
3    0.0   0.0      0.0     0.0       0.0          0.0             0.0   
4    0.0   0.0      0.0     0.0       0.0          0.0             0.0   
..   ...   ...      ...     ...       ...          ...             ...   
215  0.0   0.0      0.0     0.0       0.0          0.0             0.0   
216  0.0   0.0      0.0     0.0       0.0          0.0             0.0   
217  0.0   0.0      0.0     0.0       0.0          0.0             0.0   
218  0.0   0.0      0.0     0.0       0.0          0.0             0.0   
219  0.0   0.0      0.0     0.0       0.0          0.0             0.0   
     accordingly  account  acquaintance  ...  write  writes  writeto  written  \
0         [...]<jupyter_text># Perform Part of Speech Tagging using the Viterbi Algorithm, 
6) Label the cleaned Tf-IDF dataset ( obtained after performing step 1 and step 2 )   ( 2 Mark )
7) Split the Train and the Test Dataset                      (1 Mark)
8) Implement the Viterbi Algorithm (you can use a library) to get the Part of Speech Tagging. (3 Marks)
9) Calculate the Accuracy and F1 score. ( Number of Predicted Correct Tag in the test set / Total number of Data points in the test set)   (2 Marks)<jupyter_code>## Create the POS tagging label for cleaned corpus ##
tagged_text = []
for index , row in sentence_duplicate.iterrows():
      
    # Word tokenizers is used to find the words 
    # and punctuation in a string
    wordsList = nltk.word_tokenize(row["sentences"])
  
    #  Using a Tagger. Which is part-of-speech 
    # tagger or POS-tagger. 
    tagged = nltk.pos_tag(wordsList)
    tagged_text.extend([tagged])
print(tagged_text)
#Getting the tagged sentences
#sent_tag = brown.tagged_sents()
mod_sent_tag=[]
for s in tagged_text:
  s.insert(0,('##','##'))
  s.append(('&&','&&'))
  mod_sent_tag.append(s)
print(mod_sent_tag)
# split data into training and validation set in the ratio 80:20
train_data,test_data =train_test_split(mod_sent_tag,train_size=0.80,test_size=0.20,random_state = 101)
print(train_data[:5])
#Creating a dictionary whose keys are tags and values contain words which were assigned the corresponding tag
# ex:- 'TAG':{word1: count(word1,'TAG')}
train_word_tag = {}
for s in train_data:
  for (w,t) in s:
    w=w.lower()
    try:
      try:
        train_word_tag[t][w]+=1
      except:
        train_word_tag[t][w]=1
    except:
      train_word_tag[t]={w:1}
print(train_word_tag)
#Calculating the emission probabilities using train_word_tag
train_emission_prob={}
for k in train_word_tag.keys():
  train_emission_prob[k]={}
  count = sum(train_word_tag[k].values())
  for k2 in train_word_tag[k].keys():
    train_emission_prob[k][k2]=train_word_tag[k][k2]/count
print(train_emission_prob)
#Estimating the bigram of tags to be used for transition probability
bigram_tag_data = {}
for s in train_data:
  bi=list(nltk.bigrams(s))
  for b1,b2 in bi:
    try:
      try:
        bigram_tag_data[b1[1]][b2[1]]+=1
      except:
        bigram_tag_data[b1[1]][b2[1]]=1
    except:
      bigram_tag_data[b1[1]]={b2[1]:1}
print(bigram_tag_data)
#Calculating the probabilities of tag bigrams for transition probability  
bigram_tag_prob={}
for k in bigram_tag_data.keys():
  bigram_tag_prob[k]={}
  count=sum(bigram_tag_data[k].values())
  for k2 in bigram_tag_data[k].keys():
    bigram_tag_prob[k][k2]=bigram_tag_data[k][k2]/count
print(bigram_tag_prob)
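#In HMM terms, the two tables built above are the model parameters:
#  emission   P(word | tag)          = count(word, tag)        / count(tag)
#  transition P(tag_i | tag_(i-1))   = count(tag_(i-1), tag_i) / count(tag_(i-1))
#The Viterbi decoder below multiplies these along candidate paths through the tag lattice.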
#Calculating the possible tags for each word
#Note: Here we have used the whole data(Train+Test)
#Reason: There may be some words which are not present in train data but are present in test data 
tags_of_tokens = {}
count=0
for s in train_data:
  for (w,t) in s:
    w=w.lower()
    try:
      if t not in tags_of_tokens[w]:
        tags_of_tokens[w].append(t)
    except:
      l = []
      l.append(t)
      tags_of_tokens[w] = l
        
for s in test_data:
  for (w,t) in s:
    w=w.lower()
    try:
      if t not in tags_of_tokens[w]:
        tags_of_tokens[w].append(t)
    except:
      l = []
      l.append(t)
      tags_of_tokens[w] = l
print(tags_of_tokens)
#Dividing the test data into test words and test tags
test_words=[]
test_tags=[]
for s in test_data:
  temp_word=[]
  temp_tag=[]
  for (w,t) in s:
    temp_word.append(w.lower())
    temp_tag.append(t)
  test_words.append(temp_word)
  test_tags.append(temp_tag)
#Executing the Viterbi Algorithm
predicted_tags = []                #intializing the predicted tags
for x in range(len(test_words)):   # for each tokenized sentence in the test data
  s = test_words[x]
  #storing_values is a dictionary which stores the required values
  #ex: storing_values = {step_no.:{state1:[previous_best_state,value_of_the_state]}}                
  storing_values = {}              
  for q in range(len(s)):
    step = s[q]
    #for the starting word of the sentence
    if q == 1:                
      storing_values[q] = {}
      tags = tags_of_tokens[step]
      for t in tags:
        #this is applied since we do not know whether the word in the test data is present in train data or not
        try:
          storing_values[q][t] = ['##',bigram_tag_prob['##'][t]*train_emission_prob[t][step]]
        #if word is not present in the train data but present in test data we assign a very low probability of 0.0001
        except:
          storing_values[q][t] = ['##',0.0001]#*train_emission_prob[t][step]]
    
    #if the word is not at the start of the sentence
    if q>1:
      storing_values[q] = {}
      previous_states = list(storing_values[q-1].keys())   # loading the previous states
      current_states  = tags_of_tokens[step]               # loading the current states
      #calculation of the best previous state for each current state and then storing
      #it in storing_values
      for t in current_states:                             
        temp = []
        for pt in previous_states:                         
          try:
            temp.append(storing_values[q-1][pt][1]*bigram_tag_prob[pt][t]*train_emission_prob[t][step])
          except:
            temp.append(storing_values[q-1][pt][1]*0.0001)
        max_temp_index = temp.index(max(temp))
        best_pt = previous_states[max_temp_index]
        storing_values[q][t]=[best_pt,max(temp)]
  #Backtracing to extract the best possible tags for the sentence
  pred_tags = []
  total_steps_num = storing_values.keys()
  last_step_num = max(total_steps_num)
  for bs in range(len(total_steps_num)):
    step_num = last_step_num - bs
    if step_num == last_step_num:
      pred_tags.append('&&')
      pred_tags.append(storing_values[step_num]['&&'][0])
    if step_num<last_step_num and step_num>0:
      pred_tags.append(storing_values[step_num][pred_tags[len(pred_tags)-1]][0])
  predicted_tags.append(list(reversed(pred_tags)))
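#The loop above implements the standard Viterbi recurrence,
#  v[q][t] = max over t' of ( v[q-1][t'] * P(t | t') * P(word_q | t) ),
#keeping a back-pointer to the best previous tag, and it falls back to a floor
#probability of 0.0001 whenever a tag bigram or (tag, word) pair was never seen in training.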
#Calculating the accuracy based on tagging each word in the test data.
right = 0 
wrong = 0
total_data_points = 0
for i in range(len(test_tags)):
  gt = test_tags[i]
  total_data_points = total_data_points + len(gt)
  pred = predicted_tags[i]
  for h in range(len(gt)):
    if gt[h] == pred[h]:
      right = right+1
    else:
      wrong = wrong +1 
print('Accuracy on the test data is: ',right/(right+wrong))
print('Loss on the test data is: ',wrong/(right+wrong))
print('F1 Score is:',right/(total_data_points))<jupyter_output>Accuracy on the test data is:  0.9379084967320261
Loss on the test data is:  0.06209150326797386
F1 Score is: 0.9379084967320261
<jupyter_text>#    Topic Modelling        
10) Using the LDA algorithm create the Topics (10) for the Corpus             (3 Marks)
11) List down the 10 words in each of the Topics Extracted.           (2 Marks)<jupyter_code>## Create the word corpus ##
tagged_text = []
for index , row in sentence_duplicate.iterrows():
      
    # Word tokenizers is used to find the words 
    # and punctuation in a string
    wordsList = nltk.word_tokenize(row["sentences"])
  
    #  Using a Tagger. Which is part-of-speech 
    # tagger or POS-tagger. 
    tagged_text.extend([wordsList])
print(tagged_text[:4][3][:100])
## Create the dictionary ##
# Create Dictionary
id2word = corpora.Dictionary(tagged_text)
# Create Corpus
texts = tagged_text
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1][0][:30])
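# Each corpus entry is a list of (token_id, count) pairs; mapping the ids back through
# id2word gives a human-readable view of the bag-of-words encoding:
print([(id2word[token_id], count) for token_id, count in corpus[0][:10]])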
## Print number of topics and 10 words in each topic ##
num_topics = 10
# Build LDA model
lda_model = gensim.models.LdaMulticore(corpus=corpus,
                                       id2word=id2word,
                                       num_topics=num_topics)
# Print the top 10 Keyword in the 10 topics
pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# Visualize the topics
pyLDAvis.enable_notebook()
#LDAvis_data_filepath = os.path.join('results')
# # this is a bit time consuming - make the if statement True
# # if you want to execute visualization prep yourself
if 1 == 1:
    LDAvis_prepared = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)
    with open('ldavisfile', 'wb') as f:
        pickle.dump(LDAvis_prepared,f)
# load the pre-prepared pyLDAvis data from disk
with open('ldavisfile', 'rb') as f:
    LDAvis_prepared = pickle.load(f)
pyLDAvis.save_html(LDAvis_prepared, './/results//ldavis_prepared_'+ str(num_topics) +'.html')
LDAvis_prepared
endTime = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
print ("Run started at : ",startTime)
print ("Run ended at : ",endTime)<jupyter_output>Run started at :  Fri, 02 Jul 2021 17:03:48 +0000
Run ended at :  Fri, 02 Jul 2021 17:04:06 +0000
 | 
	no_license | 
	/Text Mining Course/Mini Project 1/GROUP6_MINIPROJECT2_UNSUPERVISEDLEARNING.ipynb | 
	superchiku/MachineLearningCodeAssignments-BITS | 3 | 
| 
	<jupyter_start><jupyter_text># Face Recognition
In this assignment, you will build a face recognition system. Many of the ideas presented here are from [FaceNet](https://arxiv.org/pdf/1503.03832.pdf). In lecture, we also talked about [DeepFace](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf). 
Face recognition problems commonly fall into two categories: 
- **Face Verification** - "is this the claimed person?". For example, at some airports, you can pass through customs by letting a system scan your passport and then verifying that you (the person carrying the passport) are the correct person. A mobile phone that unlocks using your face is also using face verification. This is a 1:1 matching problem. 
- **Face Recognition** - "who is this person?". For example, the video lecture showed a [face recognition video](https://www.youtube.com/watch?v=wr4rx0Spihs) of Baidu employees entering the office without needing to otherwise identify themselves. This is a 1:K matching problem. 
FaceNet learns a neural network that encodes a face image into a vector of 128 numbers. By comparing two such vectors, you can then determine if two pictures are of the same person.
    
**In this assignment, you will:**
- Implement the triplet loss function
- Use a pretrained model to map face images into 128-dimensional encodings
- Use these encodings to perform face verification and face recognition
#### Channels-first notation
* In this exercise, we will be using a pre-trained model which represents ConvNet activations using a **"channels first"** convention, as opposed to the "channels last" convention used in lecture and previous programming assignments. 
* In other words, a batch of images will be of shape $(m, n_C, n_H, n_W)$ instead of $(m, n_H, n_W, n_C)$. 
* Both of these conventions have a reasonable amount of traction among open-source implementations; there isn't a uniform standard yet within the deep learning community.
## Updates
#### If you were working on the notebook before this update...
* The current notebook is version "3a".
* You can find your original work saved in the notebook with the previous version name ("v3") 
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#### List of updates
* `triplet_loss`: Additional Hints added.
* `verify`: Hints added.
* `who_is_it`: corrected hints given in the comments.
* Spelling and formatting updates for easier reading.
#### Load packages
Let's load the required packages. <jupyter_code>from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
K.set_image_data_format('channels_first')
import cv2
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from fr_utils import *
from inception_blocks_v2 import *
%matplotlib inline
%load_ext autoreload
%autoreload 2
np.set_printoptions(threshold=np.nan)<jupyter_output>Using TensorFlow backend.
<jupyter_text>## 0 - Naive Face Verification
In Face Verification, you're given two images and you have to determine if they are of the same person. The simplest way to do this is to compare the two images pixel-by-pixel. If the distance between the raw images is less than a chosen threshold, it may be the same person! 
   **Figure 1**
* Of course, this algorithm performs really poorly, since the pixel values change dramatically due to variations in lighting, orientation of the person's face, even minor changes in head position, and so on.
* You'll see that rather than using the raw image, you can learn an encoding, $f(img)$.  
* By using an encoding for each image, an element-wise comparison produces a more accurate judgement as to whether two pictures are of the same person.
## 1 - Encoding face images into a 128-dimensional vector 
### 1.1 - Using a ConvNet  to compute encodings
The FaceNet model takes a lot of data and a long time to train. So following common practice in applied deep learning, let's  load weights that someone else has already trained. The network architecture follows the Inception model from [Szegedy *et al.*](https://arxiv.org/abs/1409.4842). We have provided an inception network implementation. You can look in the file `inception_blocks_v2.py` to see how it is implemented (do so by going to "File->Open..." at the top of the Jupyter notebook.  This opens the file directory that contains the '.py' file). The key things you need to know are:
- This network uses 96x96 dimensional RGB images as its input. Specifically, inputs a face image (or batch of $m$ face images) as a tensor of shape $(m, n_C, n_H, n_W) = (m, 3, 96, 96)$ 
- It outputs a matrix of shape $(m, 128)$ that encodes each input face image into a 128-dimensional vector
Run the cell below to create the model for face images.<jupyter_code>FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())<jupyter_output>Total Params: 3743280
<jupyter_text>** Expected Output **
Total Params: 3743280
By using a 128-neuron fully connected layer as its last layer, the model ensures that the output is an encoding vector of size 128. You then use the encodings to compare two face images as follows:
   **Figure 2**:    By computing the distance between two encodings and thresholding, you can determine if the two pictures represent the same person
So, an encoding is a good one if: 
- The encodings of two images of the same person are quite similar to each other. 
- The encodings of two images of different persons are very different.
The triplet loss function formalizes this, and tries to "push" the encodings of two images of the same person (Anchor and Positive) closer together, while "pulling" the encodings of two images of different persons (Anchor, Negative) further apart. 
   **Figure 3**:    In the next part, we will call the pictures from left to right: Anchor (A), Positive (P), Negative (N)  
### 1.2 - The Triplet Loss
For an image $x$, we denote its encoding $f(x)$, where $f$ is the function computed by the neural network.
<!--
We will also add a normalization step at the end of our model so that $\mid \mid f(x) \mid \mid_2 = 1$ (means the vector of encoding should be of norm 1).
!-->
Training will use triplets of images $(A, P, N)$:  
- A is an "Anchor" image--a picture of a person. 
- P is a "Positive" image--a picture of the same person as the Anchor image.
- N is a "Negative" image--a picture of a different person than the Anchor image.
These triplets are picked from our training dataset. We will write $(A^{(i)}, P^{(i)}, N^{(i)})$ to denote the $i$-th training example. 
You'd like to make sure that an image $A^{(i)}$ of an individual is closer to the Positive $P^{(i)}$ than to the Negative image $N^{(i)}$) by at least a margin $\alpha$:
$$\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 + \alpha < \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$$
You would thus like to minimize the following "triplet cost":
$$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \underbrace{\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2}_\text{(1)} - \underbrace{\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2}_\text{(2)} + \alpha \large ] \small_+ \tag{3}$$
Here, we are using the notation "$[z]_+$" to denote $max(z,0)$.  
Notes:
- The term (1) is the squared distance between the anchor "A" and the positive "P" for a given triplet; you want this to be small. 
- The term (2) is the squared distance between the anchor "A" and the negative "N" for a given triplet, you want this to be relatively large. It has a minus sign preceding it because minimizing the negative of the term is the same as maximizing that term.
- $\alpha$ is called the margin. It is a hyperparameter that you pick manually. We will use $\alpha = 0.2$. 
Most implementations also rescale the encoding vectors to have an L2 norm equal to one (i.e., $\mid \mid f(img)\mid \mid_2$=1); you won't have to worry about that in this assignment.
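As a quick, framework-free sanity check of formula (3) (a toy sketch with made-up 3-dimensional encodings, not part of the graded implementation below), the per-example triplet cost can be computed directly with NumPy:
```python
import numpy as np

# toy encodings for one training example: anchor, positive, negative (3-D instead of 128-D)
a = np.array([0.0, 1.0, 0.0])
p = np.array([0.1, 0.9, 0.0])
n = np.array([1.0, 0.0, 0.0])
alpha = 0.2

pos_dist = np.sum((a - p) ** 2)              # ||f(A) - f(P)||^2 = 0.02
neg_dist = np.sum((a - n) ** 2)              # ||f(A) - f(N)||^2 = 2.0
print(max(pos_dist - neg_dist + alpha, 0))   # 0 -> this triplet already satisfies the margin
```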
**Exercise**: Implement the triplet loss as defined by formula (3). Here are the 4 steps:
1. Compute the distance between the encodings of "anchor" and "positive": $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$
2. Compute the distance between the encodings of "anchor" and "negative": $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$
3. Compute the formula per training example: $ \mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2 + \alpha$
3. Compute the full formula by taking the max with zero and summing over the training examples:
$$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2+ \alpha \large ] \small_+ \tag{3}$$
#### Hints
* Useful functions: `tf.reduce_sum()`, `tf.square()`, `tf.subtract()`, `tf.add()`, `tf.maximum()`.
* For steps 1 and 2, you will sum over the entries of $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$ and $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$.  
* For step 4 you will sum over the training examples.
#### Additional Hints
* Recall that the square of the L2 norm is the sum of the squared differences: $||x - y||_{2}^{2} = \sum_{i=1}^{N}(x_{i} - y_{i})^{2}$
* Note that the `anchor`, `positive` and `negative` encodings are of shape `(m,128)`, where m is the number of training examples and 128 is the number of elements used to encode a single example.
* For steps 1 and 2, you will maintain the number of `m` training examples and sum along the 128 values of each encoding. 
[tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/math/reduce_sum) has an `axis` parameter.  This chooses along which axis the sums are applied.  
* Note that one way to choose the last axis in a tensor is to use negative indexing (`axis=-1`).
* In step 4, when summing over training examples, the result will be a single scalar value.
* For `tf.reduce_sum` to sum across all axes, keep the default value `axis=None`.<jupyter_code># GRADED FUNCTION: triplet_loss
def triplet_loss(y_true, y_pred, alpha = 0.2):
    """
    Implementation of the triplet loss as defined by formula (3)
    
    Arguments:
    y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)
    
    Returns:
    loss -- real number, value of the loss
    """
    
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
    
    ### START CODE HERE ### (≈ 4 lines)
    # Step 1: Compute the (encoding) distance between the anchor and the positive
    pos_dist = tf.reduce_sum(tf.square(anchor - positive),axis = -1)
    # Step 2: Compute the (encoding) distance between the anchor and the negative
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis = -1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = pos_dist - neg_dist + alpha
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0))
    ### END CODE HERE ###
    
    return loss
with tf.Session() as test:
    tf.set_random_seed(1)
    y_true = (None, None, None)
    y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),
              tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),
              tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))
    loss = triplet_loss(y_true, y_pred)
    
    print("loss = " + str(loss.eval()))<jupyter_output>loss = 528.143
<jupyter_text>**Expected Output**:
    
        
            **loss**
        
        
           528.143
        
    
## 2 - Loading the pre-trained model
FaceNet is trained by minimizing the triplet loss. But since training requires a lot of data and a lot of computation, we won't train it from scratch here. Instead, we load a previously trained model. Load a model using the following cell; this might take a couple of minutes to run. <jupyter_code>FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])
load_weights_from_FaceNet(FRmodel)<jupyter_output><empty_output><jupyter_text>Here are some examples of distances between the encodings of three individuals:
   **Figure 4**:    Example of distance outputs between three individuals' encodings
Let's now use this model to perform face verification and face recognition!
## 3 - Applying the model
You are building a system for an office building where the building manager would like to offer facial recognition to allow the employees to enter the building.
You'd like to build a **Face verification** system that gives access to the list of people who live or work there. To get admitted, each person has to swipe an ID card (identification card) to identify themselves at the entrance. The face recognition system then checks that they are who they claim to be.
### 3.1 - Face Verification
Let's build a database containing one encoding vector for each person who is allowed to enter the office. To generate the encoding we use `img_to_encoding(image_path, model)`, which runs the forward propagation of the model on the specified image. 
Run the following code to build the database (represented as a python dictionary). This database maps each person's name to a 128-dimensional encoding of their face.<jupyter_code>database = {}
database["danielle"] = img_to_encoding("images/danielle.png", FRmodel)
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
database["tian"] = img_to_encoding("images/tian.jpg", FRmodel)
database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
database["kian"] = img_to_encoding("images/kian.jpg", FRmodel)
database["dan"] = img_to_encoding("images/dan.jpg", FRmodel)
database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
database["bertrand"] = img_to_encoding("images/bertrand.jpg", FRmodel)
database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
database["felix"] = img_to_encoding("images/felix.jpg", FRmodel)
database["benoit"] = img_to_encoding("images/benoit.jpg", FRmodel)
database["arnaud"] = img_to_encoding("images/arnaud.jpg", FRmodel)<jupyter_output><empty_output><jupyter_text>Now, when someone shows up at your front door and swipes their ID card (thus giving you their name), you can look up their encoding in the database, and use it to check if the person standing at the front door matches the name on the ID.
**Exercise**: Implement the verify() function which checks if the front-door camera picture (`image_path`) is actually the person called "identity". You will have to go through the following steps:
1. Compute the encoding of the image from `image_path`.
2. Compute the distance between this encoding and the encoding of the identity image stored in the database.
3. Open the door if the distance is less than 0.7, else do not open it.
* As presented above, you should use the L2 distance [np.linalg.norm](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html). 
* (Note: In this implementation, compare the L2 distance, not the square of the L2 distance, to the threshold 0.7.) 
#### Hints
* `identity` is a string that is also a key in the `database` dictionary.
* `img_to_encoding` has two parameters: the `image_path` and `model`.<jupyter_code># GRADED FUNCTION: verify
def verify(image_path, identity, database, model):
    """
    Function that verifies if the person on the "image_path" image is "identity".
    
    Arguments:
    image_path -- path to an image
    identity -- string, name of the person you'd like to verify the identity. Has to be an employee who works in the office.
    database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).
    model -- your Inception model instance in Keras
    
    Returns:
    dist -- distance between the image_path and the image of "identity" in the database.
    door_open -- True, if the door should open. False otherwise.
    """
    
    ### START CODE HERE ###
    
    # Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. (≈ 1 line)
    encoding = img_to_encoding(image_path, model)
    
    # Step 2: Compute distance with identity's image (≈ 1 line)
    dist = np.linalg.norm(encoding - database[identity])
    
    # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)
    if dist < 0.7:
        print("It's " + str(identity) + ", welcome in!")
        door_open = True
    else:
        print("It's not " + str(identity) + ", please go away")
        door_open = False
        
    ### END CODE HERE ###
        
    return dist, door_open<jupyter_output><empty_output><jupyter_text>Younes is trying to enter the office and the camera takes a picture of him ("images/camera_0.jpg"). Let's run your verification algorithm on this picture:
<jupyter_code>verify("images/camera_0.jpg", "younes", database, FRmodel)<jupyter_output>It's younes, welcome in!
<jupyter_text>**Expected Output**:
    
        
            **It's younes, welcome in!**
        
        
           (0.65939283, True)
        
    
Benoit, who does not work in the office, stole Kian's ID card and tried to enter the office. The camera took a picture of Benoit ("images/camera_2.jpg). Let's run the verification algorithm to check if benoit can enter.
<jupyter_code>verify("images/camera_2.jpg", "kian", database, FRmodel)<jupyter_output>It's not kian, please go away
<jupyter_text>**Expected Output**:
    
        
            **It's not kian, please go away**
        
        
           (0.86224014, False)
        
    
### 3.2 - Face Recognition
Your face verification system is mostly working well. But since Kian's ID card was stolen, when he came back to the office the next day he couldn't get in! 
To solve this, you'd like to change your face verification system to a face recognition system. This way, no one has to carry an ID card anymore. An authorized person can just walk up to the building, and the door will unlock for them! 
You'll implement a face recognition system that takes as input an image, and figures out if it is one of the authorized persons (and if so, who). Unlike the previous face verification system, we will no longer get a person's name as one of the inputs. 
**Exercise**: Implement `who_is_it()`. You will have to go through the following steps:
1. Compute the target encoding of the image from image_path
2. Find the encoding from the database that has smallest distance with the target encoding. 
    - Initialize the `min_dist` variable to a large enough number (100). It will help you keep track of what is the closest encoding to the input's encoding.
    - Loop over the database dictionary's names and encodings. To loop use `for (name, db_enc) in database.items()`.
        - Compute the L2 distance between the target "encoding" and the current "encoding" from the database.
        - If this distance is less than the min_dist, then set `min_dist` to `dist`, and `identity` to `name`.<jupyter_code># GRADED FUNCTION: who_is_it
def who_is_it(image_path, database, model):
    """
    Implements face recognition for the office by finding who is the person on the image_path image.
    
    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras
    
    Returns:
    min_dist -- the minimum distance between image_path encoding and the encodings from the database
    identity -- string, the name prediction for the person on image_path
    """
    
    ### START CODE HERE ### 
    
    ## Step 1: Compute the target "encoding" for the image. Use img_to_encoding(); see the example above. ## (≈ 1 line)
    encoding = img_to_encoding(image_path, model)
    
    ## Step 2: Find the closest encoding ##
    
    # Initialize "min_dist" to a large value, say 100 (≈1 line)
    min_dist = 100
    
    # Loop over the database dictionary's names and encodings.
    for (name, db_enc) in database.items():
        
        # Compute L2 distance between the target "encoding" and the current db_enc from the database. (≈ 1 line)
        dist = np.linalg.norm(encoding - db_enc)
        # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
        if dist<min_dist:
            min_dist = dist
            identity = name
    ### END CODE HERE ###
    
    if min_dist > 0.7:
        print("Not in the database.")
    else:
        print ("it's " + str(identity) + ", the distance is " + str(min_dist))
        
    return min_dist, identity<jupyter_output><empty_output><jupyter_text>Younes is at the front-door and the camera takes a picture of him ("images/camera_0.jpg"). Let's see if your who_it_is() algorithm identifies Younes. <jupyter_code>who_is_it("images/camera_0.jpg", database, FRmodel)<jupyter_output>it's younes, the distance is 0.659393
 | 
	no_license | 
	/Convolutional Neural Networks/Week4/Face_Recognition_v3a.ipynb | 
	vattikutiravi9/Deep-Learning-specialization | 10 | 
| 
	<jupyter_start><jupyter_text># Tutorial: Bring your own data (Part 3 of 3)
## Introduction
In the previous [Tutorial: Train a model in the cloud](2.train-model.ipynb) article, the CIFAR10 data was downloaded using the built-in `torchvision.datasets.CIFAR10` method in the PyTorch API. However, in many cases you will want to use your own data in a remote training run. This article focuses on the workflow for working with your own data in Azure Machine Learning. 
By the end of this tutorial you will have a better understanding of:
- How to upload your data to Azure
- Best practices for working with cloud data in Azure Machine Learning
- Working with command-line arguments
---
## Your machine learning code
By now you have your training script running in Azure Machine Learning, and can monitor the model performance. Let's _parametrize_ the training script by introducing
arguments. Using arguments will allow you to easily compare different hyperparameters.
Presently our training script is set to download the CIFAR10 dataset on each run. The Python code in [train-with-cloud-data-and-logging.py](../../code/models/pytorch/cifar10-cnn/train-with-cloud-data-and-logging.py) now uses **`argparse` to parametrize the script.**### Understanding your machine learning code changes
The script `train-with-cloud-data-and-logging.py` has leveraged the `argparse` library to set up the `--data-path`, `--learning-rate`, `--momentum`, and `--epochs` arguments:
```python
import argparse
...
parser = argparse.ArgumentParser()
parser.add_argument("--data-path", type=str, help="Path to the training data")
parser.add_argument("--learning-rate", type=float, default=0.001, help="Learning rate for SGD")
parser.add_argument("--momentum", type=float, default=0.9, help="Momentum for SGD")
parser.add_argument("--epochs", type=int, default=2, help="Number of epochs to train")
args = parser.parse_args()
```
The script was adapted to update the optimizer to use the user-defined parameters:
```python
optimizer = optim.SGD(
    net.parameters(),
    lr=args.learning_rate,     # get learning rate from command-line argument
    momentum=args.momentum,    # get momentum from command-line argument
)
```
Similarly the training loop was adapted to update the number of epochs to train to use the user-defined parameters:
```python
for epoch in range(args.epochs):
```
## Upload your data to Azure
In order to run this script in Azure Machine Learning, you need to make your training data available in Azure. Your Azure Machine Learning workspace comes equipped with a _default_ **Datastore** - an Azure Blob storage account - that you can use to store your training data.
> [!NOTE]
> Azure Machine Learning allows you to connect other cloud-based datastores that store your data. For more details, see [datastores documentation](./concept-data.md).
<jupyter_code>!pip install --upgrade torchvision
from azureml.core import Workspace, Dataset
from torchvision import datasets
ws = Workspace.from_config()
datasets.CIFAR10(".", download=True)
ds = ws.get_default_datastore()
ds.upload(
    src_dir="cifar-10-batches-py",
    target_path="datasets/cifar10",
    overwrite=False,
)
import os
import shutil
os.remove("cifar-10-python.tar.gz")
shutil.rmtree("cifar-10-batches-py")<jupyter_output><empty_output><jupyter_text>The `target_path` specifies the path on the datastore where the CIFAR10 data will be uploaded.
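When the run is submitted below, the mounted dataset is passed as the value of `--data-path`, so inside the job the training script sees it as an ordinary directory path. A minimal, hypothetical sketch of how the script might inspect it (this is not code from the tutorial's training script):
```python
# Hypothetical sketch inside the training script: the mounted datastore path
# arrives as a plain string via the --data-path argument shown earlier.
import os
print("Data directory contents:", os.listdir(args.data_path))
```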
## Submit your machine learning code to Azure Machine Learning
As you have done previously, create a new Python control script:<jupyter_code>import git
from pathlib import Path
prefix = Path(git.Repo(".", search_parent_directories=True).working_tree_dir)
prefix
from azureml.core import (
    Workspace,
    Experiment,
    Environment,
    ScriptRunConfig,
    Dataset,
)
from azureml.widgets import RunDetails
ws = Workspace.from_config()
ds = Dataset.File.from_files(
    path=(ws.get_default_datastore(), "datasets/cifar10")
)
env = Environment.from_conda_specification(
    name="pytorch-env-tutorial",
    file_path=prefix.joinpath("environments", "pytorch-example.yml"),
)
exp = Experiment(
    workspace=ws, name="getting-started-train-model-cloud-data-tutorial"
)
src = ScriptRunConfig(
    source_directory=prefix.joinpath(
        "code", "models", "pytorch", "cifar10-cnn"
    ),
    script="train-with-cloud-data-and-logging.py",
    compute_target="cpu-cluster",
    environment=env,
    arguments=[
        "--data-path",
        ds.as_mount(),
        "--learning-rate",
        0.003,
        "--momentum",
        0.92,
        "--epochs",
        2,
    ],
)
run = exp.submit(src)
RunDetails(run).show()<jupyter_output><empty_output> | 
	permissive | 
	/tutorials/getting-started/3.train-model-cloud-data.ipynb | 
	luisquintanilla/azureml-examples | 2 | 
| 
	<jupyter_start><jupyter_text># 1. Multi-layer Perceptron
### Train and evaluate a simple MLP on the Reuters newswire topic classification task.
This is a collection of documents that appeared on Reuters newswire in 1987. The documents were assembled and indexed with categories.
Dataset of 11,228 newswires from Reuters, labeled over 46 topics. As with the IMDB dataset, each wire is encoded as a sequence of word indexes (same conventions).
The word indexes are integers assigned by overall frequency in the dataset, so that for instance the integer "3" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: "only consider the top 10,000 most common words, but eliminate the top 20 most common words".
As a convention, "0" does not stand for a specific word, but instead is used to encode any unknown word.
Source: https://archive.ics.uci.edu/ml/datasets/Reuters-21578+Text+Categorization+Collection
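To make the frequency-rank encoding concrete, here is a small toy illustration (the vocabulary and sentence below are invented for this example and are not taken from the dataset):
```python
# Toy illustration of the encoding: lower index = more frequent word,
# and 0 is used for any out-of-vocabulary ("unknown") word.
vocab = {"the": 1, "of": 2, "to": 3, "said": 4, "oil": 5}   # invented frequency ranks
sentence = ["the", "price", "of", "oil"]
encoded = [vocab.get(word, 0) for word in sentence]
print(encoded)   # [1, 0, 2, 5] -- "price" is out of vocabulary here
```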
<jupyter_code># Reuters data
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility
#Import keras
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
max_words = 1000
batch_size = 32
nb_epoch = 5
import os
path_to_data = os.path.abspath(os.path.join('..', 'data', 'reuters.pkl'))
print('Loading data...')
(X_train, y_train), (X_test, y_test) = reuters.load_data(path_to_data, nb_words=max_words, test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
nb_classes = np.max(y_train)+1
print(nb_classes, 'classes')
print('Vectorizing sequence data...')
tokenizer = Tokenizer(nb_words=max_words)
X_train = tokenizer.sequences_to_matrix(X_train, mode='binary')
X_test = tokenizer.sequences_to_matrix(X_test, mode='binary')
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
print('Building model...')
model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
history = model.fit(X_train, Y_train,
                    nb_epoch=nb_epoch, batch_size=batch_size,
                    verbose=1, validation_split=0.1)
score = model.evaluate(X_test, Y_test,
                       batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])<jupyter_output><empty_output> | 
	permissive | 
	/notebooks/1. Multi-Layer-Perceptron.ipynb | 
	3catz/DeepLearning-NLP | 1 | 
| 
<jupyter_start><jupyter_text># The probability density and cumulative distribution functions of the Gaussian distribution
We examine how the probability density function and the cumulative distribution function change as the variance is increased from 1.0 to 4.5 in steps of 0.5.
Note that the $erf$ appearing in the cumulative distribution function is
$erf(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt$, the so-called error function.<jupyter_code>import numpy as np
from scipy.special import erf
import matplotlib.pyplot as plt
%matplotlib inline<jupyter_output><empty_output><jupyter_text># Probability density function<jupyter_code>x = np.arange(-5,5,0.01)
mu = 0
Sigma = np.arange(1,5,0.5)
for sigma in Sigma:
    f = 1/(np.sqrt(2*np.pi*(sigma**2)))*np.exp((-(x-mu)**2)/(2*(sigma**2)))
    plt.plot(x,f,label=sigma)
    plt.legend()<jupyter_output><empty_output><jupyter_text>We can see that the curve becomes flatter and smoother as the variance increases.# Cumulative distribution function<jupyter_code>for sigma in Sigma:
    g = (1/2)*(1+erf((x-mu)/np.sqrt(2*(sigma**2))))
    plt.plot(x,g,label=sigma)
    plt.legend()<jupyter_output><empty_output> | 
	no_license | 
	/codes/code_ipynb/gausu_distribution.ipynb | 
	kiwamizamurai/Lygometry | 3 | 
| 
	<jupyter_start><jupyter_text>Linear search<jupyter_code>pos = 0 #global variable for printing position
def search(my_list, n): 
    global pos 
    i = 0
    while i < len(my_list): #iterate through the list to find the element
        if my_list[i] == n:
            pos = i #updating position 
            return True
        i = i + 1;
    return False
my_list = [7,5,8,4,6,9,2,1,3,45,78,21,58,96,77,41,16]
n = 9
if search(my_list, n): #search for the number in the list
    print("at position",pos+1)
else:
    print("Not Found")<jupyter_output>at position 6
<jupyter_text>Bubble Sort<jupyter_code>my_list = [7,5,8,4,6,9,2,1,3,45,78,21,58,96,77,41,16]
for z in range(0,len(my_list)-1): 
  for x in range(0,len(my_list)-1):
    y = x+1
    
    #swap x with x+1 if "x>x+1"
    if(my_list[x]>my_list[y]):
      my_list[x], my_list[y] = my_list[y], my_list[x] 
      y += 1
print(my_list)   <jupyter_output>[1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 21, 41, 45, 58, 77, 78, 96]
<jupyter_text>Binary Search
<jupyter_code>#only works on a sorted list
start = 0 #start point of the search range
end = len(my_list)-1 #end point of the search range
n = 58
found = False
while start <= end:
  mid = int((start+end)/2) #mid position of the current range
  if my_list[mid] == n:   #stop as soon as the target is found
    print('number',my_list[mid],'is at position', mid+1)
    found = True
    break
  #if n < my_list[mid] the target can only be in the left half of the range
  #if n > my_list[mid] the target can only be in the right half of the range
  elif n < my_list[mid]:
    end = mid - 1
  else:
    start = mid + 1
if not found:
  print('Not found!')<jupyter_output>number 58 is at position 14
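For comparison, the standard library's bisect module gives the same search without writing the loop by hand; a small self-contained sketch:
```python
# Binary search via the standard library (assumes the input list is sorted).
from bisect import bisect_left

def binary_search(sorted_list, target):
    i = bisect_left(sorted_list, target)
    if i < len(sorted_list) and sorted_list[i] == target:
        return i          # 0-based position
    return -1             # not found

print(binary_search(sorted([7,5,8,4,6,9,2,1,3,45,78,21,58,96,77,41,16]), 58))   # prints 13
```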
<jupyter_text>Selection sort
<jupyter_code>'''
consider the current number as the minimum,
then compare it with the remaining numbers,
and swap if a smaller number is present
''' 
my_list = [7,5,8,4,6,9,2,1,3,45,78,21,58,96,77,41,16]
for x in range(len(my_list)):
  min_index = x #consider current num as min
  for y in range(x+1,len(my_list)):
    if my_list[min_index] > my_list[y]: #check selected num with current number 
      min_index = y
      
  my_list[x], my_list[min_index] = my_list[min_index], my_list[x]
print(my_list)<jupyter_output>[1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 21, 41, 45, 58, 77, 78, 96]
 | 
	no_license | 
	/basic_ML/algorithms.ipynb | 
	rohan-dhere/Neosoft_assignment | 4 | 
| 
<jupyter_start><jupyter_text># Data cleaning<jupyter_code>def remove_non_ascii(words):
    """Remove non-ASCII characters from list of tokenized words"""
    new_words = []
    for word in words:
        new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')
        new_words.append(new_word)
    return new_words
def to_lowercase(words):
    """Convert all characters to lowercase from list of tokenized words"""
    new_words = []
    for word in words:
      new_word = word.lower()
      new_words.append(new_word)
    return new_words
data_copy = data_t.copy()
data_copy['reviewText'] = data_copy['reviewText'].apply(to_lowercase)
  
    
def remove_punctuation(words):
    """Remove punctuation from list of tokenized words"""
    new_words = []
    for word in words:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words
def replace_numbers(words):
    """Replace all interger occurrences in list of tokenized words with textual representation"""
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words
def remove_stopwords(words):
    """Remove stop words from list of tokenized words"""
    new_words = []
    for word in words:
        if word not in stopwords.words('english'):
            new_words.append(word)
    return new_words
def preprocessing(words):
    words = to_lowercase(words)
    words = replace_numbers(words)
    words = remove_punctuation(words)
    words = remove_non_ascii(words)
    words = remove_stopwords(words)
    return words
# def to_lowercase(words):
#     """Convert all characters to lowercase from list of tokenized words"""
#     new_words = []
#     for word in words:
#       new_word = word.lower()
#       new_words.append(new_word)
#     return new_words
# data_copy = data_t.copy()
# data_copy['reviewText'] = data_copy.apply(lambda row: to_lowercase(row), axis = 1)
# data_copy['reviewText'].head()
<jupyter_output><empty_output><jupyter_text># Tokenization
<jupyter_code>import contractions
data_t['reviewText'] = data_t['reviewText'].apply(contractions.fix) #Expand contractions
data_t['words'] = data_t['reviewText'].apply(word_tokenize).apply(preprocessing) #Apply noise removal
data_t.head()<jupyter_output><empty_output><jupyter_text># Normalization
<jupyter_code>def stem_words(words):
    """Stem words in list of tokenized words"""
    stemmer = LancasterStemmer()
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems
def lemmatize_verbs(words):
    """Lemmatize verbs in list of tokenized words"""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas
def stem_and_lemmatize(words):
    stems = stem_words(words)
    lemmas = lemmatize_verbs(words)
    return stems + lemmas
data_t['words'] = data_t['words'].apply(stem_and_lemmatize) 
data_t.head()
data_t['processed_message'] = data_t['words'].apply(lambda x: ' '.join(map(str, x)))
data_t
# from the array stored in the helpful column we derive a single number that represents it.
# in this sense, [2,3] = 2/3 = 0.667
def calcularHelpful(array):
  numerador = int(array[0])
  denominador = int(array[1])
  if(denominador == 0 & numerador == 0):
    return 0
  else:
    return float(numerador/denominador)
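# Note: because "&" binds tighter than "==", the check above effectively tests only
# denominador == 0, which conveniently avoids a division by zero.
# Illustrative check with hypothetical values: calcularHelpful(['2', '3']) -> 2/3, about 0.667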
data_test = data_t.copy()
#check that everything looks right
data_test.iloc[9995]
#process the helpful information
data_test['helpful_chevere'] = data_test.helpful.apply(lambda x: x.replace(' ',''))
data_test['helpful_chevere'] = data_test.helpful_chevere.apply(lambda x: x[1:-1].split(','))
data_test['helpful_chevere'][9995][1]
int(data_test['helpful_chevere'][9995][1])
data_test['helpful_calculado'] = data_test.helpful_chevere.apply(lambda s:  calcularHelpful(s))
#X_data, y_data = data_t['processed_message'],data_t['reviewText']
X_data, y_data = data_test['processed_message'],data_test['helpful_calculado']
y_data = (y_data >= 0.5).astype(int)
y_data
print(pd.__version__) 
df = data_test.copy()
df['helpful_calculado'] =  (df['helpful_calculado'] >= 0.5).astype(int)
# saving as a CSV file
df.to_csv('datos-.csv', sep ='\t')
# X_data, y_data = data_t['processed_message'],data_t['helpful']
# #X_data, y_data = data_t['processed_message'],data_t['helpful']
# y_data = (y_data == 'helpful').astype(int)
# y_data<jupyter_output><empty_output><jupyter_text># Count-matrix representation
<jupyter_code>count = CountVectorizer()
X_count = count.fit_transform(X_data)
print(X_count.shape)
X_count.toarray()[0]<jupyter_output>(10000, 43071)
<jupyter_text>Binary matrix (via HashingVectorizer)<jupyter_code>vectorizer = HashingVectorizer(n_features=2**4)
X_bin = vectorizer.fit_transform(X_data)
print(X_bin.shape)
#.toarray()[0]
X_bin.todense()<jupyter_output><empty_output><jupyter_text>TF-IDF matrix<jupyter_code>vectorizer = TfidfVectorizer()
X_TIFID = vectorizer.fit_transform(X_data)
vectorizer.get_feature_names()[96]
X_TIFID.todense()<jupyter_output><empty_output><jupyter_text># Classification model<jupyter_code>#Training variables
X = X_count
#Target variable
Y = y_data
# Split the data into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2)<jupyter_output><empty_output><jupyter_text># Decision Tree Classifier<jupyter_code>arbol = DecisionTreeClassifier(criterion = 'gini', max_depth = 8)
arbol = arbol.fit(X= X_train, y = Y_train)
#https://ai.plainenglish.io/hyperparameter-tuning-of-decision-tree-classifier-using-gridsearchcv-2a6ebcaffeda
param_dict = {
    "criterion":['gini','entropy'],
    "max_depth": range(1,10),
    "min_samples_split":range(2,10),
    "min_samples_leaf":range(1,5)
}
grid = GridSearchCV(
                    arbol,
                    param_grid = param_dict,
                    cv = 10,
                    verbose = 1,
                    n_jobs = 1
)
grid.fit(X_test, Y_test)
grid.best_params_
grid.best_estimator_
grid.best_score_
arbol = DecisionTreeClassifier(criterion='entropy', max_depth=7, min_samples_leaf=4,
                       min_samples_split=9)
arbol = arbol.fit(X= X_train, y = Y_train)
# Compute the model's predictions on the test set.
y_pred = arbol.predict(X_test)
#Confusion matrix
confusion_matrix(Y_test, y_pred)
print(classification_report(Y_test, y_pred))
# Plot the confusion matrix
plot_confusion_matrix(arbol, X_test, Y_test)  
plt.show()
f1_score(y_true = Y_test, y_pred = y_pred)
# Show the classification report
print('Train')
print(classification_report(Y_train, y_pred_train))
print("------------")
print('Test')
print(classification_report(Y_test, y_pred_test))
y_pred_train = arbol.predict(X_train)
y_pred_test = arbol.predict(X_test)
print('Exactitud sobre entrenamiento: %.3f' % accuracy_score(Y_train, y_pred_train))
print('Exactitud sobre test: %.3f' % accuracy_score(Y_test, y_pred_test))<jupyter_output>Exactitud sobre entrenamiento: 0.696
Exactitud sobre test: 0.648
<jupyter_text># KNN Classifier<jupyter_code>#number_cols = data_t.dtypes[(data_t.dtypes == np.int64) | (data_t.dtypes == np.float64)].index 
#number_cols = data_t.select_dtypes(include = ['int64','float']).columns
#number_cols
#data_m = data_t['number_cols']
#Normalización de datos
#normalized_df = (data_m - data_m.min())/(data_m.max() - data_m.min())
#normalized_df.describe()
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
k = KFold(shuffle=True, random_state = seed)
# List of hyperparameters to tune
n_neighbors = list(range(1,10))
p=[1,2] #Distance metric: 1 = manhattan, 2 = euclidean, any other value = minkowski
#Convert to dictionary
hyperparameters = dict(n_neighbors=n_neighbors, p=p)
#Create new KNN object
knn_2 = KNeighborsClassifier()
#Use GridSearch
mejor_modelo_knn = GridSearchCV(knn_2, hyperparameters, cv=k)
#Fit the model
mejor_modelo_knn.fit(X_train, Y_train)
#Print The value of best Hyperparameters
print('Best p:', mejor_modelo_knn.best_estimator_.get_params()['p'])
print('Best n_neighbors:', mejor_modelo_knn.best_estimator_.get_params()['n_neighbors'])
# Get the best model.
neigh_final = mejor_modelo_knn.best_estimator_
# Now evaluate this model on the test set.
y_pred_train = neigh_final.predict(X_train)
y_pred_test = neigh_final.predict(X_test)
print('Exactitud sobre entrenamiento: %.2f' % accuracy_score(Y_train, y_pred_train))
print('Exactitud sobre test: %.2f' % accuracy_score(Y_test, y_pred_test))
#Y = data_t['helpful']
#X = data_t.drop(['helpful', 'helpful'], axis=1)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=seed)
neigh = KNeighborsClassifier(n_neighbors=5,p = 2)
neigh = neigh.fit(X_train, Y_train) 
y_pred = neigh.predict(X_test) 
#Confusion matrix
confusion_matrix(Y_test, y_pred)
# Plot the confusion matrix
plot_confusion_matrix(neigh, X_test, Y_test)  
plt.show()
# Show the classification report
print('Train')
print(classification_report(Y_train, y_pred_train))
print("------------")
print('Test')
print(classification_report(Y_test, y_pred_test))
#print("F1 Score:")
#f1_score(y_true = Y_test, y_pred = y_pred)
print("F1-Score:")
f1_score(y_true = Y_test, y_pred = y_pred)<jupyter_output>F1-Score:
<jupyter_text># SVC Classifier
First, a hyperparameter search is performed using KFold<jupyter_code>particiones = KFold(n_splits=10, shuffle=True, random_state = seed)
#define the search space
param_grid = {'kernel':['linear', 'poly','rbf']}
# define the model
# from sklearn.svm import LinearSVC
#  from sklearn.pipeline import make_pipeline
# from sklearn.preprocessing import StandardScaler
# from sklearn.datasets import make_classification
# clf = make_pipeline(StandardScaler(),LinearSVC(random_state=0, tol=1e-5))
clf = svm.SVC()
best_model = GridSearchCV(clf, param_grid, cv = particiones)
#train the model
best_model.fit(X_train, Y_train)
#the hyperparameter combination that works best for the model
best_model.best_params_
final_SVC = best_model.best_estimator_<jupyter_output><empty_output><jupyter_text>**Business metrics report**<jupyter_code>y_pred_train = final_SVC.predict(X_train)
y_pred_test = final_SVC.predict(X_test)
print('Exactitud sobre entrenamiento: %.3f' % accuracy_score(Y_train, y_pred_train))
print('Exactitud sobre test: %.3f' % accuracy_score(Y_test, y_pred_test))<jupyter_output>Exactitud sobre entrenamiento: 0.830
Exactitud sobre test: 0.674
<jupyter_text>F1 score<jupyter_code>print('Train')
print(classification_report(Y_train, y_pred_train))
print("------------")
print('Test')
print(classification_report(Y_test, y_pred_test))
# Generate the confusion matrix
confusion_matrix(Y_test, y_pred_test)
plot_confusion_matrix(final_SVC, X_test, Y_test)  
plt.show()  
# #plot showing the model's results on the dataset
# #code taken from: https://scikit-learn.org/0.18/auto_examples/svm/plot_iris.html
# print(__doc__)
# import numpy as np
# import matplotlib.pyplot as plt
# from sklearn import svm, datasets
# # create a mesh to plot in
# x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
# y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
#                      np.arange(y_min, y_max, h))
# # title for the plots
# titles = ['SVC with polynomial (degree 3) kernel']
# for i, clf in enumerate((svc, final_SVC)):
#     # Plot the decision boundary. For that, we will assign a color to each
#     # point in the mesh [x_min, x_max]x[y_min, y_max].
#     plt.subplot(2, 2, 1)
#     plt.subplots_adjust(wspace=0.001, hspace=0.001)
#     Z = final_SVC.predict(np.c_[xx.ravel(), yy.ravel()])
#     # Put the result into a color plot
#     Z = Z.reshape(xx.shape)
#     plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
#     # Plot also the training points
#     plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)
#     plt.xlabel('Sepal length')
#     plt.ylabel('Sepal width')
#     plt.xlim(xx.min(), xx.max())
#     plt.ylim(yy.min(), yy.max())
#     plt.xticks(())
#     plt.yticks(())
#     plt.title(titles[0])
# plt.show()<jupyter_output><empty_output> | 
	no_license | 
	/Proyecto1.ipynb | 
	mg-torres/biProyecto1 | 12 | 
| 
<jupyter_start><jupyter_text># PROJECT "STATISTICAL DATA ANALYSIS"**Project description:**
You are an analyst at Megaline, a federal mobile network operator. Customers are offered two tariff plans: Smart and Ultra. To adjust the advertising budget, the commercial department wants to understand which tariff brings in more money.
You will carry out a preliminary analysis of the tariffs on a small sample of customers. You have data on 500 Megaline users: who they are, where they are from, which tariff they use, and how many calls and messages each of them sent in 2018. You need to analyse customer behaviour and conclude which tariff is better.**Contents**
1. [GENERAL INFORMATION](#head)
2. [DATA PREPARATION](#pre_data)
   - [MONTHLY SERVICE USAGE PER USER:](#month_used)
    * [Minutes used](#minutes_used)
    * [Number of calls made](#calls_count)
    * [Internet traffic used](#internet_used)
    * [Messages sent](#messages_used)
    
   - [MONTHLY REVENUE PER USER:](#month_cost)
    * [Merging tables](#merge_data)
    * [Filling missing values](#replace_pass)
    * [Creating cost-calculation functions](#make_func)
    * [Final table](#end_data)
    
    
3. [DATA ANALYSIS](#data_analysis)
    - [MEAN, STANDARD DEVIATION AND VARIANCE](#3_ind)
    - [HISTOGRAMS](#hist)
    
    
4. [HYPOTHESIS TESTING](#hypo)
    - [REVENUE OF "SMART" VS "ULTRA" TARIFF USERS](#hypo_1)
    - [REVENUE OF MOSCOW USERS VS OTHER REGIONS](#hypo_2)
    
    
5. [CONCLUSION](#sum)  ## GENERAL INFORMATION *Import the libraries needed for this project:*<jupyter_code>import pandas as pd
import numpy as np 
import random
from matplotlib import pyplot as plt
from math import factorial
from scipy import stats as st
import math as mt
from scipy import stats as st
import seaborn as sns
#avoid hidden (truncated) columns - the tables get quite wide later on
pd.set_option('display.max_columns', None)<jupyter_output><empty_output><jupyter_text>---------------*Let's open the data files and examine them*<jupyter_code>#Table calls (call information):
data_calls = pd.read_csv('/datasets/calls.csv')
#Table internet (internet session information):
data_internet = pd.read_csv('/datasets/internet.csv', index_col=0)
#Table messages (message information):
data_messages = pd.read_csv('/datasets/messages.csv')
#Table tariffs (tariff information):
data_tariffs = pd.read_csv('/datasets/tariffs.csv')
#Table users (user information):
data_users = pd.read_csv('/datasets/users.csv')<jupyter_output><empty_output><jupyter_text>
Reviewer comment v1:
    
It's great that the data and the libraries are grouped and kept separate.
<jupyter_code>#call information
data_calls.info()
data_calls.sample(10)<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 202607 entries, 0 to 202606
Data columns (total 4 columns):
id           202607 non-null object
call_date    202607 non-null object
duration     202607 non-null float64
user_id      202607 non-null int64
dtypes: float64(1), int64(1), object(2)
memory usage: 6.2+ MB
<jupyter_text>*In the calls table we see the call dates and their durations. Some calls have zero duration (0.00) - these may be missed or dropped calls. Let's look at how many there are:*<jupyter_code>data_calls[data_calls['duration'] == 0.00]['duration'].count()<jupyter_output><empty_output><jupyter_text>*Wow, that is a whole 20%! I don't think we should get rid of these records - they affect the overall picture and give us the same kind of information as calls with non-zero duration.*<jupyter_code>#internet session information
data_internet.info()
data_internet.sample(10)<jupyter_output><class 'pandas.core.frame.DataFrame'>
Int64Index: 149396 entries, 0 to 149395
Data columns (total 4 columns):
id              149396 non-null object
mb_used         149396 non-null float64
session_date    149396 non-null object
user_id         149396 non-null int64
dtypes: float64(1), int64(1), object(2)
memory usage: 5.7+ MB
<jupyter_text>*The table shows the users' internet sessions: the user_id column is the specific user id, while the id column is the user id plus the sequence number of the internet session. mb_used indicates the amount of internet traffic used, in MB.*
*As with the call data, there are zero values (0.00) in the mb_used column. This may mean that the user did not use the internet during that session. This is part of our analysis and does not block further work on the project, so we keep these records.*<jupyter_code>#message information
data_messages.info()
data_messages.sample(10)<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 123036 entries, 0 to 123035
Data columns (total 3 columns):
id              123036 non-null object
message_date    123036 non-null object
user_id         123036 non-null int64
dtypes: int64(1), object(2)
memory usage: 2.8+ MB
<jupyter_text>*Similar to the internet sessions table, in the messages table the user_id column is the id of the user who sent SMS messages, while the id column on the left is the user id plus the sequence number of the SMS sent by that user. There are no missing values.*<jupyter_code>#tariff information
data_tariffs.info()
data_tariffs<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 8 columns):
messages_included        2 non-null int64
mb_per_month_included    2 non-null int64
minutes_included         2 non-null int64
rub_monthly_fee          2 non-null int64
rub_per_gb               2 non-null int64
rub_per_message          2 non-null int64
rub_per_minute           2 non-null int64
tariff_name              2 non-null object
dtypes: int64(7), object(1)
memory usage: 256.0+ bytes
<jupyter_text>*A concise table with the terms of the offered tariffs.*<jupyter_code>#user information
data_users.info()
data_users.sample(10)<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 500 entries, 0 to 499
Data columns (total 8 columns):
user_id       500 non-null int64
age           500 non-null int64
churn_date    38 non-null object
city          500 non-null object
first_name    500 non-null object
last_name     500 non-null object
reg_date      500 non-null object
tariff        500 non-null object
dtypes: int64(2), object(6)
memory usage: 31.4+ KB
<jupyter_text>*The table with user information! It already lets us see which tariffs are most popular, the average age of the users, and much more:*<jupyter_code>#average age of the users
data_users['age'].mean()
#distribution of users by city
data_users['city'].value_counts()
#tariff distribution
data_users['tariff'].value_counts()<jupyter_output><empty_output><jupyter_text>[churn_date] - *the date the user stopped using the tariff (if the value is missing, the tariff was still active when the data were exported). Let's look at the number of missing values:*<jupyter_code>data_users['churn_date'].isna().sum()<jupyter_output><empty_output><jupyter_text>*This means that more than 90% are still using Megaline tariffs. A great operator!****CONCLUSION***
- We imported the required libraries and the data tables for the project.
- We examined the data in the tables: at first glance they are not heavily burdened with errors or missing values, but we will look into this in more detail in the next step of the project, where we prepare the data for analysis.
- Some of the tables can probably be joined by id with the totals of used messages/MB of internet/calls.
## DATA PREPARATION Let's bring the data in line with the condition that "Megaline always rounds minutes and megabytes up": we use np.ceil, which rounds values up<jupyter_code>#data before processing
data_internet.head()
#data before processing
data_calls.head()
import numpy as np
data_calls['duration'] = np.ceil(data_calls['duration'])
data_internet['mb_used'] = np.ceil(data_internet['mb_used'])
#data after processing
data_calls.head()
#data after processing
data_internet.head()<jupyter_output><empty_output><jupyter_text>----**1. Minutes used** **MONTHLY SERVICE USAGE PER USER:** *Let's prepare the data for analysis: compute the monthly service usage and the revenue per user, and build the final table from this.*<jupyter_code>#Add a month column indicating the month of each record
data_calls['month'] = pd.DatetimeIndex(data_calls['call_date']).month
data_calls.head()
data_calls.info()
#create a pivot table to compute the monthly totals
data_calls_month_sum=data_calls.pivot_table(index=['user_id','month'
                                                  ], values='duration', aggfunc='sum')
data_calls_month_sum.head()<jupyter_output><empty_output><jupyter_text>**2. Number of calls made** <jupyter_code>#create a pivot table to compute the monthly totals
data_calls_month_count=data_calls.pivot_table(index=['user_id','month'
                                                    ], values='duration', aggfunc='count')
data_calls_month_count.head()<jupyter_output><empty_output><jupyter_text>**3. Internet traffic used** <jupyter_code>#Add a month column indicating the month of each record
data_internet['month'] = pd.DatetimeIndex(data_internet['session_date']).month
#создадим сводную таблицу для расчета данных по месяцам - неправильный вариант
#data_internet_month = data_internet.pivot_table(index=['user_id','month'
                                                      #], values='mb_used', aggfunc='count')
    
#создадим сводную таблицу для расчета данных по месяцам - правильный вариант
data_internet_month = data_internet.pivot_table(index=['user_id','month'
                                                      ], values='mb_used', aggfunc='sum')
data_internet_month.head()<jupyter_output><empty_output><jupyter_text>**4. Messages sent** <jupyter_code>#Add a month column indicating the month of each record
data_messages['month'] = pd.DatetimeIndex(data_messages['message_date']).month
#создадим сводную таблицу для расчета данных по месяцам
data_messages_month = data_messages.pivot_table(index=['user_id','month'
                                                      ], values='id', aggfunc='count')
data_messages_month.head()<jupyter_output><empty_output><jupyter_text>**MONTHLY REVENUE PER USER** *Let's merge the resulting pivot tables into one in order to compute the monthly revenue per user for each service package (calls, internet, SMS)***1. Merging tables** CALLS + INTERNET<jupyter_code>#merge the data with .merge
data_used = data_calls_month_sum.merge(data_internet_month, on=['user_id', 'month'
                                                               ], how='outer')
data_used<jupyter_output><empty_output><jupyter_text>& MESSAGES<jupyter_code>#объединим новые данные методом .merge
data_used = data_used.merge(data_messages_month, on=['user_id', 'month'], how='outer')
#для наглядности переименуем столбцы с данными в новой таблице
data_used.rename(columns={'duration':'call_duration'}, inplace=True)
data_used.rename(columns={'id':'message_used'}, inplace=True)
data_used.groupby(by='user_id')<jupyter_output><empty_output><jupyter_text>& CALLS<jupyter_code>#объединим новые данные методом .merge
data_used = data_used.merge(data_calls_month_count, on=['user_id', 'month'], how='outer')
#для наглядности переименуем столбцы с данными в новой таблице
data_used.rename(columns={'duration':'calls_count'}, inplace=True)
data_used<jupyter_output><empty_output><jupyter_text>& USERS<jupyter_code>#сбросим индексы в итоговой таблице
data_used = data_used.reset_index()
#объединим новые данные методом .merge
data_used = data_used.merge(data_users, how='left', on=['user_id'])
data_used<jupyter_output><empty_output><jupyter_text>& TARIFFS<jupyter_code>#переименуем столбцы для объединения по тарифам
data_tariffs.rename(columns={'tariff_name':'tariff'}, inplace=True)
#объединим данные методом .merge
data_used = data_used.merge(data_tariffs, how='left', on=['tariff'])
data_used.head()<jupyter_output><empty_output><jupyter_text>**2. Filling missing values** <jupyter_code>data_used.isnull().sum()
data_used['call_duration'] = data_used['call_duration'].fillna(0)
data_used['mb_used'] = data_used['mb_used'].fillna(0)
data_used['message_used'] = data_used['message_used'].fillna(0)
data_used['calls_count'] = data_used['calls_count'].fillna(0)
data_used['churn_date'] = data_used['churn_date'].fillna(0)
data_used.isnull().sum()<jupyter_output><empty_output><jupyter_text>**3. Creating cost-calculation functions** *Function to compute the monthly cost of calls over the limit*<jupyter_code>def cost_call (df):
    if (df['call_duration'] - df['minutes_included']) <0:
        return 0
    else:
        x = (df['call_duration'] - df['minutes_included']
            ) * df['rub_per_minute']
        return x
data_used['call_over_cost'] = data_used.apply(cost_call, axis = 1)<jupyter_output><empty_output><jupyter_text>*Function to compute the monthly cost of messages over the limit*<jupyter_code>def cost_messages (df):
    if (df['message_used'] - df['messages_included']) <0:
        return 0
    else:
        x = (df['message_used'] - df['messages_included']
            ) * df['rub_per_message']
        return x
data_used['mes_over_cost'] = data_used.apply(cost_messages, axis = 1)<jupyter_output><empty_output><jupyter_text>Function to compute the monthly cost of internet over the limit (in GB, rounded up)<jupyter_code>def cost_int (df):
    if (df['mb_used'] - df['mb_per_month_included']) <0:
        return 0
    else:
        x = np.ceil((df['mb_used'] - df['mb_per_month_included'])/1024) * df['rub_per_gb']
        return x
data_used['int_over_cost'] = data_used.apply(cost_int, axis = 1)
data_used.head()<jupyter_output><empty_output><jupyter_text>**4. Final table** <jupyter_code># monthly fee
# + cost of calls over the monthly limit
# + cost of messages over the monthly limit
# + cost of internet over the monthly limit
data_used['total_month_cost'] = data_used['rub_monthly_fee'
                                         ] + data_used['call_over_cost'
                                                ] + data_used['mes_over_cost'
                                                             ] + data_used['int_over_cost']
data_used.tail()
#total revenue from "smart" tariff users
data_used.query('tariff == "smart"')['total_month_cost'].sum()
#total revenue from "ultra" tariff users
data_used.query('tariff == "ultra"')['total_month_cost'].sum()<jupyter_output><empty_output><jupyter_text>**Revenue from "Smart" tariff users exceeds revenue from "Ultra" tariff users.*****CONCLUSION***We did a lot of work in the "Data preparation" section:
- Added a month column to each service table, then used pivot_table to compute the monthly totals of consumed services (internet, SMS, calls).
- Merged the resulting tables with .merge
- Filled the missing values so that the calculations use zeros where data are absent
- Wrote functions to compute the monthly over-limit figures for each service package
- Added a column with the monthly revenue per user to the final table## DATA ANALYSIS 
Let's describe customer behaviour based on the sample: how many minutes of calls, how many messages and how much internet traffic the users of each tariff need per month**1. Mean, standard deviation and variance** <jupyter_code>#create a pivot table, grouped by tariff, and compute for each service:
#the mean
#the standard deviation (std)
#the variance (var)
data_used_info = pd.pivot_table(data_used, index = ['tariff'
                                                   ], values = ['call_duration'
                                                                , 'mb_used'
                                                                , 'message_used']
                                , aggfunc = {np.var, np.std ,np.mean}).round()
data_used_info<jupyter_output><empty_output><jupyter_text>**2. Histograms** Let's plot the usage of each service package for each tariff as histograms<jupyter_code>fig, axes = plt.subplots(ncols = 3, figsize = (17, 5))
axes[0].set_title('smart & ultra call duration')
axes[0].hist(data_used.query("tariff == 'smart'")['call_duration'], alpha=0.5)
axes[0].hist(data_used.query("tariff == 'ultra'")['call_duration'], alpha=0.5)
axes[0].grid()
axes[0].set_ylabel('frequency')
axes[0].set_xlabel('minutes')
axes[0].legend(labels = ['smart', 'ultra'])
axes[1].set_title('smart & ultra internet used')
axes[1].hist(data_used.query("tariff == 'smart'")['mb_used'], alpha=0.5)
axes[1].hist(data_used.query("tariff == 'ultra'")['mb_used'], alpha=0.5)
axes[1].grid()
axes[1].set_ylabel('frequency')
axes[1].set_xlabel('mb')
axes[1].legend(labels = ['smart', 'ultra'])
axes[2].set_title('smart & ultra messages used')
axes[2].hist(data_used.query("tariff == 'smart'")['message_used'], alpha=0.5)
axes[2].hist(data_used.query("tariff == 'ultra'")['message_used'], alpha=0.5)
axes[2].grid()
axes[2].set_ylabel('frequency')
axes[2].set_xlabel('messages')
axes[2].legend(labels = ['smart', 'ultra'])<jupyter_output><empty_output><jupyter_text>-----------***CONCLUSIONS***
1. Minutes
 - **"Ultra" users consume more minutes on average** than "Smart" users (527 vs 418 min).
 - the histogram confirms this: the "Ultra" data and its peak are skewed to the right, and the spread is larger than for "Smart" (> 1200 minutes).
 
 
2. Internet
 - **"Ultra" users consume more internet traffic on average** than "Smart" users (19 vs 16 GB).
 - **the distribution of internet usage for "Ultra" users is smoother.** Peak values lie in the range of 20 to 50 MB, while the "Smart" tariff has a pronounced peak at 50 MB.
 
 
3. SMS
 - **"Ultra" users send more SMS on average** than "Smart" users (49 vs 33 messages).
 - the histogram confirms this: the "Ultra" data and its peak are slightly more skewed to the right, and the spread is larger than for "Smart" (> 100 messages).## HYPOTHESIS TESTING **1. Revenue of "Ultra" and "Smart" users** Let's test the hypothesis that **the mean revenue of "Ultra" and "Smart" users differs**. The null hypothesis: **the mean revenue of "Smart" and "Ultra" users is equal**<jupyter_code>#compare the sample variances
np.var(data_used.query("tariff == 'smart'")['total_month_cost'])
np.var(data_used.query("tariff == 'ultra'")['total_month_cost'])
#set the critical significance level
alpha = 0.05
#define variables with the revenue for each tariff
smart_value = data_used.query("tariff == 'smart'")['total_month_cost']
ultra_value = data_used.query("tariff == 'ultra'")['total_month_cost']
#test the hypothesis that the mean revenue of "Ultra" and "Smart" users is equal
results = st.ttest_ind(smart_value, ultra_value, equal_var = False)
#print the p-value
print('p-значение:', results.pvalue)
if results.pvalue < alpha:
    print("Отвергаем нулевую гипотезу")
else:
    print("Не получилось отвергнуть нулевую гипотезу")<jupyter_output>p-значение: 2.7240946993530856e-246
Отвергаем нулевую гипотезу
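The t-test above was run with equal_var = False (Welch's test) because the sample variances computed earlier differ. As an aside, that assumption could also be checked formally; a minimal sketch using the already imported scipy.stats (st) and the smart_value / ultra_value series defined above:
```python
# Optional check (sketch): Levene's test for equality of variances.
# A small p-value would support keeping equal_var=False above.
print(st.levene(smart_value, ultra_value))
```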
<jupyter_text>LET'S CHECK THE HYPOTHESIS WITH DATA SLICES<jupyter_code>data_used.query('tariff == "smart"')['total_month_cost'].mean()
data_used.query('tariff == "ultra"')['total_month_cost'].mean()<jupyter_output><empty_output><jupyter_text>Вывод: **отвергаем нулевую гипотезу** о том, что средняя выручка пользователей тарифов "Смарт" и "Ультра" равна. **Средняя выручка пользователей тарифов «Ультра» и «Смарт» различается**----------------------------------------------------------------------------------------------**2. Выручка пользователей из Москвы и других регионов** Проверим гипотезу о том, что **средняя выручка пользователей из Москвы отличается от выручки пользователей из других регионов**Сформулируем нулевую гипотезу: **средняя выручка пользователей Москвы равна средней выручке пользователей из других регионов**<jupyter_code>#сравним дисперсии выборок
np.var(data_used.loc[data_used['city'] == 'Москва', ['total_month_cost']])
np.var(data_used.loc[data_used['city'] != 'Москва', ['total_month_cost']])
#задаём критический уровень статистической зависимости
alpha = 0.05
#задаём переменные с выручкой по москве и другим регионам
moscow_value = data_used.loc[data_used['city'] == 'Москва', ['total_month_cost']]
other_value = data_used.loc[data_used['city'] != 'Москва', ['total_month_cost']]
#дисперсии
np.var(data_used.loc[data_used['city'] == 'Москва', ['total_month_cost']])
#уберем выбросы в значениях выручки
moscow_value = moscow_value.loc[(moscow_value['total_month_cost'] >= moscow_value['total_month_cost'].quantile(.05))&(
moscow_value['total_month_cost'] <= moscow_value['total_month_cost'].quantile(.95)), 'total_month_cost']
other_value = other_value.loc[(other_value['total_month_cost'] >= other_value['total_month_cost'].quantile(.05))&(
other_value['total_month_cost'] <= other_value['total_month_cost'].quantile(.95)), 'total_month_cost']
#проверяем гипотезу о равенстве средней выручки пользователей тарифов "Ультра" и "Смарт"
results = st.ttest_ind(moscow_value, other_value, equal_var = False)
#equal_var = False, так как дисперсии выборок разные
#выведем р-значение
print('p-значение:', results.pvalue)
if results.pvalue < alpha:
    print("Отвергаем нулевую гипотезу")
else:
    print("Не получилось отвергнуть нулевую гипотезу")<jupyter_output>p-значение: 0.23243668231806447
Не получилось отвергнуть нулевую гипотезу
<jupyter_text>We cannot reject the null hypothesis that **the mean revenue of Moscow users equals the mean revenue of users from other regions**. LET'S CHECK THE HYPOTHESIS WITH DATA SLICES<jupyter_code>data_used.query('city == "Москва"')['total_month_cost'].mean()
data_used.query('city != "Москва"')['total_month_cost'].mean()<jupyter_output><empty_output> | 
	no_license | 
	/Определение перспективного тарифа для телеком компании.ipynb | 
	ginger-boy/my_project | 31 | 
| 
	<jupyter_start><jupyter_text># Age Application
## Requirements:
* Get age of user as input
* Print how many seconds the user has lived
* use input(), int(), and print() functions 
* use the format() string method<jupyter_code>age = int(input('Enter your age: '))
print("You have lived for {} seconds.".format(age * 365 * 24 * 60 * 60))<jupyter_output>You have lived for 819936000 seconds.
<jupyter_text># Age in seconds function<jupyter_code>def age_yr2sec(age):
    return age * 365 * 24 * 60 * 60
print("You have lived for {} seconds.".format(age_yr2sec(age)))<jupyter_output>You have lived for 819936000 seconds.
 | 
	no_license | 
	/notebooks/section2_age_app.ipynb | 
	kristakernodle/learning_py_pgsql | 2 | 
| 
	<jupyter_start><jupyter_text># PoS<jupyter_code>task = "pos"
metric = "Accuracy"<jupyter_output><empty_output><jupyter_text>### mBERT<jupyter_code>short_model_name = "mbert"
stats.analysis_of_variance.one_way(task, short_model_name, metric, experiment, results_path, show_distribution=True)<jupyter_output><empty_output><jupyter_text>### XLM<jupyter_code>short_model_name = "xlm-roberta"
stats.analysis_of_variance.one_way(task, short_model_name, metric, experiment, results_path, show_distribution=True)<jupyter_output><empty_output><jupyter_text># Sentiment<jupyter_code>task = "sentiment"
metric = "Macro_F1"<jupyter_output><empty_output><jupyter_text>### mBERT<jupyter_code>short_model_name = "mbert"
stats.analysis_of_variance.one_way(task, short_model_name, metric, experiment, results_path, show_distribution=True)<jupyter_output><empty_output><jupyter_text>### XLM<jupyter_code>short_model_name = "xlm-roberta"
stats.analysis_of_variance.one_way(task, short_model_name, metric, experiment, results_path, show_distribution=True)<jupyter_output><empty_output><jupyter_text># Model comparison### PoS<jupyter_code>final1 = prepare_table("pos", "mbert", "Accuracy")
final1["Model"] = "mBERT"
final2 = prepare_table("pos", "xlm-roberta", "Accuracy")
final2["Model"] = "XLM"
final = pd.concat([final1, final2])
G1 = final.loc[final["Model"] == "mBERT", "Transfer-Loss"].values
G2 = final.loc[final["Model"] == "XLM", "Transfer-Loss"].values
stats.levene(G1, G2)
print("mBERT:", stats.normaltest(G1))
print("XLM:", stats.normaltest(G2))
sns.displot(x="Transfer-Loss", data=final, kind="kde", hue="Model", bw_adjust=0.5, palette="crest", common_norm=False)
stats.f_oneway(G1, G2)<jupyter_output><empty_output><jupyter_text>### Sentiment<jupyter_code>final1 = prepare_table("sentiment", "mbert", "Macro_F1")
final1["Model"] = "mBERT"
final2 = prepare_table("sentiment", "xlm-roberta", "Macro_F1")
final2["Model"] = "XLM"
final = pd.concat([final1, final2])
G1 = final.loc[final["Model"] == "mBERT", "Transfer-Loss"].values
G2 = final.loc[final["Model"] == "XLM", "Transfer-Loss"].values
stats.levene(G1, G2)
stats.kruskal(G1, G2)<jupyter_output><empty_output><jupyter_text># Two-way ANOVA### PoS<jupyter_code>final1 = prepare_table("pos", "mbert", "Accuracy")
final1["Model"] = "mBERT"
G1 = final1.loc[final1["Test-Group"] == "Intra-Group", "Transfer-Loss"].values
G2 = final1.loc[final1["Test-Group"] == "Inter-Group", "Transfer-Loss"].values
final2 = prepare_table("pos", "xlm-roberta", "Accuracy")
final2["Model"] = "XLM"
G3 = final2.loc[final2["Test-Group"] == "Intra-Group", "Transfer-Loss"].values
G4 = final2.loc[final2["Test-Group"] == "Inter-Group", "Transfer-Loss"].values
final = pd.concat([final1, final2])
final = final.rename(columns={"Transfer-Loss": "TL", "Test-Group": "Type"})
stats.levene(G1, G2, G3, G4)
print("Intra mBERT:", stats.normaltest(G1))
print("Inter mBERT:", stats.normaltest(G2))
print("Intra XLM:", stats.normaltest(G3))
print("Inter XLM:", stats.normaltest(G4))
formula = "TL ~ C(Type) + C(Model) + C(Type):C(Model)"
model = ols(formula, final).fit()
aov_table = anova_lm(model, typ=2)
print(aov_table)<jupyter_output>                         sum_sq     df           F        PR(>F)
C(Type)              978.158876    1.0    5.519445  1.909136e-02
C(Model)           21174.422285    1.0  119.480657  9.841311e-26
C(Type):C(Model)       2.939217    1.0    0.016585  8.975674e-01
Residual          120509.942495  680.0         NaN           NaN
<jupyter_text>### Sentiment<jupyter_code>final1 = prepare_table("sentiment", "mbert", "Macro_F1")
final1["Model"] = "mBERT"
G1 = final1.loc[final1["Test-Group"] == "Intra-Group", "Transfer-Loss"].values
G2 = final1.loc[final1["Test-Group"] == "Inter-Group", "Transfer-Loss"].values
final2 = prepare_table("sentiment", "xlm-roberta", "Macro_F1")
final2["Model"] = "XLM"
G3 = final2.loc[final2["Test-Group"] == "Intra-Group", "Transfer-Loss"].values
G4 = final2.loc[final2["Test-Group"] == "Inter-Group", "Transfer-Loss"].values
final = pd.concat([final1, final2])
final = final.rename(columns={"Transfer-Loss": "TL", "Test-Group": "Type"})
stats.levene(G1, G2, G3, G4)<jupyter_output><empty_output> | 
	no_license | 
	/analysis/stat_tests/acl/Intra_vs_inter_group.ipynb | 
	jerbarnes/typology_of_crosslingual | 10 | 
| 
	<jupyter_start><jupyter_text>Main pandas data models are Series (1D) and DataFrame (2D). Series is a subclass of numpy.ndarray.  
Index labels do not have to be ordered and duplicates are allowed.  
Indexes are for fast lookup and join. Hierarchical indexes.  
Data alignment, dataframe manipulation.  
In a dataframe, each column can be of different dtypes.  
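The hierarchical indexes mentioned above are not demonstrated later in this notebook, so here is a minimal illustrative sketch (invented data):
```python
# A Series with a two-level (hierarchical) MultiIndex.
import pandas as pd
from numpy.random import randn
s = pd.Series(randn(4), index=pd.MultiIndex.from_product([['a', 'b'], [1, 2]]))
print(s['a'])      # select everything under outer label 'a'
print(s['b'][2])   # drill down to a single value
```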
<jupyter_code>import pandas as pd
from numpy.random import randn
df1 = pd.DataFrame(randn(3,2))
df2 = pd.DataFrame(randn(3,2))
print(df1)
print(df2)
df1+df2<jupyter_output>          0         1
0  0.184519 -1.252510
1 -1.092305 -0.696186
2  0.139191 -0.433829
          0         1
0  0.599671 -1.103609
1 -0.479809 -0.091513
2 -0.391440 -0.611138
<jupyter_text>Groupby - split the data into different groups, apply a transformation to each group, and combine the results into the output (a short sketch follows below).
You can group by:
* DataFrame columns
* Arrays of labels
* Functions, applied to the axis labels
axis = 0 -> rows; axis = 1 -> columns
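A minimal illustrative sketch of split-apply-combine (the frame below is invented for this example):
```python
import pandas as pd
from numpy.random import randn

# Split rows into groups, aggregate each group, and combine the results.
df = pd.DataFrame({'key': ['foo', 'bar'] * 3, 'val': randn(6)})
print(df.groupby('key')['val'].mean())            # group by a DataFrame column
print(df.groupby(['x', 'y'] * 3)['val'].sum())    # group by an array of labels
```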
<jupyter_code>%matplotlib inline
import pandas as pd
from pandas import *
import numpy as np
import matplotlib.pyplot as plt
def side_by_side(*objs, **kwds):
    from pandas.core.common import adjoin
    space = kwds.get('space', 4)
    reprs = [repr(obj).split('\n') for obj in objs]
    print(adjoin(space, *reprs))
plt.rc("figure", figsize=(10,6))<jupyter_output><empty_output><jupyter_text>##Series<jupyter_code>randn(5)
labels = ['a','b','c','d','e']
s = Series(randn(5), index = labels)
s
'b' in s # Like a dictionary
s['b']
s.index
#Convert into dict
mapping = s.to_dict()
mapping
back_to_s = Series(mapping) #automatically sorts the keys and uses them as index
back_to_s
back_to_s2 = Series(mapping, index=['b','c','a','f']) #Only selects those indexes
back_to_s2
isnull(back_to_s2)  # checking for NaN or null values
np.isnan(back_to_s2) # checking for NaN or null values - method 2
back_to_s2.dropna() # drop NaN values
back_to_s2[notnull(back_to_s2)] # drop NaN values - method 2
s * 2 # Multiplies each value by 2
s[:3] # Slicing<jupyter_output><empty_output><jupyter_text>###DataFrame : 2D Collection of Series<jupyter_code>df = DataFrame({'a':randn(6), 'b':['foo', 'bar'] * 3, 'c':randn(6)})
df
print(['foo','bar'] * 3)
np.tile(['foo','bar'],3) # Numpy style
#Method 2
df2 = DataFrame([randn(3), randn(3), randn(3)], columns=["a","b","c"])
df2
df['a']
df.index
df['d'] = ['a','b','c'] * 2
df
df[:3]
df[:-2]
df[-2:]
df.xs(0) # row 0
df.xs(0).index
df.ix[0]
df.ix[0,'c']
%timeit df.ix[0,2]   # Number index works, but slower than column name
%timeit df.ix[0,'b']
%timeit df.get_value(0,'b')   # Slightly faster
df.ix[2:4,'b']
# df.get_value(2:4,'b') wont work
df.ix[2:4,'b':'e']  # Excluding 4 and e
df.ix[[0,2,4]]
%timeit df.ix[[0,2,4]]
%timeit df.loc[[0,2,4]]   # Slightly faster. 
df.ix[[1,2],['b','d']]
df['c']>0
df[df['c']>0]
df.ix[df['c']>0]
df.columns
df.index
dfi = pd.DataFrame([{'a':5,'b':0},{'a':3,'c':6},{'a':10,'b':12,'c':4}])
dfi<jupyter_output><empty_output><jupyter_text>### Using Date Range<jupyter_code>print([date for date in pd.date_range('04/20/2015',periods=10)])
daterange = DataFrame({'a':randn(6),
                      'b':['foo','bar']*3,
                      'c':randn(6)},
                      index=pd.date_range('1/1/2000',periods=6))
daterange
# Specify column order
dfc = DataFrame({'a':randn(6)},columns=['b','a'])
dfc<jupyter_output><empty_output><jupyter_text>#### pass dict of dicts as argument. Outer dict becomes columns, inner dict rows.```python
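A quick illustrative sketch of the dict-of-dicts constructor (values invented):
```python
# Outer dict keys become columns, inner dict keys become the row index.
DataFrame({'col1': {'r1': 1, 'r2': 2}, 'col2': {'r1': 3, 'r2': 4}})
```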
df = read_csv('file.csv', index_col = 0, parse_dates=True)
```If we create index with date_range or specify read_csv should index and parse_dates, then when we manipulate the dataframes, they are aligned properly based on the date as shown below<jupyter_code>df1 = DataFrame({'val':randn(6)},index=pd.date_range('1/1/2000',periods=6))
df2 = DataFrame({'val':randn(6)},index=pd.date_range('1/4/2000',periods=6))
print(df1)
print(df2)
print(df1+df2)
print((df1+df2).dropna())
# If you want to substitute zero and add
print(df1.add(df2,fill_value=0))<jupyter_output>                 val
2000-01-01  0.046315
2000-01-02 -0.672273
2000-01-03 -1.025196
2000-01-04 -1.010926
2000-01-05  0.255565
2000-01-06  0.312423
2000-01-07 -0.914625
2000-01-08  0.524231
2000-01-09  0.290329
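Returning to the dict-of-dicts constructor mentioned above, a minimal sketch with invented keys and values: the outer keys become the columns and the inner keys become the row index.

```python
from pandas import DataFrame

nested = {'col1': {'row1': 1, 'row2': 2},
          'col2': {'row1': 3, 'row2': 4}}
print(DataFrame(nested))
#       col1  col2
# row1     1     3
# row2     2     4
```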
<jupyter_text>###Conform dataframes, Re-indexing<jupyter_code>print(df1.reindex(df2.index))   # Conform df1 to df2's index range.
print(df1.reindex(df2.index,fill_value=0))  # Fill blanks with zero
print(df1.ix[df2.index])        # Method 2
# side_by_side(df1.ix[df2.index],df2) # Doesn't display side by side for some reason. So, trying the below method
# df1.ix[df2.index].append(df2)
concat([df1.ix[df2.index],df2],axis=1)
# Inner Join
a,b = df1.align(df2,join='inner')
print(concat([a,b],axis=1))
# Outer join
a,b = df1.align(df2,join='outer')
print(concat([a,b],axis=1))
from numpy.random import *
data = DataFrame({'clicks':random_integers(0,500000,5),'views':random_integers(500000,100000000,5),'buys':random_integers(5,100,5)})
#data['buys'] = abs(data['buys'])
#data['clicks'] = abs(data['clicks']) * 100
data
data.apply(np.mean)
data.mean()          # Method 2
data.mean(1, skipna=True)      # Mean of rows
data.apply(np.mean, axis=1) # Method 2
data.max()
print(data.clicks.max())
print(data.clicks.idxmax())
print(data.ix[data.clicks.idxmax()])
daterange.drop('b',axis=1,inplace=True)   # Dropping foobar column
daterange
print(daterange.c.idxmax()) # index of max value of c, 
                            # in this case, we get the date
print(daterange.ix[daterange.c.idxmax()]) # get entire record
# To get the max date of all columns, write a function and apply
def get_max_date(series):
    return series.idxmax()
daterange.apply(get_max_date)
# Show all rows corresponding to max values of each column
# How cool is this?!
daterange.ix[daterange.apply(get_max_date)]
print(daterange)
print(daterange.max())
print(daterange.min())
daterange.apply(lambda x:x.max() - x.min()) # Finding the range
daterange.plot()
daterange.ix[-1].plot(kind='bar') # Plot the last row<jupyter_output><empty_output> | 
	no_license | 
	/.ipynb_checkpoints/Data Analysis with Pandas-checkpoint.ipynb | 
	neo-anderson/datascience-ipython | 7 | 
| 
	<jupyter_start><jupyter_text># Toronto3
### 1. Import libraries<jupyter_code>import numpy as np # library to handle data in a vectorized manner
import pandas as pd # library for data analsysis
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
import json # library to handle JSON files
from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
import requests # library to handle requests
from bs4 import BeautifulSoup # library to parse HTML and XML documents
from pandas.io.json import json_normalize # transform JSON file into a pandas dataframe
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
# import k-means from clustering stage
from sklearn.cluster import KMeans
!pip install folium
import folium # map rendering library
print("Libraries imported.")<jupyter_output>Collecting folium
  Downloading https://files.pythonhosted.org/packages/72/ff/004bfe344150a064e558cb2aedeaa02ecbf75e60e148a55a9198f0c41765/folium-0.10.0-py2.py3-none-any.whl (91kB)
     |████████████████████████████████| 92kB 17.4MB/s eta 0:00:01
Requirement already satisfied: jinja2>=2.9 in /opt/conda/envs/Python36/lib/python3.6/site-packages (from folium) (2.10)
Collecting branca>=0.3.0 (from folium)
  Downloading https://files.pythonhosted.org/packages/63/36/1c93318e9653f4e414a2e0c3b98fc898b4970e939afeedeee6075dd3b703/branca-0.3.1-py3-none-any.whl
Requirement already satisfied: requests in /opt/conda/envs/Python36/lib/python3.6/site-packages (from folium) (2.21.0)
Requirement already satisfied: numpy in /opt/conda/envs/Python36/lib/python3.6/site-packages (from folium) (1.15.4)
Requirement already satisfied: MarkupSafe>=0.23 in /opt/conda/envs/Python36/lib/python3.6/site-packages (from jinja2>=2.9->folium) (1.1.0)
Requirement already satisfied: six in /opt/conda/envs/[...]<jupyter_text>### 2. Scrape data from the Wikipedia page into a DataFrame<jupyter_code># send the GET request
data = requests.get('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M').text
# parse data from the html into a beautifulsoup object
soup = BeautifulSoup(data, 'html.parser')
# create three lists to store table data
postalCodeList = []
boroughList = []
neighborhoodList = []<jupyter_output><empty_output><jupyter_text>**Using BeautifulSoup**
```python
# find the table
soup.find('table').find_all('tr')
# find all the rows of the table
soup.find('table').find_all('tr')
# for each row of the table, find all the table data
for row in soup.find('table').find_all('tr'):
    cells = row.find_all('td')
```<jupyter_code># append the data into the respective lists
for row in soup.find('table').find_all('tr'):
    cells = row.find_all('td')
    if(len(cells) > 0):
        postalCodeList.append(cells[0].text)
        boroughList.append(cells[1].text)
        neighborhoodList.append(cells[2].text.rstrip('\n')) # avoid new lines in neighborhood cell
# create a new DataFrame from the three lists
toronto_df = pd.DataFrame({"PostalCode": postalCodeList,
                           "Borough": boroughList,
                           "Neighborhood": neighborhoodList})
toronto_df.head()<jupyter_output><empty_output><jupyter_text>### 3. Drop cells with a borough that is "Not assigned"<jupyter_code># drop cells with a borough that is Not assigned
toronto_df_dropna = toronto_df[toronto_df.Borough != "Not assigned"].reset_index(drop=True)
toronto_df_dropna.head()<jupyter_output><empty_output><jupyter_text>### 4. Group neighborhoods in the same borough
<jupyter_code># group neighborhoods in the same borough
toronto_df_grouped = toronto_df_dropna.groupby(["PostalCode", "Borough"], as_index=False).agg(lambda x: ", ".join(x))
toronto_df_grouped.head()<jupyter_output><empty_output><jupyter_text>### 5. For Neighborhood="Not assigned", make the value the same as Borough<jupyter_code># for Neighborhood="Not assigned", make the value the same as Borough
for index, row in toronto_df_grouped.iterrows():
    if row["Neighborhood"] == "Not assigned":
        row["Neighborhood"] = row["Borough"]
        
toronto_df_grouped.head()<jupyter_output><empty_output><jupyter_text>### 6. Check whether it is the same as required by the question<jupyter_code># create a new test dataframe
column_names = ["PostalCode", "Borough", "Neighborhood"]
test_df = pd.DataFrame(columns=column_names)
test_list = ["M5G", "M2H", "M4B", "M1J", "M4G", "M4M", "M1R", "M9V", "M9L", "M5V", "M1B", "M5A"]
for postcode in test_list:
    test_df = test_df.append(toronto_df_grouped[toronto_df_grouped["PostalCode"]==postcode], ignore_index=True)
    
test_df<jupyter_output><empty_output><jupyter_text>### 7. Print the number of rows of the cleaned dataframe<jupyter_code># print the number of rows of the cleaned dataframe
toronto_df_grouped.shape<jupyter_output><empty_output><jupyter_text>### 8. Load the coordinates from the csv file on Coursera<jupyter_code># load the coordinates from the csv file on Coursera
coordinates = pd.read_csv("https://ibm.box.com/shared/static/9afzr83pps4pwf2smjjcf1y5mvgb18rr.csv")
coordinates.head()
# rename the column "PostalCode"
coordinates.rename(columns={"Postal Code": "PostalCode"}, inplace=True)
coordinates.head()<jupyter_output><empty_output><jupyter_text>### 9. Merge two tables to get the coordinates<jupyter_code># merge two table on the column "PostalCode"
toronto_df_new = toronto_df_grouped.merge(coordinates, on="PostalCode", how="left")
toronto_df_new.head()<jupyter_output><empty_output><jupyter_text>### 10. Finally, check to make sure the coordinates are added as required by the question<jupyter_code># create a new test dataframe
column_names = ["PostalCode", "Borough", "Neighborhood", "Latitude", "Longitude"]
test_df = pd.DataFrame(columns=column_names)
test_list = ["M5G", "M2H", "M4B", "M1J", "M4G", "M4M", "M1R", "M9V", "M9L", "M5V", "M1B", "M5A"]
for postcode in test_list:
    test_df = test_df.append(toronto_df_new[toronto_df_new["PostalCode"]==postcode], ignore_index=True)
    
test_df<jupyter_output><empty_output><jupyter_text>### 11. Use geopy library to get the latitude and longitude values of Toronto<jupyter_code>address = 'Toronto'
geolocator = Nominatim(user_agent="my-application")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('The geographical coordinates of Toronto are {}, {}.'.format(latitude, longitude))<jupyter_output>The geographical coordinates of Toronto are 43.653963, -79.387207.
<jupyter_text>### 12. Create a map of Toronto with neighborhoods superimposed on top<jupyter_code># create map of Toronto using latitude and longitude values
map_toronto = folium.Map(location=[latitude, longitude], zoom_start=10)
# add markers to map
for lat, lng, borough, neighborhood in zip(toronto_df_new['Latitude'], toronto_df_new['Longitude'], toronto_df_new['Borough'], toronto_df_new['Neighborhood']):
    label = '{}, {}'.format(neighborhood, borough)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color='blue',
        fill=True,
        fill_color='#3186cc',
        fill_opacity=0.7).add_to(map_toronto)  
    
map_toronto <jupyter_output><empty_output><jupyter_text>### 13. Filter only boroughs that contain the word Toronto <jupyter_code># filter borough names that contain the word Toronto
borough_names = list(toronto_df_new.Borough.unique())
borough_with_toronto = []
for x in borough_names:
    if "toronto" in x.lower():
        borough_with_toronto.append(x)
        
borough_with_toronto
# create a new DataFrame with only boroughs that contain the word Toronto
toronto_df_new = toronto_df_new[toronto_df_new['Borough'].isin(borough_with_toronto)].reset_index(drop=True)
print(toronto_df_new.shape)
toronto_df_new.head()
# create map of Toronto using latitude and longitude values
map_toronto = folium.Map(location=[latitude, longitude], zoom_start=10)
# add markers to map
for lat, lng, borough, neighborhood in zip(toronto_df_new['Latitude'], toronto_df_new['Longitude'], toronto_df_new['Borough'], toronto_df_new['Neighborhood']):
    label = '{}, {}'.format(neighborhood, borough)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color='blue',
        fill=True,
        fill_color='#3186cc',
        fill_opacity=0.7).add_to(map_toronto)  
    
map_toronto <jupyter_output><empty_output><jupyter_text>### 14. Use the Foursquare API to explore the neighborhoods<jupyter_code># define Foursquare Credentials and Version
CLIENT_ID = 'EXL4HEJIXBUND5ZSMSMI3Z45ONSCROGWZCGWD0KY3T4UMVKH' # your Foursquare ID
CLIENT_SECRET = 'FF1FC0HM0NQYK3JRUTFKTXVDXPVX0YUKALAGFWT3ZQTWXKVM' # your Foursquare Secret
VERSION = '20180605' # Foursquare API version
print('Your credentials:')
print('CLIENT_ID: ' + CLIENT_ID)
print('CLIENT_SECRET:' + CLIENT_SECRET)<jupyter_output>Your credentials:
CLIENT_ID: EXL4HEJIXBUND5ZSMSMI3Z45ONSCROGWZCGWD0KY3T4UMVKH
CLIENT_SECRET:FF1FC0HM0NQYK3JRUTFKTXVDXPVX0YUKALAGFWT3ZQTWXKVM
<jupyter_text>**Now, let's get the top 100 venues that are within a radius of 500 meters.**<jupyter_code>radius = 500
LIMIT = 100
venues = []
for lat, long, post, borough, neighborhood in zip(toronto_df_new['Latitude'], toronto_df_new['Longitude'], toronto_df_new['PostalCode'], toronto_df_new['Borough'], toronto_df_new['Neighborhood']):
    url = "https://api.foursquare.com/v2/venues/explore?client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}".format(
        CLIENT_ID,
        CLIENT_SECRET,
        VERSION,
        lat,
        long,
        radius, 
        LIMIT)
    
    results = requests.get(url).json()["response"]['groups'][0]['items']
    
    for venue in results:
        venues.append((
            post, 
            borough,
            neighborhood,
            lat, 
            long, 
            venue['venue']['name'], 
            venue['venue']['location']['lat'], 
            venue['venue']['location']['lng'],  
            venue['venue']['categories'][0]['name']))
# convert the venues list into a new DataFrame
venues_df = pd.DataFrame(venues)
# define the column names
venues_df.columns = ['PostalCode', 'Borough', 'Neighborhood', 'BoroughLatitude', 'BoroughLongitude', 'VenueName', 'VenueLatitude', 'VenueLongitude', 'VenueCategory']
print(venues_df.shape)
venues_df.head()<jupyter_output>(1688, 9)
<jupyter_text>**Let's check how many venues were returned for each PostalCode**<jupyter_code>venues_df.groupby(["PostalCode", "Borough", "Neighborhood"]).count()<jupyter_output><empty_output><jupyter_text>**Let's find out how many unique categories can be curated from all the returned venues**<jupyter_code>print('There are {} uniques categories.'.format(len(venues_df['VenueCategory'].unique())))
venues_df['VenueCategory'].unique()[:50]<jupyter_output><empty_output><jupyter_text>### 15. Analyze Each Area<jupyter_code># one hot encoding
toronto_onehot = pd.get_dummies(venues_df[['VenueCategory']], prefix="", prefix_sep="")
# add postal, borough and neighborhood column back to dataframe
toronto_onehot['PostalCode'] = venues_df['PostalCode'] 
toronto_onehot['Borough'] = venues_df['Borough'] 
toronto_onehot['Neighborhoods'] = venues_df['Neighborhood'] 
# move postal, borough and neighborhood column to the first column
fixed_columns = list(toronto_onehot.columns[-3:]) + list(toronto_onehot.columns[:-3])
toronto_onehot = toronto_onehot[fixed_columns]
print(toronto_onehot.shape)
toronto_onehot.head()<jupyter_output>(1688, 239)
<jupyter_text>**Next, let's group rows by neighborhood and by taking the mean of the frequency of occurrence of each category**<jupyter_code>toronto_grouped = toronto_onehot.groupby(["PostalCode", "Borough", "Neighborhoods"]).mean().reset_index()
print(toronto_grouped.shape)
toronto_grouped<jupyter_output>(38, 239)
<jupyter_text>Now let's create the new dataframe and display the top 10 venues for each PostalCode.<jupyter_code>num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
areaColumns = ['PostalCode', 'Borough', 'Neighborhoods']
freqColumns = []
for ind in np.arange(num_top_venues):
    try:
        freqColumns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
    except:
        freqColumns.append('{}th Most Common Venue'.format(ind+1))
columns = areaColumns+freqColumns
# create a new dataframe
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['PostalCode'] = toronto_grouped['PostalCode']
neighborhoods_venues_sorted['Borough'] = toronto_grouped['Borough']
neighborhoods_venues_sorted['Neighborhoods'] = toronto_grouped['Neighborhoods']
for ind in np.arange(toronto_grouped.shape[0]):
    row_categories = toronto_grouped.iloc[ind, :].iloc[3:]
    row_categories_sorted = row_categories.sort_values(ascending=False)
    neighborhoods_venues_sorted.iloc[ind, 3:] = row_categories_sorted.index.values[0:num_top_venues]
# neighborhoods_venues_sorted.sort_values(freqColumns, inplace=True)
print(neighborhoods_venues_sorted.shape)
neighborhoods_venues_sorted<jupyter_output>(38, 13)
<jupyter_text>### 16. Cluster Areas
Run k-means to cluster the Toronto areas into 5 clusters.<jupyter_code># set number of clusters
kclusters = 5
toronto_grouped_clustering = toronto_grouped.drop(["PostalCode", "Borough", "Neighborhoods"], 1)
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering)
# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:10] 
# create a new dataframe that includes the cluster as well as the top 10 venues for each neighborhood.
toronto_merged = toronto_df_new.copy()
# add clustering labels
toronto_merged["Cluster Labels"] = kmeans.labels_
# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood
toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.drop(["Borough", "Neighborhoods"], 1).set_index("PostalCode"), on="PostalCode")
print(toronto_merged.shape)
toronto_merged.head() # check the last columns!
# sort the results by Cluster Labels
print(toronto_merged.shape)
toronto_merged.sort_values(["Cluster Labels"], inplace=True)
toronto_merged<jupyter_output>(38, 16)
<jupyter_text>**Finally, let's visualize the resulting clusters**<jupyter_code># create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i+x+(i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, post, bor, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'], toronto_merged['PostalCode'], toronto_merged['Borough'], toronto_merged['Neighborhood'], toronto_merged['Cluster Labels']):
    label = folium.Popup('{} ({}): {} - Cluster {}'.format(bor, post, poi, cluster), parse_html=True)
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        color=rainbow[cluster-1],
        fill=True,
        fill_color=rainbow[cluster-1],
        fill_opacity=0.7).add_to(map_clusters)
       
map_clusters<jupyter_output><empty_output><jupyter_text>### 17. Examine Clusters#### Cluster 1<jupyter_code>toronto_merged.loc[toronto_merged['Cluster Labels'] == 0, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output><jupyter_text>#### Cluster 2<jupyter_code>toronto_merged.loc[toronto_merged['Cluster Labels'] == 1, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output><jupyter_text>#### Cluster 3<jupyter_code>toronto_merged.loc[toronto_merged['Cluster Labels'] == 2, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output><jupyter_text>#### Cluster 4<jupyter_code>toronto_merged.loc[toronto_merged['Cluster Labels'] == 3, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output><jupyter_text>#### Cluster 5<jupyter_code>toronto_merged.loc[toronto_merged['Cluster Labels'] == 4, toronto_merged.columns[[1] + list(range(5, toronto_merged.shape[1]))]]<jupyter_output><empty_output> | 
	no_license | 
	/Toronto 3.ipynb | 
	leduc0801/Capstone_project | 28 | 
| 
	<jupyter_start><jupyter_text># Capstone Notebook
This notebook will be used for developing a capstone project. This project is a part of IBM's data science specialisation.<jupyter_code>import pandas as pd
import numpy as np
print("Hello Capstone Project Course!")<jupyter_output>Hello Capstone Project Course!
 | 
	no_license | 
	/Ibm-NoteBook.ipynb | 
	modeware/ibm-assign | 1 | 
| 
	<jupyter_start><jupyter_text>## 1.0 Import Function<jupyter_code>from META import SA_ALGORITHM_0001
from META_GRAPHICS_LIBRARY import *<jupyter_output><empty_output><jupyter_text>## 2.0 Setup <jupyter_code>SETUP = {'N_REP': 10,
         'N_ITER': 10,
         'N_POP': 1,
         'D': 5,
         'X_L': [-30] * 5,
         'X_U': [30] * 5,
         'SIGMA': 0.20,
         'ALPHA': 0.98,
         'TEMP': 100,
         'STOP_CONTROL_TEMP': 2,
         'NULL_DIC': None
        }<jupyter_output><empty_output><jupyter_text>## 3.0 OF<jupyter_code># ROSENBROCK FUNCTION
def ROSENBROCK(X):
    """
    Rosenbrock benchmark function D-dimension
    """
    DIM = len(X)
    SUM = 0
    for I_COUNT in range(DIM - 1):
        X_I = X[I_COUNT]
        X_NEXT = X[I_COUNT + 1]
        NEW = 100 * (X_NEXT - X_I ** 2) ** 2 + (X_I - 1) ** 2
        SUM += NEW
    Y = SUM
    return Y
# OBJ. Function
def OF_FUNCTION(X, NULL_DIC):
    OF = ROSENBROCK(X)
    return OF<jupyter_output><empty_output><jupyter_text>## 4.0 Example<jupyter_code>[RESULTS_REP, BEST_REP, AVERAGE_REP, WORST_REP, STATUS] = SA_ALGORITHM_0001(OF_FUNCTION, SETUP)<jupyter_output><empty_output><jupyter_text>## 5.0 View results<jupyter_code>RESULTS_REP[0]
STATUS
BEST_REP[7]
BEST_REP
OF = BEST_REP[0]['OF'][10]
OF
BEST_REP[0]
AVERAGE_REP
import numpy as np
OF = [-50, 2, 3, 4, -90, 6, 7]
wander = np.argsort(OF)
wander
DATASET = {'DATASET': BEST_REP,
           'NUMBER OF REPETITIONS': 10,
           'DATA TYPE': 'OF'}
PLOT_SETUP = {'NAME': 'WANDER',
              'WIDTH': 0.40, 
              'HEIGHT': 0.20, 
              'X AXIS LABEL': 'OF',
              'X AXIS SIZE': 20,
              'Y AXIS SIZE': 20,
              'AXISES COLOR': '#000000',
              'LABELS SIZE': 16,
              'LABELS COLOR': '#000000',  
              'CHART COLOR': '#FEB625',
              'KDE': False,
              'BINS': 20,
              'DPI': 600, 
              'EXTENSION': '.svg'}
META_PLOT_004(DATASET, PLOT_SETUP)<jupyter_output>C:\Users\Wanderlei\anaconda3\lib\site-packages\seaborn\_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
  FutureWarning
 | 
	no_license | 
	/Algoritmos em organização/SA example.ipynb | 
	wmpjrufg/META_PLATAFORMA | 5 | 
| 
	<jupyter_start><jupyter_text>Data source: Taiwan government open data platform (政府資料開放平台)<jupyter_code>df = pd.read_csv('https://gis.taiwan.net.tw/od/01_PRD/%E6%AD%B7%E5%B9%B4%E8%A7%80%E5%85%89%E5%A4%96%E5%8C%AF%E6%94%B6%E5%85%A5%E7%B5%B1%E8%A8%88.csv',encoding='big5')
df.head()<jupyter_output><empty_output><jupyter_text>Strip the thousands-separator commas<jupyter_code>locale.setlocale(locale.LC_NUMERIC, '')
x = np.array(df['來臺人數'].apply(locale.atoi))
y = np.array(df['觀光外匯收入_美元'].apply(locale.atoi))
plt.scatter(x,y)<jupyter_output><empty_output><jupyter_text>Run a linear regression on the data<jupyter_code>regr=LinearRegression()
X = x.reshape(len(x), 1)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 999)
plt.scatter(x_train, y_train)
plt.scatter(x_test, y_test)
regr.fit(x_train, y_train)
y_pred=regr.predict(x_test)
plt.scatter(x_test,y_test)
plt.plot(x_test,y_pred,'c')
plt.xlabel("來台人數")
plt.ylabel("觀光外匯收入")<jupyter_output><empty_output> | 
	no_license | 
	/HW5_1 .ipynb | 
	Athenakk/Pythonhw | 3 | 
| 
	<jupyter_start><jupyter_text> ╔══Alai-DeepLearning════════════════════════════╗
###     **✎  Week 5. Machine Learning Basis**
# Section 4. Implementing Linear Regression with TensorFlow
### _Objective_
1. We implement linear regression using TensorFlow. 
  
╚═════════════════════════════════════════╝<jupyter_code>%matplotlib inline
!pip install tensorboardcolab
import tensorboardcolab
import numpy as np
import pandas as pd
import tensorflow as tf 
import matplotlib.pyplot as plt<jupyter_output>Requirement already satisfied: tensorboardcolab in /Users/ksj/anaconda3/lib/python3.6/site-packages (0.0.22)
You are using pip version 18.1, however version 19.0.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
<jupyter_text>### [Optional.  Tensorflow Graph Visualization ]
---
> _A helper method for visually displaying the graph that TensorFlow builds, inside Jupyter._
<jupyter_code>from IPython.display import clear_output, Image, display, HTML
import numpy as np    
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def."""
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add() 
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                tensor.tensor_content = "<stripped %d bytes>"%size
    return strip_def
def show_graph(graph_def, max_const_size=32):
    """Visualize TensorFlow graph."""
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '"'))
    display(HTML(iframe))<jupyter_output><empty_output><jupyter_text>## Example data) The 'Okja' audience count predicted with big data
"Predict the expected audience count for Okja from the 'want to see' counts and audience counts of previously released movies"
reference : [Okja audience of 7.27 million predicted with big data](http://platum.kr/wp-content/uploads/2017/06/unnamed-12.png)
<jupyter_code>movie_df = pd.DataFrame([
    [8759, 487],
    [10132,612],
    [12078,866],
    [16430,1030]],
    columns=["nums_want_to_see","nums_audience"])
movie_df.index = ["마션","킹스맨","캡틴아메리카","인터스텔라"]
movie_df<jupyter_output><empty_output><jupyter_text>
# \[ 1. Why Training Does Not Go Well \]
----
----
> *Let's implement the linear regression model as a graph.*## 1. Retraining the previous code
---
<jupyter_code># Compute the value of the loss function
def calculate_MSE(W0, W1):
    X = movie_df.nums_want_to_see # independent variable, 'want to see' count
    y_true = movie_df.nums_audience # actual values
    y_pred = W1*X+W0 # prediction from the current weights
    return ((y_true - y_pred)**2).mean() # MSE
# Derivative with respect to W0
def calculate_wo_derivative(w0,w1):
    x = movie_df.nums_want_to_see # independent variable, 'want to see' count
    y_true = movie_df.nums_audience # actual values
    return (w1*x + w0 - y_true).mean()
# Derivative with respect to W1
def calculate_w1_derivative(w0,w1):
    x = movie_df.nums_want_to_see # independent variable, 'want to see' count
    y_true = movie_df.nums_audience # actual values
    return ((w1*x + w0 - y_true)*x).mean()
w0 = -500
w1 = -0.75
mse = calculate_MSE(w0,w1)
# Store the updated weights and MSE
w0_history = [w0]
w1_history = [w1]
mse_history = [mse]
dw0_history = []
dw1_history = []
alpha = 2e-10 # learning rate
epoch = 100 # number of training iterations
for i in range(epoch):
    # Fetch the previous weights
    old_w0 = w0_history[-1]
    old_w1 = w1_history[-1]
    
    # Recompute the gradients
    dw0 = calculate_wo_derivative(old_w0,old_w1)
    dw1 = calculate_w1_derivative(old_w0,old_w1)
    
    # Update the weights via gradient descent
    new_w0 = old_w0 - alpha * dw0
    new_w1 = old_w1 - alpha * dw1
    new_mse = calculate_MSE(new_w0,new_w1)
    
    # Store the results
    w0_history.append(new_w0)
    w1_history.append(new_w1)
    dw0_history.append(dw0)
    dw1_history.append(dw1)    
    mse_history.append(new_mse)<jupyter_output><empty_output><jupyter_text>## 2. Checking the results
---<jupyter_code>hist_df = pd.DataFrame({
    "W0":w0_history[:-1],
    "W1":w1_history[:-1],
    "dw0":dw0_history,
    "dw1":dw1_history,    
    "Loss":mse_history[:-1]
})<jupyter_output><empty_output><jupyter_text>### (1) Check the loss values<jupyter_code>hist_df.plot(y='Loss')
plt.title("Loss Function")
plt.show()<jupyter_output><empty_output><jupyter_text>### (2) Check how the weights evolve<jupyter_code># W1 changed somewhat, but W0 did not change at all.
hist_df.plot(y=['W0','W1'],subplots=True,figsize=(10,10))
plt.show()<jupyter_output><empty_output><jupyter_text>### (3) Check how the gradients evolve<jupyter_code># The delta values differ far too much in scale.
# dw0 is on the order of 10^4, while dw1 is on the order of 10^8.
hist_df.plot(y=['dw0','dw1'],subplots=True,figsize=(10,10))
plt.show()<jupyter_output><empty_output><jupyter_text>To prevent the problem above, we apply **Feature Scaling** as a standard step.## 3. Training again after applying feature scaling
---
* A detailed discussion of feature scaling will come in the next session.
* Here we simply apply Min-Max normalization.<jupyter_code># Apply Min-Max normalization
movie_df = (movie_df - movie_df.min())/(movie_df.max()-movie_df.min())
movie_df.describe()
w0 = -0.1
w1 = -0.1
mse = calculate_MSE(w0,w1)<jupyter_output><empty_output><jupyter_text>* We will also raise the learning rate this time.<jupyter_code># Store the updated weights and MSE
w0_history = [w0]
w1_history = [w1]
mse_history = [mse]
dw0_history = []
dw1_history = []
alpha = 2e-1 # learning rate
epoch = 100 # number of training iterations
for i in range(epoch):
    # Fetch the previous weights
    old_w0 = w0_history[-1]
    old_w1 = w1_history[-1]
    
    # Recompute the gradients
    dw0 = calculate_wo_derivative(old_w0,old_w1)
    dw1 = calculate_w1_derivative(old_w0,old_w1)
    
    # Update the weights via gradient descent
    new_w0 = old_w0 - alpha * dw0
    new_w1 = old_w1 - alpha * dw1
    new_mse = calculate_MSE(new_w0,new_w1)
    
    # Store the results
    w0_history.append(new_w0)
    w1_history.append(new_w1)
    dw0_history.append(dw0)
    dw1_history.append(dw1)    
    mse_history.append(new_mse)<jupyter_output><empty_output><jupyter_text>## 4. Checking the results after feature scaling
---
* After applying feature scaling, training becomes dramatically more stable.<jupyter_code>hist_df = pd.DataFrame({
    "W0":w0_history[:-1],
    "W1":w1_history[:-1],
    "dw0":dw0_history,
    "dw1":dw1_history,    
    "Loss":mse_history[:-1]
})
hist_df.plot(y='Loss')
plt.title("Loss Function")
plt.show()
# Both W0 and W1 now change at a similar scale.
hist_df.plot(y=['W0','W1'],subplots=True,figsize=(10,10))
plt.show()
# The delta values are also now on a similar scale.
hist_df.plot(y=['dw0','dw1'],subplots=True,figsize=(10,10))
plt.show()<jupyter_output><empty_output><jupyter_text>* As shown above, feature scaling is essential for stable training.
# \[ 2. Building the TensorFlow Graph \]
----
----
> *Let's implement the linear regression model as a graph.*
## 1.  Creating the placeholders
----
* To train the linear regression model and make predictions,
  we need placeholders that receive the independent variable (X), the dependent variable (Y), and the learning rate.<jupyter_code>tf.reset_default_graph()
# Placeholders that receive the data
x = tf.placeholder(tf.float32, shape=(None,), name='x')
y_true = tf.placeholder(tf.float32, shape=(None,), name="y_true")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")<jupyter_output><empty_output><jupyter_text>## 2.  Creating the weights
----
* We have two weights to learn ($W_0$,$W_1$).
  As before, we set their initial values to (-0.1,-0.1).<jupyter_code># Initialize the weights
with tf.variable_scope('weights'):
    w0 = tf.Variable(-0.1,name='W0')
    w1 = tf.Variable(-0.1,name='W1')<jupyter_output><empty_output><jupyter_text>## 3. Implementing the prediction model $\hat y$
---
* We need to implement the linear regression prediction model given by the formula below.
$$
\hat y = w_1 * x + w_0
$$<jupyter_code># Compute y_hat
with tf.variable_scope('linear_regression'):
    y_pred = w1*x + w0
show_graph(tf.get_default_graph())<jupyter_output><empty_output><jupyter_text>To predict with linear regression, only the weights and x are needed.## 4. Implementing the loss function
---
$$
\mbox{Loss function: } Loss(W_0,W_1) = \frac{1}{2m}\sum_{i=1}^{m}(\hat y^{(i)} - y^{(i)})^2 = \frac{1}{2m}\sum_{i=1}^{m}(w_1x^{(i)}+w_0-y^{(i)})^2
$$<jupyter_code># MSE_loss
with tf.variable_scope('loss'):
    mse_loss = tf.reduce_mean(tf.square(y_pred-y_true))
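    # Note: reduce_mean gives (1/m)*sum((y_pred - y_true)^2); the 1/(2m) form above differs only by a
    # constant factor, so it does not change which weights minimize the loss.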
    tf.add_to_collection(tf.GraphKeys.LOSSES,mse_loss)
show_graph(tf.get_default_graph())<jupyter_output><empty_output><jupyter_text>## 5. Implementing the gradient descent algorithm
---
The gradient of the loss with respect to each weight is given below.
$$
\frac{\partial Loss}{\partial W_0} = \frac{1}{m}\sum_{i=1}^{m}(w_1x^{(i)}+w_0 - y^{(i)})\\
\frac{\partial Loss}{\partial W_1} = \frac{1}{m}\sum_{i=1}^{m}(w_1x^{(i)}+w_0 - y^{(i)})x^{(i)}\\
$$
The gradient descent update rule is as follows.
$$
W_{new} = W_{old} - \alpha * \frac{\partial loss}{\partial W} 
$$
<jupyter_code># Compute the gradient for each weight
with tf.variable_scope('gradient_descent'):
    grad_w0 = tf.reduce_mean(w1*x + w0 - y_true, 
                             name="gradient_w0")
    grad_w1 = tf.reduce_mean((w1*x + w0 - y_true)*x,
                             name='gradient_w1')
    update_w0 = tf.assign_sub(w0, learning_rate*grad_w0,
                              name="update_w0")
    update_w1 = tf.assign_sub(w1, learning_rate*grad_w1,
                              name='update_w1')
    train_op = tf.group([update_w0,update_w1],
                        name='train_op')
    tf.add_to_collection(tf.GraphKeys.TRAIN_OP,train_op)
show_graph(tf.get_default_graph())<jupyter_output><empty_output><jupyter_text>
# \[ 3. Training with TensorFlow \]
----
----
> *Let's train the graph we built in TensorFlow using a Session. *
## 1. Fetching the tensors and operations 
---
* We fetch the tensors and operations needed for training.<jupyter_code>graph = tf.get_default_graph()
x = graph.get_tensor_by_name('x:0')
y = graph.get_tensor_by_name('y_true:0')
learning_rate = graph.get_tensor_by_name('learning_rate:0')
w1 = graph.get_tensor_by_name('weights/W1:0')
w0 = graph.get_tensor_by_name('weights/W0:0')
mse_loss = graph.get_collection(tf.GraphKeys.LOSSES)[0]
train_op = graph.get_collection(tf.GraphKeys.TRAIN_OP)[0]
show_graph(graph)<jupyter_output><empty_output><jupyter_text>
## 2. Adding summaries to track in TensorBoard
---
<jupyter_code>tf.summary.scalar('Mean_Squared_Error',mse_loss)
tf.summary.scalar('weight_1',w1)
tf.summary.scalar('weight_0',w0)
merged_summary = tf.summary.merge_all()<jupyter_output><empty_output><jupyter_text>
## 3. Creating a Session and training
---
<jupyter_code>sess = tf.Session(graph=graph)
# Initialize the variables
sess.run(tf.global_variables_initializer())
# Create a writer object that logs to TensorBoard
writer = tf.summary.FileWriter(logdir='./linear_regression',
                               graph=graph)
for epoch in range(100):
    # nums_epoch : 100    
    _,merged_ = sess.run([train_op,merged_summary],
             feed_dict={
                 x:movie_df.nums_want_to_see.values,
                 y:movie_df.nums_audience.values,
                 learning_rate:2e-1
             })
    
    # Record the summary
    writer.add_summary(merged_, epoch)
writer.flush()
# Check the learned weights W1 and W0
sess.run([w1,w0])<jupyter_output><empty_output><jupyter_text>
## 4. Checking the results with TensorBoard
---
<jupyter_code># Launch TensorBoard
tbc = tensorboardcolab.TensorBoardColab(graph_path='./linear_regression')<jupyter_output>Wait for 8 seconds...
Initialization failed, retry again (1)
Wait for 8 seconds...
Initialization failed, retry again (2)
Wait for 8 seconds...
Initialization failed, retry again (3)
Wait for 8 seconds...
Initialization failed, retry again (4)
Wait for 8 seconds...
Initialization failed, retry again (5)
Wait for 8 seconds...
Initialization failed, retry again (6)
Wait for 8 seconds...
Initialization failed, retry again (7)
Wait for 8 seconds...
Initialization failed, retry again (8)
Wait for 8 seconds...
Initialization failed, retry again (9)
Wait for 8 seconds...
Initialization failed, retry again (10)
Wait for 8 seconds...
Initialization failed, retry again (11)
Wait for 8 seconds...
Initialization failed, retry again (12)
Wait for 8 seconds...
Initialization failed, retry again (13)
Wait for 8 seconds...
Initialization failed, retry again (14)
Wait for 8 seconds...
Initialization failed, retry again (15)
Wait for 8 seconds...
Initialization failed, retr[...] | 
	no_license | 
	/lecture-codes/5_machine_learning_basis/4_Tensorflow을 이용한 Linear Regression 구현하기.ipynb | 
	anthony0727/ALAI-DL | 20 | 
| 
	<jupyter_start><jupyter_text>y = 2x + 1<jupyter_code>model.predict([[15]])
from sklearn.datasets import fetch_california_housing
california = fetch_california_housing()
X = california.data
df = pd.DataFrame(X, columns = california.feature_names)
Y = california.target
print(df)
i = 3
plt.title(california.feature_names[i] + ' & ' + 'target')
plt.xlabel(california.feature_names[i])
plt.ylabel('target')
plt.scatter(df[california.feature_names[i]], Y)
plt.legend()
plt.show()
model = reg.fit(X, Y)
print(f'coef: {model.coef_}')
print(f'intercept: {model.intercept_}')
i=0
plt.title(california.feature_names[i] + ' & ' + 'target')
plt.xlabel(california.feature_names[i])
plt.ylabel('target')
plt.scatter(df[california.feature_names[i]], Y)
plt.plot(df[california.feature_names[i]], model.coef_[i] * df[california.feature_names[i]], 'r-')
plt.legend()
plt.show()
df.mean()
model.predict([df.mean()])<jupyter_output><empty_output> | 
	no_license | 
	/C1. Linear Regression/chapte1-practice.ipynb | 
	JHyuk2/ML-DL | 1 | 
| 
	<jupyter_start><jupyter_text>## loading training data<jupyter_code>x_train=[]
for file in tqdm(train['file']):
    img = read_img(file, (224, 224))
    x_train.append(img)
    
    
x_train=np.array(x_train)
x_train.shape
x_test=[]
for file in tqdm(test['filepath']):
    img=read_img(file,(224,224))
    x_test.append(img)
x_test=np.array(x_test)
x_test.shape<jupyter_output><empty_output><jupyter_text>## loading image labels of training data<jupyter_code>y_train=train['category_id']
y_train.shape<jupyter_output><empty_output><jupyter_text>## converting labels into one hot vectors<jupyter_code>from sklearn.preprocessing import LabelBinarizer 
label_binarizer = LabelBinarizer()
label_binarizer.fit(y_train)
y_train=label_binarizer.transform(y_train)
y_train.shape
from sklearn.model_selection import train_test_split
X_train, X_valid, Y_train, Y_valid=train_test_split(x_train,y_train,test_size=0.05, random_state=42)
X_train=X_train/255
X_valid=X_valid/255
x_test=x_test/255<jupyter_output><empty_output><jupyter_text>## image augmentation<jupyter_code>from keras.preprocessing.image import ImageDataGenerator
datagen_train = ImageDataGenerator(
    width_shift_range=0.2,  # randomly shift images horizontally 
    height_shift_range=0.2,# randomly shift images vertically 
    
    horizontal_flip=True) # randomly flip images horizontally
# fit augmented image generator on data
datagen_train.fit(X_train)
from keras.layers import Input,InputLayer, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout
from keras.models import Sequential,Model
from keras.callbacks import ModelCheckpoint<jupyter_output><empty_output><jupyter_text>## using pretrained keras model<jupyter_code>!ls ../input/keras-pretrained-models/
cache_dir = os.path.expanduser(os.path.join('~', '.keras'))
if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)
models_dir = os.path.join(cache_dir, 'models')
if not os.path.exists(models_dir):
    os.makedirs(models_dir)
!cp ../input/keras-pretrained-models/vgg* ~/.keras/models/
!ls ~/.keras/models
from keras import applications
model = applications.VGG16(weights = "imagenet", include_top=False, input_shape = (224, 224, 3))
model.summary()<jupyter_output><empty_output><jupyter_text>### Freeze all layers <jupyter_code>for layer in model.layers:
    layer.trainable = False
model.summary()<jupyter_output><empty_output><jupyter_text>### Adding Fully connected layers<jupyter_code>x = model.output
x = Flatten()(x)
x = Dense(128, activation="relu")(x)
x = Dropout(0.4)(x)
predictions = Dense(12, activation="softmax")(x)
model_final = Model(input = model.input, output = predictions)
model_final.compile(loss = "categorical_crossentropy",  optimizer ='adam', metrics=["accuracy"])
checkpointer = ModelCheckpoint(filepath='vgg16.hdf5', verbose=1, save_best_only=True)
'''
model_final.fit_generator(datagen_train.flow(X_train, Y_train, batch_size=16), validation_data=(X_valid, Y_valid),
                         epochs=2,steps_per_epoch=X_train.shape[0],callbacks=[checkpointer], verbose=1)
'''<jupyter_output><empty_output> | 
	no_license | 
	/datasets/plant-seedlings-classification/kernels/NULL---amarjeet007---plant-seedlings-classification.ipynb | 
	mindis/GDS | 7 | 
| 
	<jupyter_start><jupyter_text>I had posted my very naive baseline at https://www.kaggle.com/mhviraf/a-baseline-for-dsb-2019. In that kernel I only used the mode label for each Assessment and I thought it should be very easy to beat. This kernel shows how you can beat that baseline by actually applying a model. In this kernel, via the `get_data()` function, I go over each `installation_id` and try to extract some features based on their behavior prior to the assessment. I will then train a `Catboost` classifier on it and make predictions on the test set. Note that the features I made in this kernel are very basic and you can easily add many more to it. Good luck and happy kaggling. Don't forget to upvote if you found it useful ;)<jupyter_code>import numpy as np
import pandas as pd
import datetime
from catboost import CatBoostClassifier
from time import time
from tqdm import tqdm_notebook as tqdm
from sklearn.metrics import confusion_matrix
def qwk(act,pred,n=4,hist_range=(0,3)):
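    # Quadratic weighted kappa: 1 - (weighted observed disagreement) / (weighted expected disagreement),
    # using quadratic weights W[i][j] = ((i-j)^2) / ((n-1)^2) over the n x n confusion matrix.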
    
    O = confusion_matrix(act,pred)
    O = np.divide(O,np.sum(O))
    
    W = np.zeros((n,n))
    for i in range(n):
        for j in range(n):
            W[i][j] = ((i-j)**2)/((n-1)**2)
            
    act_hist = np.histogram(act,bins=n,range=hist_range)[0]
    prd_hist = np.histogram(pred,bins=n,range=hist_range)[0]
    
    E = np.outer(act_hist,prd_hist)
    E = np.divide(E,np.sum(E))
    
    num = np.sum(np.multiply(W,O))
    den = np.sum(np.multiply(W,E))
        
    return 1-np.divide(num,den)
    
train = pd.read_csv('/kaggle/input/data-science-bowl-2019/train.csv')
train_labels = pd.read_csv('/kaggle/input/data-science-bowl-2019/train_labels.csv')
specs = pd.read_csv('/kaggle/input/data-science-bowl-2019/specs.csv')
test = pd.read_csv('/kaggle/input/data-science-bowl-2019/test.csv')
submission = pd.read_csv('/kaggle/input/data-science-bowl-2019/sample_submission.csv')
# encode title
list_of_user_activities = list(set(train['title'].value_counts().index).union(set(test['title'].value_counts().index)))
activities_map = dict(zip(list_of_user_activities, np.arange(len(list_of_user_activities))))
train['title'] = train['title'].map(activities_map)
test['title'] = test['title'].map(activities_map)
train_labels['title'] = train_labels['title'].map(activities_map)
win_code = dict(zip(activities_map.values(), (4100*np.ones(len(activities_map))).astype('int')))
win_code[activities_map['Bird Measurer (Assessment)']] = 4110
train['timestamp'] = pd.to_datetime(train['timestamp'])
test['timestamp'] = pd.to_datetime(test['timestamp'])
# no intersection between installation ids of train and test
# user_sample = train.query('installation_id=="0006a69f"')
# user_sample = test.query('installation_id=="01242218"') 
def get_data(user_sample, test_set=False):
    last_activity = 0
    user_activities_count = {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
    accuracy_groups = {0:0, 1:0, 2:0, 3:0}
    all_assessments = []
    accumulated_accuracy_group = 0
    accumulated_accuracy=0
    accumulated_correct_attempts = 0 
    accumulated_uncorrect_attempts = 0 
    accumulated_actions = 0
    counter = 0
    durations = []
    for i, session in user_sample.groupby('game_session', sort=False):
        session_type = session['type'].iloc[0]
        session_title = session['title'].iloc[0]
        if test_set == True:
            second_condition = True
        else:
            if len(session)>1:
                second_condition = True
            else:
                second_condition= False
            
        if (session_type == 'Assessment') & (second_condition):
            all_attempts = session.query(f'event_code == {win_code[session_title]}')
            true_attempts = all_attempts['event_data'].str.contains('true').sum()
            false_attempts = all_attempts['event_data'].str.contains('false').sum()
            features = user_activities_count.copy()
    #         features['installation_id'] = session['installation_id'].iloc[0]
#             features['game_session'] = i
            features['session_title'] = session['title'].iloc[0] 
            features['accumulated_correct_attempts'] = accumulated_correct_attempts
            features['accumulated_uncorrect_attempts'] = accumulated_uncorrect_attempts
            accumulated_correct_attempts += true_attempts 
            accumulated_uncorrect_attempts += false_attempts
            if durations == []:
                features['duration_mean'] = 0
            else:
                features['duration_mean'] = np.mean(durations)
            durations.append((session.iloc[-1, 2] - session.iloc[0, 2] ).seconds)
            features['accumulated_accuracy'] = accumulated_accuracy/counter if counter > 0 else 0
            accuracy = true_attempts/(true_attempts+false_attempts) if (true_attempts+false_attempts) != 0 else 0
            accumulated_accuracy += accuracy
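            # accuracy_group follows the competition's definition: 3 = solved on the first attempt,
            # 2 = solved on the second attempt, 1 = solved after three or more attempts, 0 = never solved.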
            if accuracy == 0:
                features['accuracy_group'] = 0
            elif accuracy == 1:
                features['accuracy_group'] = 3
            elif accuracy == 0.5:
                features['accuracy_group'] = 2
            else:
                features['accuracy_group'] = 1
            features.update(accuracy_groups)
            features['accumulated_accuracy_group'] = accumulated_accuracy_group/counter if counter > 0 else 0
            features['accumulated_actions'] = accumulated_actions
            accumulated_accuracy_group += features['accuracy_group']
            accuracy_groups[features['accuracy_group']] += 1
            if test_set == True:
                all_assessments.append(features)
            else:
                if true_attempts+false_attempts > 0:
                    all_assessments.append(features)
                
            counter += 1
    #         break
        accumulated_actions += len(session)
        if last_activity != session_type:
            user_activities_count[session_type] += 1
            last_activity = session_type
    if test_set:
        return all_assessments[-1] 
    return all_assessments
compiled_data = []
for i, (ins_id, user_sample) in tqdm(enumerate(train.groupby('installation_id', sort=False)), total=17000):
    compiled_data += get_data(user_sample)
new_train = pd.DataFrame(compiled_data)
del compiled_data
new_train.shape<jupyter_output><empty_output><jupyter_text>Below are the features I have generated. Note that all of them are **prior** to each event. For example, the first row shows that **before** this assessment, the player had watched 3 clips, done 3 activities, played 4 games and solved 0 assessments, and so on.<jupyter_code>new_train.head()<jupyter_output><empty_output><jupyter_text>## Model<jupyter_code>all_features = [x for x in new_train.columns if x not in ['accuracy_group']]
cat_features = ['session_title']
X, y = new_train[all_features], new_train['accuracy_group']
del train
def make_classifier():
    clf = CatBoostClassifier(
                               loss_function='MultiClass',
    #                            eval_metric="AUC",
                               task_type="CPU",
                               learning_rate=0.01,
                               iterations=2000,
                               od_type="Iter",
#                                depth=8,
                               early_stopping_rounds=500,
    #                            l2_leaf_reg=1,
    #                            border_count=96,
                               random_seed=2019
                              )
        
    return clf
oof = np.zeros(len(X))
# CV
from sklearn.model_selection import KFold
# preds = np.zeros(len(X_test))
oof = np.zeros(len(X))
NFOLDS = 5
folds = KFold(n_splits=NFOLDS, shuffle=True, random_state=2019)
training_start_time = time()
for fold, (trn_idx, test_idx) in enumerate(folds.split(X, y)):
    start_time = time()
    print(f'Training on fold {fold+1}')
    clf = make_classifier()
    clf.fit(X.loc[trn_idx, all_features], y.loc[trn_idx], eval_set=(X.loc[test_idx, all_features], y.loc[test_idx]),
                          use_best_model=True, verbose=500, cat_features=cat_features)
    
#     preds += clf.predict(X_test).reshape(len(X_test))/NFOLDS
    oof[test_idx] = clf.predict(X.loc[test_idx, all_features]).reshape(len(test_idx))
    
    print('Fold {} finished in {}'.format(fold + 1, str(datetime.timedelta(seconds=time() - start_time))))
    
print('-' * 30)
print('OOF QWK:', qwk(y, oof))
print('-' * 30)<jupyter_output><empty_output><jupyter_text>Note that Cross validation is only for the feature engineering part and you don't actually need it if you want to submit the results. You can safely comment it out. <jupyter_code># train model on all data once
clf = make_classifier()
clf.fit(X, y, verbose=500, cat_features=cat_features)
del X, y
# process test set
new_test = []
for ins_id, user_sample in tqdm(test.groupby('installation_id', sort=False), total=1000):
    a = get_data(user_sample, test_set=True)
    new_test.append(a)
    
X_test = pd.DataFrame(new_test)
del test
# make predictions on test set once
preds = clf.predict(X_test)
del X_test<jupyter_output><empty_output><jupyter_text>## Make submission<jupyter_code>submission['accuracy_group'] = np.round(preds).astype('int')
submission.to_csv('submission.csv', index=None)
submission.head()
submission['accuracy_group'].plot(kind='hist')
train_labels['accuracy_group'].plot(kind='hist')
pd.Series(oof).plot(kind='hist')<jupyter_output><empty_output> | 
	no_license | 
	/kernels/a-new-baseline-for-dsb-2019-catboost-model.ipynb | 
	enridaga/data-journey | 5 | 
| 
	<jupyter_start><jupyter_text>**Purpose:**
Inference with a DistilBERT model pretrained on SQuAD
<jupyter_code>%%capture
!pip install transformers
import time
import sys
import os
import contextlib
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering
import torch
from google.colab import drive
drive.mount('/content/drive')
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', return_token_type_ids = True)
model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased-distilled-squad')
# Inference:
start_time = time.time()
context = "The US has passed the peak on new coronavirus cases, " \
          "President Donald Trump said and predicted that some states would reopen this month. " \
          "The US has over 637,000 confirmed Covid-19 cases and over 30,826 deaths, the highest for any country in the world."
question = "What was President Donald Trump's prediction?"
encoding = tokenizer.encode_plus(question, context)
input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
start_scores, end_scores = model(torch.tensor([input_ids]), attention_mask=torch.tensor([attention_mask]))
ans_tokens = input_ids[torch.argmax(start_scores) : torch.argmax(end_scores)+1]
answer_tokens = tokenizer.convert_ids_to_tokens(ans_tokens , skip_special_tokens=True)
print ("\nQuestion ",question)
print ("\nAnswer Tokens: ")
print (answer_tokens)
answer_tokens_to_string = tokenizer.convert_tokens_to_string(answer_tokens)
print ("\nAnswer : ",answer_tokens_to_string)
end_time = time.time()
print("\nExecution Time: {} seconds.".format(end_time - start_time))
from transformers.data.processors.squad import SquadV2Processor
# this processor loads the SQuAD2.0 dev set examples
processor = SquadV2Processor()
examples = processor.get_dev_examples("/content/drive/My Drive/colab_files/data/Covid-QA/", filename="Covid-QA-val.json")
print(len(examples))
# generate some maps to help us identify examples of interest
qid_to_example_index = {example.qas_id: i for i, example in enumerate(examples)}
qid_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
answer_qids = [qas_id for qas_id, has_answer in qid_to_has_answer.items() if has_answer]
no_answer_qids = [qas_id for qas_id, has_answer in qid_to_has_answer.items() if not has_answer]
def display_example(qid):    
    from pprint import pprint
    idx = qid_to_example_index[qid]
    q = examples[idx].question_text
    c = examples[idx].context_text
    a = [answer['text'] for answer in examples[idx].answers]
    
    print(f'Example {idx} of {len(examples)}\n---------------------')
    print(f"Q: {q}\n")
    print("Context:")
    pprint(c)
    print(f"\nTrue Answers:\n{a}")
#display_example(answer_qids[0])
import sys
sys.path.append('/content/drive/My Drive/colab_files/modules')
import infersent_glove_context_generation as ig
import time
import os
import contextlib
import torch
import nltk
nltk.download('punkt')
def get_prediction(qid):
    # given a question id (qas_id or qid), load the example, get the model outputs and generate an answer
    question = examples[qid_to_example_index[qid]].question_text
    doc_text = examples[qid_to_example_index[qid]].context_text
    with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
        context = ig.generate_context_from_doc(doc_text, question)
    context_tokens = nltk.word_tokenize(context)
    #print('\nContext token count: ', len(context_tokens))
    #print('\n\nContext tokens: ', context_tokens)
    inputs = tokenizer.encode_plus(question, context, return_tensors='pt')
    outputs = model(**inputs)
    answer_start = torch.argmax(outputs[0])  # get the most likely beginning of answer with the argmax of the score
    answer_end = torch.argmax(outputs[1]) + 1 
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inputs['input_ids'][0][answer_start:answer_end]))
    return answer
# these functions are heavily influenced by the HF squad_metrics.py script
def normalize_text(s):
    """Removing articles and punctuation, and standardizing whitespace are all typical text processing steps."""
    import string, re
    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact_match(prediction, truth):
    return int(normalize_text(prediction) == normalize_text(truth))
def compute_f1(prediction, truth):
    pred_tokens = normalize_text(prediction).split()
    truth_tokens = normalize_text(truth).split()
    
    # if either the prediction or the truth is no-answer then f1 = 1 if they agree, 0 otherwise
    if len(pred_tokens) == 0 or len(truth_tokens) == 0:
        return int(pred_tokens == truth_tokens)
    
    common_tokens = set(pred_tokens) & set(truth_tokens)
    
    # if there are no common tokens then f1 = 0
    if len(common_tokens) == 0:
        return 0
    
    prec = len(common_tokens) / len(pred_tokens)
    rec = len(common_tokens) / len(truth_tokens)
    
    return 2 * (prec * rec) / (prec + rec)
def get_gold_answers(example):
    """helper function that retrieves all possible true answers from a squad2.0 example"""
    
    gold_answers = [answer["text"] for answer in example.answers if answer["text"]]
    # if gold_answers doesn't exist it's because this is a negative example - 
    # the only correct answer is an empty string
    if not gold_answers:
        gold_answers = [""]
        
    return gold_answers
answer_qids[0]
start_time = time.time()
prediction = get_prediction(answer_qids[0])
example = examples[qid_to_example_index[answer_qids[0]]]
gold_answers = get_gold_answers(example)
em_score = max((compute_exact_match(prediction, answer)) for answer in gold_answers)
f1_score = max((compute_f1(prediction, answer)) for answer in gold_answers)
print(f"Question: {example.question_text}")
print(f"Prediction: {prediction}")
print(f"True Answers: {gold_answers}")
print(f"EM: {em_score} \t F1: {f1_score}")
print("\nExecution time: {}".format(time.time() - start_time))
def evaluate_model():
    em_scores = []
    f1_scores = []
    for qid in answer_qids:
        prediction = get_prediction(qid)
        example = examples[qid_to_example_index[qid]]
        gold_answers = get_gold_answers(example)
        em_score = max((compute_exact_match(prediction, answer)) for answer in gold_answers)
        f1_score = max((compute_f1(prediction, answer)) for answer in gold_answers)
        em_scores.append(em_score)
        f1_scores.append(f1_score)
    avg_em = sum(em_scores) / len(em_scores)
    avg_f1 = sum(f1_scores) / len(f1_scores)
    print("\nAvg EM: {}".format(avg_em))
    print("\nAvg F1: {}".format(avg_f1))
start_time = time.time()
evaluate_model()
print("\n\nExecution time: {}".format(time.time() - start_time))<jupyter_output>/content/drive/My Drive/colab_files/modules/models_infersent.py:197: UserWarning: No words in "['<s>', 'No.', '</s>']" (idx=23) have w2v vectors.                                Replacing by "</s>"..
  Replacing by "</s>"..' % (sentences[i], i))
/content/drive/My Drive/colab_files/modules/models_infersent.py:197: UserWarning: No words in "['<s>', '258/11_120365).', '</s>']" (idx=24) have w2v vectors.                                Replacing by "</s>"..
  Replacing by "</s>"..' % (sentences[i], i))
/content/drive/My Drive/colab_files/modules/models_infersent.py:197: UserWarning: No words in "['<s>', '.newer', 'organizations."', '</s>']" (idx=21) have w2v vectors.                                Replacing by "</s>"..
  Replacing by "</s>"..' % (sentences[i], i))
/content/drive/My Drive/colab_files/modules/models_infersent.py:197: UserWarning: No words in "['<s>', 'Is', 'hepcidin', 'toxic?', '</s>']" (idx=0) have w2v vectors.                                Replacing by "</s>"..
  Replacing [...] | 
	no_license | 
	/Colab/Colab Notebooks/1_squad_pretrained_distilbert_base_QnA.ipynb | 
	niravraje/Web-QnA | 1 | 
| 
	<jupyter_start><jupyter_text># Problem
- You want to flatten a sequence with arbitrary levels of nesting into a single flat list.
## Solution
- Write a recursive generator that uses a yield from statement. For example:<jupyter_code>from collections.abc import Iterable  # note: collections.Iterable was removed in Python 3.10
def flatten(items, ignore_types=(str, bytes)):
    for x in items:
        if isinstance(x, Iterable) and not isinstance(x, ignore_types):
            yield from flatten(x)
        else:
            yield x
            
items = [1, 2, [3, 4, [5, 6], 7], 8]
for x in flatten(items):
    print(x, end=" ")
    
for x in items:
    print(x)<jupyter_output>1 2 3 4 5 6 7 8 1
2
[3, 4, [5, 6], 7]
8
<jupyter_text>- In the code above, isinstance(x, Iterable) checks whether an element is iterable. If it is, yield from emits all of the values from the recursive call, so the final result is a single flat sequence with no nesting.
- The extra ignore_types argument and the isinstance(x, ignore_types) check exclude strings and bytes from being treated as iterables, which prevents them from being expanded into individual characters. That way a list of strings flattens to the result we expect. For example:<jupyter_code>items = ['Dave', 'Paula', ['Thomas', 'Lewis']]
for x in flatten(items):
    print(x)<jupyter_output>Dave
Paula
Thomas
Lewis
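A small editor's aside (not part of the original recipe) showing why the guard is needed: strings are themselves iterable, so without ignore_types they would be broken up character by character.
print(list('Dave'))                  # ['D', 'a', 'v', 'e']
print(isinstance('Dave', Iterable))  # True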
<jupyter_text>## Discussion
- The yield from statement is very useful when you want a generator to call other generators as subroutines. Without it, you would have to write an extra for loop, as in:<jupyter_code>def flatten(items, ignore_types=(str, bytes)):
    for x in items:
        if isinstance(x, Iterable) and not isinstance(x, ignore_types):
            for x in flatten(x):
                yield x
        else:
            yield x
            
for x in flatten(items):
    print(x)<jupyter_output>Dave
Paula
Thomas
Lewis
 | 
	no_license | 
	/PythonCookBook/4_迭代器和生成器/4_14_展开嵌套的序列.ipynb | 
	NAMEs/Python_Note | 3 | 
| 
	<jupyter_start><jupyter_text>## Function to generate all the prime numbers in a given range<jupyter_code>def prime(lb):
    count=0
    for i in range(1,lb+1):
        if lb%i==0:
            count=count+1
    if count==2:
        print(i,end=",")
lb=int(input())
ub=int(input())
for j in range(lb,ub+1):
    prime(j)
        
def prime(lb):                      ## USING RETURN
    count=0
    for i in range(1,lb+1):
        if lb%i==0:
            count=count+1
    if count==2:
        return 1
    else:
        return 0
lb=int(input())
ub=int(input())
for j in range(lb,ub+1):
    if prime(j)==1:
        print(j,end="-")
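# --- Editor's note (not in the original notebook): a common refinement is to test
# divisors only up to the square root of n instead of counting every divisor. ---
def is_prime_sqrt(n):
    if n < 2:
        return False
    i = 2
    while i * i <= n:       # trial division up to sqrt(n)
        if n % i == 0:
            return False
        i += 1
    return True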
        <jupyter_output>20
30
23-29-<jupyter_text>TASK
## Check whether a given number is perfect or not<jupyter_code>def perfect(n):
    sum=0
    for i in range(1,n):
        if n%i==0:
            sum=sum+i
            print(i)
    print(sum)
    if(sum==n):
        print("it is perfect")
    else:
        print("it is not perfect")
n=int(input())        
perfect(n)       <jupyter_output>6
1
2
3
6
it is perfect
<jupyter_text>## Program to print the sum of digits in a given number<jupyter_code>def sum(n):   # note: this name shadows the built-in sum()
    sum=0
    for i in range (0,n):   # runs n times; a `while n > 0` loop would stop once the digits are exhausted
        f=n%10
        sum=sum+f
        n=n//10
    print(sum)
n=int(input())
sum(n)        <jupyter_output>1234
10
<jupyter_text>## check whether the given number is armstrong or not<jupyter_code>def armstrong(n):
    s=0
    m=n
    for i in range(1,n):
        a=n%10
        s=s+a**3
        n=n//10
    print(s)
    if(m==s):
        print("armstrong")
    else:
        print("not an armstrong")
n=int(input())
armstrong(n)
        
        
   <jupyter_output>153
153
armstrong
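The cube in s+a**3 assumes a three-digit input; a hedged, more general editor's sketch (not part of the original notebook) raises each digit to the number of digits:
def is_armstrong(n):
    digits = str(n)
    power = len(digits)                      # e.g. 3 for 153, 4 for 9474
    return n == sum(int(d) ** power for d in digits)

# is_armstrong(153) -> True, is_armstrong(9474) -> True, is_armstrong(154) -> False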
 | 
	no_license | 
	/25-09-2019/25-09-2019(class)/25-09-2019(class).ipynb | 
	bindu707/gitbasics-mstp-level1-506 | 4 | 
| 
	<jupyter_start><jupyter_text>Task 1
You are given a variable holding a dictionary of geo-tags for each user
(an example of the data structure is shown below).
Write a program that prints the set of unique geo-tags across all users.<jupyter_code>ids = {'user1': [213, 213, 213, 15, 213], 
       'user2': [54, 54, 119, 119, 119], 
       'user3': [213, 98, 98, 35]}
values = []
for value in ids.values():
    values = values + value
print(set(values))
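# Editor's sketch (not in the original notebook): the same set of unique geo-tags
# can be built without the intermediate list by unioning all the value lists.
unique_tags = set().union(*ids.values())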
set([value for value in ids.values() for value in value]) # Second way, using a list comprehension<jupyter_output><empty_output><jupyter_text>Task 2
You are given a variable holding a list of a user's search queries (an example of the data structure is shown below).
Write a program that prints the distribution of the number of words per query in the required format.<jupyter_code>queries = [
    'смотреть сериалы онлайн',
    'новости спорта',
    'афиша кино',
    'курс доллара',
    'сериалы этим летом',
    'курс по питону',
    'сериалы про спорт',
]
total_queries = 0
result = {}
for query in queries:
    length = len(query.split())
    if length not in result:
        result[length] = 1
        total_queries = total_queries + 1
    else:
        result[length] += 1
        total_queries = total_queries + 1
for key, value in result.items():
    print(f'Поисковых запросов, содержащих {key} слов(а): {round((value / total_queries) * 100, 2)}%')<jupyter_output>Поисковых запросов, содержащих 3 слов(а): 57.14%
Поисковых запросов, содержащих 2 слов(а): 42.86%
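A hedged alternative sketch (not part of the original solution): collections.Counter does the tallying, and the total is just the sum of its counts.
from collections import Counter

length_counts = Counter(len(query.split()) for query in queries)
total = sum(length_counts.values())
for length, count in length_counts.items():
    print(f'Queries with {length} word(s): {round(count / total * 100, 2)}%')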
<jupyter_text>Task 3
You are given a variable holding cost and revenue figures for advertising campaigns across different sources.
Extend the original structure with an ROI metric, computed as: (revenue / cost - 1) * 100<jupyter_code>results = {
    'vk': {'revenue': 103, 'cost': 98},
    'yandex': {'revenue': 179, 'cost': 153},
    'facebook': {'revenue': 103, 'cost': 110},
    'adwords': {'revenue': 35, 'cost': 34},
    'twitter': {'revenue': 11, 'cost': 24},
}
for key, value in results.items():
    value['ROI'] = round((value['revenue'] / value['cost'] - 1) * 100,2)
print(results)<jupyter_output>{'vk': {'revenue': 103, 'cost': 98, 'ROI': 5.1}, 'yandex': {'revenue': 179, 'cost': 153, 'ROI': 16.99}, 'facebook': {'revenue': 103, 'cost': 110, 'ROI': -6.36}, 'adwords': {'revenue': 35, 'cost': 34, 'ROI': 2.94}, 'twitter': {'revenue': 11, 'cost': 24, 'ROI': -54.17}}
<jupyter_text>Task 4
You are given a variable holding sales-volume statistics for advertising channels (an example of the data structure is shown below).
Write a program that returns the name of the channel with the maximum sales volume.<jupyter_code>stats = {'facebook': 55, 'yandex': 115, 'vk': 120, 'google': 99, 'email': 42, 'ok': 98}
print(f'Максимальный объем продаж на рекламном канале: {sorted(stats.items(), key = lambda kv: kv[1], reverse = True)[0][0]}')<jupyter_output>Максимальный объем продаж на рекламном канале: vk
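A shorter editor's sketch (not in the original solution): the built-in max with a key function avoids sorting the whole dictionary.
top_channel = max(stats, key=stats.get)   # 'vk'
print(f'Channel with the maximum sales volume: {top_channel}')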
<jupyter_text>Task 5 (optional)
You are given a list of arbitrary length.
Write code that, based on the original list, builds a dictionary whose nesting depth
equals the length of the original list.
Result: {'a': {'b': {'c': {'d': {'e': 'f'}}}}}<jupyter_code>my_list = ['a', 'b', 'c', 'd', 'e', 'f']
my_dict = {}
for index, num in enumerate(my_list[::-1]):
    my_dict = dict.fromkeys(num,my_dict)
    index = index + 1
my_dict
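# Editor's sketch (not in the original notebook): the stated target
# {'a': {'b': {'c': {'d': {'e': 'f'}}}}} can be built by starting from the last
# element and wrapping the accumulator in a one-key dict at each step.
nested = my_list[-1]
for key in my_list[-2::-1]:
    nested = {key: nested}
# nested == {'a': {'b': {'c': {'d': {'e': 'f'}}}}}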
my_list = ['a', 'b', 'c', 'd', 'e', 'f'] # Question from the author: why didn't this version work?
# Answer: list.reverse() reverses the list in place and returns None, so my_list2 is None
# and enumerate(my_list.reverse()) raises a TypeError; use reversed(my_list) or my_list[::-1] instead.
my_list2 = my_list.reverse()
my_dict = {}
for index, num in enumerate(my_list.reverse()):
    my_dict = dict.fromkeys(num,my_dict)
    index = index + 1
my_dict<jupyter_output><empty_output><jupyter_text>Task 6 (optional)
You are given a recipe book with the amount of each ingredient needed to prepare one serving of a dish (example data is shown below).
Write a program that asks the user for the number of servings of these dishes and prints the total amount of each required ingredient in the specified format.
Note: identical ingredients with different units must be counted separately!<jupyter_code>cook_book = {
  'салат': [
     {'ingridient_name': 'сыр', 'quantity': 50, 'measure': 'гр'},
     {'ingridient_name': 'томаты', 'quantity': 2, 'measure': 'шт'},
     {'ingridient_name': 'огурцы', 'quantity': 20, 'measure': 'гр'},
     {'ingridient_name': 'маслины', 'quantity': 10, 'measure': 'гр'},
     {'ingridient_name': 'оливковое масло', 'quantity': 20, 'measure': 'мл'},
     {'ingridient_name': 'салат', 'quantity': 10, 'measure': 'гр'},
     {'ingridient_name': 'перец', 'quantity': 20, 'measure': 'гр'}
    ],
  'пицца': [
     {'ingridient_name': 'сыр', 'quantity': 20, 'measure': 'гр'},
     {'ingridient_name': 'колбаса', 'quantity': 30, 'measure': 'гр'},
     {'ingridient_name': 'бекон', 'quantity': 30, 'measure': 'гр'},
     {'ingridient_name': 'оливки', 'quantity': 10, 'measure': 'гр'},
     {'ingridient_name': 'томаты', 'quantity': 20, 'measure': 'гр'},
     {'ingridient_name': 'тесто', 'quantity': 100, 'measure': 'гр'},   
    ],
  'лимонад': [
     {'ingridient_name': 'лимон', 'quantity': 1, 'measure': 'шт'},
     {'ingridient_name': 'вода', 'quantity': 200, 'measure': 'мл'},
     {'ingridient_name': 'сахар', 'quantity': 10, 'measure': 'гр'},
     {'ingridient_name': 'лайм', 'quantity': 20, 'measure': 'гр'},    
    ]
}
result = {}
user_input = input('Введите количество порций:')
for dish in cook_book.values():
    for ingredient in dish:
        ingredient['id_'] = ingredient['ingridient_name'] + ' ' + ingredient['measure']
        meal,weight,gr,id__ = list(ingredient.values())
        if id__ not in result:
            result[id__] = int(weight) * int(user_input)
        else:
            result[id__] += int(weight) * int(user_input)
for id__,weight in result.items():
    print(f'{id__}: {weight}')<jupyter_output>Введите количество порций:3
сыр гр: 210
томаты шт: 6
огурцы гр: 60
маслины гр: 30
оливковое масло мл: 60
салат гр: 30
перец гр: 60
колбаса гр: 90
бекон гр: 90
оливки гр: 30
томаты гр: 60
тесто гр: 300
лимон шт: 3
вода мл: 600
сахар гр: 30
лайм гр: 60
 | 
	no_license | 
	/DZ4/DZ4.ipynb | 
	VeraRomantsova/Vera_Romantsova_ds | 6 | 
| 
	<jupyter_start><jupyter_text># This is a sample notebook
Some text<jupyter_code>library('qt1')  # note: 'qt1' appears to be a typo for 'qtl'; this call fails, motivating the install below
install.packages('qtl', lib='~/R-library/', repos='http://cran.us.r-project.org')
.libPaths('~/R-library/')
.libPaths()
library('qtl')
qtlversion()
help(install.packages)<jupyter_output><empty_output> | 
	no_license | 
	/R-installing-package.ipynb | 
	mbmilligan/msi-ipython-nb-ex | 1 | 
| 
	<jupyter_start><jupyter_text>### 1. Sort the numbers in a list by value?<jupyter_code># http://wuchong.me/blog/2014/02/09/algorithm-sort-summary/
# method 1 
# using max + remove; note that this produces descending order
def value_ordering(x):
    out=[]
    for k in range(len(x)):
        #print ((x))
        out.append(max(x))
        x.remove(max(x))
    return out
my_list = [8,1,9,4,5,5,6]
value_ordering(my_list)
# method 2 
# bubble 
def bubble_sort(arry):
    n = len(arry)                                       #get length of list 
    for i in range(n):
        for j in range(1,n-i):
            if  arry[j-1] > arry[j] :                   # if the former is greater than the latter,
                arry[j-1],arry[j] = arry[j],arry[j-1]   # then swap them
                print (arry)
    return arry
my_list = [8,1,9,4,5,5,6]
bubble_sort(my_list)
# method 3 
def select_sort(ary):
    n = len(ary)
    for i in range(0,n):
        min = i                             # index of the current minimum (note: shadows the built-in min)
        for j in range(i+1,n):
            if ary[j] < ary[min] :
                min = j                     #get index of minimum element 
        ary[min],ary[i] = ary[i],ary[min]   # swap the minimum into position i
        print (ary)
    return ary
my_list = [8,1,9,4,5,5,6]
select_sort(my_list)
# method 5 : bubble 
# example :
# [5,4,1,2] =>
# count 0 
# [4,5,1,2]
# [4,1,5,2]
# [4,1,2,5]
# count 1 
# [1,4,2,5]
# [1,2,4,5]
# count 2 
### run the pass N-1 times : compare every consecutive pair in the list
### N = length of list 
def sort_list(list):
    count = 0
    while count < len(list):
        for j,k in enumerate(list):
            try:
                if list[j] > list[j+1]:
                    list[j], list[j+1] = list[j+1], list[j]
                else:
                    pass
            except:
                pass 
            j+=1 
        count += 1 
    print (list)
my_list = [8,1,9,4,5,5,6]
sort_list(my_list)<jupyter_output>[1, 4, 5, 5, 6, 8, 9]
<jupyter_text>### 2) check type of object <jupyter_code># using isinstance instead of type 
# since isinstance can work well when in sub-class / class, but type no 
a = 4
print (isinstance (a,int))
print (isinstance (a,str))
print (isinstance (a,float))
a = "b"
print (isinstance (a,str))
print (isinstance(a,(int,list,float)))
print (isinstance(a,(int,list,str)))
class A:
    pass
class B(A):
    pass
print (isinstance(A(), A)) # returns True
print (type(A()) == A )     # returns True
print (isinstance(B(), A))    # returns True
print (type(B()) == A )       # returns False<jupyter_output>True
True
True
False
<jupyter_text>### 3) Fibonacci<jupyter_code># Fibonacci series : 0,1,1,2,3,5,8,13... where F(n) = F(n-1) + F(n-2)
# (F0=0, F1=1, Fn=F(n-1)+F(n-2) for n>=2, n∈N*)
def f(n):
    a,b = 0,1
    for k in range(n):
        a,b = b, a+b
    return a 
for count in range(10):
    print (f(count))<jupyter_output>0
1
1
2
3
5
8
13
21
34
 | 
	no_license | 
	/archived/programming/python/Python_Basics_FAQs.ipynb | 
	yennanliu/CS_basics | 3 | 
| 
	<jupyter_start><jupyter_text>## Barrowman Method Application
This code applies the Barrowman method to determine the center of pressure of each component of the rocket. It is intended to provide insight into the drag coefficient vs. Mach number for the PSAS LV4 rocket.
## References:
- http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20010047838.pdf
- http://rocketmime.com/rockets/Barrowman<jupyter_code>import math as m
import numpy as np
# Ian B Zabel Barrowman Method Application
# Equations from:
# - ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20010047838.pdf
# - rocketmime.com/rockets/Barrowman
'''
Note for all references of Ar:
 Ar is the reference area for the specific equation mentioned.
 This means all functions with Ar must be redefined 
 with the proper reference area values.
 Possibly make Cna1 a general function for all possible Ar?
'''
# Defined Parameters
# Assuming:
# - fin is clipped delta, swept
# - fin airfoil is a symmetric hex
Ln = 0.5 # Nosecone length (m)
d = 0.27 # Outer diameter (m)
cr = 0.25 # Fin, root chord (m)
ct = 0.125 # Fin, tip chord (m)
b = 0.3 # Wing span (m)
S = (cr+ct)*b*0.5 # Fin area (m^2)
TR = 0 # Taper ratio
SMC = S/b # Standard mean chord (m)
#MAC = 
O = 10 # Sweep Angle at root tip (deg.)
xr = 1 # Fin, root LE to tip LE (m)
xb = 1 # Rocket nose to fin tip LE (m)
N = 4 # Number of fins
Cnan = 2 # Normal force for nosecone
# Fin Center of Pressure
# pg. 7-10, ref. 1
# For fin geometry, note: Fig 3-2, pg 7
MAC = (2/3)*(cr+ct-cr*ct/(cr+ct)) # eq. 24, mean aero chord (MAC) (m)
MAY = (S/3)*(cr+2*ct)/(cr+ct) # eq. 27, MAC loc. from root (m)
# NOTE: LT and xt below (and AR, Af, Ar, Beta, Lr, lo, Alo, V, CnaB, CnaTB, CnaBT, XB, XTB, XBT further on)
# are referenced but never defined in this cell, so it will not run as-is.
MAX = LT + xt*MAY/S + MAC/4 # eq. 30, Longitudinal loc. of MAC (m)
# Roll Damping Coeff.
# pg. 11, ref. 1
# Note: for supersonic, ref. 1 in appendix A
Cna1 = 2*m.pi*AR*(Af/Ar)/(2+m.sqrt(4+(Beta*AR/m.cos(m.radians(O)))**2)) # eq. 6 (dimless); O is in degrees, so convert before cos
Cld  = N*Cna1*MAY/Lr # eq. 35 (dimless)
# Body Center of Pressure
# pg. 29, ref. 1
Cpb = 2/(Ar*Lr)*(lo*Alo-V) # eq. 87, body Cp as f(V) (dimless)
Xb  = Cpb/Cna1 # eq. 88, Cp loc. from tip (m)
# Total Normal Coeff.
# pg. 37, ref. 1
Cn_total = CnaB+CnaTB + CnaBT # eq. 106
X_total  = XB*CnaB+XTB*CnaTB+XBT*CnaBT # eq. 107<jupyter_output><empty_output> | 
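Because several symbols in the cell above are never defined, here is a self-contained editor's sketch (not the notebook's code) of the basic nose-plus-fins Barrowman combination from the rocketmime reference. The function name barrowman_cp, the treatment of s as the fin semi-span (b/2), the ogive/cone nose factors, and the example parameter values are assumptions for illustration; conical transitions and body lift are omitted.
import math

def barrowman_cp(Ln, d, cr, ct, s, xr, xb, N, nose="ogive"):
    """Hedged sketch: nose + fins only, no transitions, subsonic, fins in clean flow."""
    # Nose cone: CNalpha = 2 for any nose; CP location depends on shape
    cna_nose = 2.0
    x_nose = 0.466 * Ln if nose == "ogive" else 0.666 * Ln  # cone: 2/3 * Ln
    # Fins (N of them), including the fin-body interference factor
    R = d / 2.0
    lf = math.sqrt(s**2 + (xr + (ct - cr) / 2.0) ** 2)  # length of the fin mid-chord line
    kfb = 1.0 + R / (s + R)
    cna_fins = kfb * (4.0 * N * (s / d) ** 2) / (1.0 + math.sqrt(1.0 + (2.0 * lf / (cr + ct)) ** 2))
    x_fins = xb + (xr / 3.0) * (cr + 2.0 * ct) / (cr + ct) \
             + (1.0 / 6.0) * ((cr + ct) - cr * ct / (cr + ct))
    # Combine: the CP is the CNalpha-weighted average of the component CP locations
    cna_total = cna_nose + cna_fins
    x_cp = (cna_nose * x_nose + cna_fins * x_fins) / cna_total
    return cna_total, x_cp

# example using parameter values like those defined earlier (s assumed to be b/2)
print(barrowman_cp(Ln=0.5, d=0.27, cr=0.25, ct=0.125, s=0.15, xr=1.0, xb=1.0, N=4))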
	non_permissive | 
	/archive/BarrowmanMethodNotebook/.ipynb_checkpoints/BarrowmanMethod-checkpoint.ipynb | 
	psas/liquid-engine-analysis | 1 | 
			Subsets and Splits
				
	
				
			
				
No community queries yet
The top public SQL queries from the community will appear here once available.
