Cheatsheet

Created: Some helpful functions for Data Analysis and ML with Python

Updated: 03 September 2023

Python Data Science Cheatsheet

Univariate Selection

import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

DATA_FILE = 'sample-data/mobile-price-classification/train.csv'

uv_data = pd.read_csv(DATA_FILE)

uv_data.head()

uv_x = uv_data.iloc[:, 0:20]  # read in the first 20 columns (features)
uv_y = uv_data.iloc[:, -1]    # read in the last column (target)

feature_count = 10  # number of features we want to select

scores = SelectKBest(score_func=chi2, k='all').fit(uv_x, uv_y).scores_

df_fit = pd.DataFrame(scores)         # scores as a DataFrame
df_cols = pd.DataFrame(uv_x.columns)  # column names as a DataFrame

df_scores = pd.concat([df_cols, df_fit], axis=1)
df_scores.columns = ['Feature', 'Score']

df_scores.nlargest(feature_count, 'Score')
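
The table above only ranks the features. If you want the reduced feature matrix directly, SelectKBest can also transform the data; a minimal sketch reusing uv_x and uv_y from above (note that chi2 requires non-negative feature values):

selector = SelectKBest(score_func=chi2, k=feature_count)
uv_x_selected = selector.fit_transform(uv_x, uv_y)  # ndarray holding only the top-k columns

selected_columns = uv_x.columns[selector.get_support()]  # names of the kept features
print(selected_columns)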

Feature Selection

import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt

fs_data = pd.read_csv(DATA_FILE)
fs_x = fs_data.iloc[:, 0:20]
fs_y = fs_data.iloc[:, -1]

classifier = ExtraTreesClassifier()  # create the classifier instance
classifier.fit(fs_x, fs_y)           # train the classifier

fs_importance = classifier.feature_importances_  # impurity-based importances

print(fs_importance)

df_importance = pd.Series(fs_importance, index=fs_x.columns)

df_importance.nlargest(10).plot(kind='barh')
plt.show()
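
Impurity-based importances can overstate high-cardinality features. As a cross-check, scikit-learn's permutation importance measures how much the score drops when a single feature is shuffled; a minimal sketch reusing the classifier trained above:

from sklearn.inspection import permutation_importance

# shuffle each feature n_repeats times and record the mean drop in score
perm = permutation_importance(classifier, fs_x, fs_y, n_repeats=5, random_state=0)

df_perm = pd.Series(perm.importances_mean, index=fs_x.columns)
df_perm.nlargest(10).plot(kind='barh')
plt.show()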

Normal Correlation Heatmap

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

cm_data = pd.read_csv(DATA_FILE)

cm_data.head()

cm_x = cm_data.iloc[:, 0:20]  # extract the feature columns
cm_y = cm_data.iloc[:, -1]    # extract the target column

correlation_matrix = cm_data.corr()

correlation_matrix

top_correlation_features = correlation_matrix.index

plt.figure(figsize=(20, 20))
_ = sns.heatmap(cm_data[top_correlation_features].corr(), annot=True, cmap="RdYlGn")

Or as the following function:

def plot_df_correlation(df):
    plt.figure(figsize=(20, 20))
    return sns.heatmap(df[df.corr().index].corr(), annot=True, cmap="coolwarm")
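
For example, applied to the mobile-price data loaded above:

_ = plot_df_correlation(cm_data)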

Simplified One-Hot Encoding

Only encode values that occur more often than a given threshold; all rarer values are collapsed into a single 'other' category first.

domain_counts = df_domain_invoice['Email Domain'].value_counts()

replace_domains = domain_counts[domain_counts < 100].index  # domains that appear fewer than 100 times

# one-hot encoding of the domains; rare domains are mapped to 'other_' first
# (the columns= argument is not needed when encoding a single Series)
df_domain_invoice = pd.get_dummies(
    df_domain_invoice['Email Domain'].replace(replace_domains, 'other_'),
    drop_first=False)
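
The same pattern can be wrapped in a small reusable helper. The function and parameter names (encode_rare, min_count, other_label) are illustrative, not part of the original snippet:

def encode_rare(series, min_count=100, other_label='other_'):
    """One-hot encode a Series, collapsing values rarer than min_count into one category."""
    counts = series.value_counts()
    rare = counts[counts < min_count].index
    return pd.get_dummies(series.replace(rare, other_label))

# usage: df_dummies = encode_rare(df_domain_invoice['Email Domain'], min_count=100)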

Heatmapping Categorical Correlation

Based on Shaked Zychlinski's dython library:

import scipy.stats as ss
from collections import Counter
import math
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt

def convert(data, to):
    converted = None
    if to == 'array':
        if isinstance(data, np.ndarray):
            converted = data
        elif isinstance(data, pd.Series):
            converted = data.values
        elif isinstance(data, list):
            converted = np.array(data)
        elif isinstance(data, pd.DataFrame):
            converted = data.to_numpy()  # as_matrix() was removed in pandas 1.0
    elif to == 'list':
        if isinstance(data, list):
            converted = data
        elif isinstance(data, pd.Series):
            converted = data.values.tolist()
        elif isinstance(data, np.ndarray):
            converted = data.tolist()
    elif to == 'dataframe':
        if isinstance(data, pd.DataFrame):
            converted = data
        elif isinstance(data, np.ndarray):
            converted = pd.DataFrame(data)
    else:
        raise ValueError("Unknown data conversion: {}".format(to))
    if converted is None:
        raise TypeError('cannot handle data conversion of type: {} to {}'.format(type(data), to))
    else:
        return converted

def conditional_entropy(x, y):
    """
    Calculates the conditional entropy of x given y: S(x|y)
    Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy
    :param x: list / NumPy ndarray / Pandas Series
        A sequence of measurements
    :param y: list / NumPy ndarray / Pandas Series
        A sequence of measurements
    :return: float
    """
    y_counter = Counter(y)
    xy_counter = Counter(list(zip(x, y)))
    total_occurrences = sum(y_counter.values())
    entropy = 0.0
    for xy in xy_counter.keys():
        p_xy = xy_counter[xy] / total_occurrences
        p_y = y_counter[xy[1]] / total_occurrences
        entropy += p_xy * math.log(p_y / p_xy)
    return entropy

def cramers_v(x, y):
    """Bias-corrected Cramer's V for two categorical sequences. Symmetric, in the range [0, 1]."""
    confusion_matrix = pd.crosstab(x, y)
    chi2 = ss.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))

def theils_u(x, y):
    """Theil's U (uncertainty coefficient) of x given y. Asymmetric, in the range [0, 1]."""
    s_xy = conditional_entropy(x, y)
    x_counter = Counter(x)
    total_occurrences = sum(x_counter.values())
    p_x = list(map(lambda n: n / total_occurrences, x_counter.values()))
    s_x = ss.entropy(p_x)
    if s_x == 0:
        return 1
    else:
        return (s_x - s_xy) / s_x

def correlation_ratio(categories, measurements):
    """Correlation ratio (eta) between a categorical and a continuous sequence, in the range [0, 1]."""
    categories = convert(categories, 'array')
    measurements = convert(measurements, 'array')
    fcat, _ = pd.factorize(categories)
    cat_num = np.max(fcat) + 1
    y_avg_array = np.zeros(cat_num)
    n_array = np.zeros(cat_num)
    for i in range(0, cat_num):
        cat_measures = measurements[np.argwhere(fcat == i).flatten()]
        n_array[i] = len(cat_measures)
        y_avg_array[i] = np.average(cat_measures)
    y_total_avg = np.sum(np.multiply(y_avg_array, n_array)) / np.sum(n_array)
    numerator = np.sum(np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2)))
    denominator = np.sum(np.power(np.subtract(measurements, y_total_avg), 2))
    if numerator == 0:
        eta = 0.0
    else:
        eta = np.sqrt(numerator / denominator)  # the correlation ratio is eta, not eta squared
    return eta

def associations(dataset, nominal_columns=None, mark_columns=False, theil_u=False, plot=True,
                 return_results=False, **kwargs):
    """
    Calculate the correlation/strength-of-association of the features in a data-set with both
    categorical and continuous features, using:
        - Pearson's R for continuous-continuous cases
        - Correlation Ratio for categorical-continuous cases
        - Cramer's V or Theil's U for categorical-categorical cases
    :param dataset: NumPy ndarray / Pandas DataFrame
        The data-set for which the features' correlation is computed
    :param nominal_columns: string / list / NumPy ndarray
        Names of the columns which hold categorical values. Can also be the string 'all' to state
        that all columns are categorical, or None (default) to state that none are categorical
    :param mark_columns: Boolean (default: False)
        If True, the output's column names get a suffix of '(nom)' or '(con)' based on their type
        (nominal or continuous), as provided by nominal_columns
    :param theil_u: Boolean (default: False)
        In the case of categorical-categorical features, use Theil's U instead of Cramer's V
    :param plot: Boolean (default: True)
        If True, plot a heat-map of the correlation matrix
    :param return_results: Boolean (default: False)
        If True, the function will return a Pandas DataFrame of the computed associations
    :param kwargs:
        Arguments to be passed to the used functions and methods
    :return: Pandas DataFrame
        A DataFrame of the correlation/strength-of-association between all features
    """
    dataset = convert(dataset, 'dataframe')
    columns = dataset.columns
    if nominal_columns is None:
        nominal_columns = list()
    elif nominal_columns == 'all':
        nominal_columns = columns
    corr = pd.DataFrame(index=columns, columns=columns)
    for i in range(0, len(columns)):
        for j in range(i, len(columns)):
            if i == j:
                corr.loc[columns[i], columns[j]] = 1.0
            elif columns[i] in nominal_columns:
                if columns[j] in nominal_columns:
                    if theil_u:
                        # Theil's U is asymmetric, so both directions are computed separately
                        corr.loc[columns[i], columns[j]] = theils_u(dataset[columns[i]], dataset[columns[j]])
                        corr.loc[columns[j], columns[i]] = theils_u(dataset[columns[j]], dataset[columns[i]])
                    else:
                        cell = cramers_v(dataset[columns[i]], dataset[columns[j]])
                        corr.loc[columns[i], columns[j]] = cell
                        corr.loc[columns[j], columns[i]] = cell
                else:
                    cell = correlation_ratio(dataset[columns[i]], dataset[columns[j]])
                    corr.loc[columns[i], columns[j]] = cell
                    corr.loc[columns[j], columns[i]] = cell
            else:
                if columns[j] in nominal_columns:
                    cell = correlation_ratio(dataset[columns[j]], dataset[columns[i]])
                    corr.loc[columns[i], columns[j]] = cell
                    corr.loc[columns[j], columns[i]] = cell
                else:
                    cell, _ = ss.pearsonr(dataset[columns[i]], dataset[columns[j]])
                    corr.loc[columns[i], columns[j]] = cell
                    corr.loc[columns[j], columns[i]] = cell
    corr = corr.fillna(value=np.nan).astype(float)  # ensure a numeric matrix for the heatmap
    if mark_columns:
        marked_columns = ['{} (nom)'.format(col) if col in nominal_columns else '{} (con)'.format(col)
                          for col in columns]
        corr.columns = marked_columns
        corr.index = marked_columns
    if plot:
        plt.figure(figsize=kwargs.get('figsize', (20, 20)))
        sns.heatmap(corr, annot=kwargs.get('annot', True), fmt=kwargs.get('fmt', '.2f'), cmap='coolwarm')
        plt.show()
    if return_results:
        return corr
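
A quick usage sketch on the mobile-price data loaded earlier. The nominal column names below are assumptions based on the sample data-set's binary columns and target, so adjust them to your own data:

nominal = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi', 'price_range']

df_assoc = associations(cm_data, nominal_columns=nominal, theil_u=True,
                        mark_columns=True, return_results=True)

# strongest associations with the target
df_assoc['price_range (nom)'].sort_values(ascending=False)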