🐍 Python ML Libraries

Master NumPy, Pandas, Scikit-learn & More

📖 Introduction

Python is the dominant language for machine learning, thanks to its rich ecosystem of libraries. This guide covers the essential libraries you need to master for ML success.

🔢 NumPy - Numerical Computing

NumPy provides fast, efficient arrays and mathematical operations. It's the foundation for most ML libraries.

import numpy as np

# Creating arrays
arr1 = np.array([1, 2, 3, 4, 5])
arr2 = np.arange(0, 10, 2)  # [0, 2, 4, 6, 8]
arr3 = np.linspace(0, 1, 5)  # 5 evenly spaced values
arr4 = np.zeros((3, 3))  # 3x3 matrix of zeros
arr5 = np.ones((2, 4))   # 2x4 matrix of ones
arr6 = np.random.randn(3, 3)  # 3x3 random normal

print("Array 1:", arr1)
print("Array 2:", arr2)
print("Array 3:", arr3)
print("Zeros:\n", arr4)
print("Random:\n", arr6)

# Array operations
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

print("\nAddition:", a + b)
print("Multiplication:", a * b)
print("Dot product:", np.dot(a, b))
print("Power:", a ** 2)
print("Square root:", np.sqrt(a))

# Statistical operations
data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
print("\nMean:", np.mean(data))
print("Median:", np.median(data))
print("Std Dev:", np.std(data))
print("Sum:", np.sum(data))
print("Min:", np.min(data))
print("Max:", np.max(data))

# Reshaping
arr = np.arange(12)
print("\nOriginal:", arr)
print("Reshaped (3x4):\n", arr.reshape(3, 4))
print("Reshaped (2x6):\n", arr.reshape(2, 6))

# Indexing and slicing
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print("\nMatrix:\n", matrix)
print("First row:", matrix[0])
print("First column:", matrix[:, 0])
print("2x2 submatrix:\n", matrix[:2, :2])

# Boolean indexing
arr = np.array([1, 2, 3, 4, 5, 6])
print("\nElements > 3:", arr[arr > 3])
print("Even elements:", arr[arr % 2 == 0])

📊 Pandas - Data Manipulation

Pandas provides DataFrames for working with structured data. Essential for data preprocessing!

import pandas as pd

# Creating DataFrames
df = pd.DataFrame({
    'Name': ['Alice', 'Bob', 'Charlie', 'David'],
    'Age': [25, 30, 35, 28],
    'City': ['NYC', 'SF', 'LA', 'Chicago'],
    'Salary': [70000, 85000, 90000, 75000]
})

print("DataFrame:\n", df)
print("\nInfo:")
df.info()  # info() prints its summary directly and returns None, so don't wrap it in print()
print("\nDescribe:")
print(df.describe())

# Accessing data
print("\nAccess column:", df['Name'])
print("\nAccess row:", df.iloc[0])
print("\nAccess specific cell:", df.loc[0, 'Name'])

# Filtering
print("\nAge > 28:")
print(df[df['Age'] > 28])

print("\nSalary between 70k-85k:")
print(df[(df['Salary'] >= 70000) & (df['Salary'] <= 85000)])

# Adding columns
df['Bonus'] = df['Salary'] * 0.1
print("\nWith Bonus column:\n", df)

# Grouping
cities_df = pd.DataFrame({
    'City': ['NYC', 'NYC', 'SF', 'SF', 'LA'],
    'Sales': [100, 150, 200, 175, 125]
})
print("\nGroup by City:")
print(cities_df.groupby('City')['Sales'].sum())

# Reading/Writing files
# df.to_csv('data.csv', index=False)
# df_loaded = pd.read_csv('data.csv')

# Handling missing data
df_with_na = pd.DataFrame({
    'A': [1, 2, np.nan, 4],
    'B': [5, np.nan, 7, 8],
    'C': [9, 10, 11, 12]
})
print("\nWith NaN:\n", df_with_na)
print("\nDrop NaN:\n", df_with_na.dropna())
print("\nFill NaN with 0:\n", df_with_na.fillna(0))
print("\nFill with mean:\n", df_with_na.fillna(df_with_na.mean()))

🎨 Matplotlib & Seaborn - Visualization

Matplotlib handles general-purpose plotting; Seaborn builds statistical plots on top of it with nicer defaults.

import matplotlib.pyplot as plt
import seaborn as sns

# Line plot
x = np.linspace(0, 10, 100)
y1 = np.sin(x)
y2 = np.cos(x)

plt.figure(figsize=(12, 4))

plt.subplot(1, 3, 1)
plt.plot(x, y1, label='sin(x)')
plt.plot(x, y2, label='cos(x)')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Line Plot')
plt.legend()
plt.grid(True)

# Scatter plot
plt.subplot(1, 3, 2)
x_scatter = np.random.randn(100)
y_scatter = x_scatter + np.random.randn(100) * 0.5
plt.scatter(x_scatter, y_scatter, alpha=0.5)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Scatter Plot')

# Histogram
plt.subplot(1, 3, 3)
data = np.random.randn(1000)
plt.hist(data, bins=30, edgecolor='black')
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.title('Histogram')

plt.tight_layout()
plt.show()

# Seaborn examples
tips = sns.load_dataset('tips')

plt.figure(figsize=(15, 4))

plt.subplot(1, 3, 1)
sns.boxplot(x='day', y='total_bill', data=tips)
plt.title('Box Plot')

plt.subplot(1, 3, 2)
sns.violinplot(x='day', y='total_bill', data=tips)
plt.title('Violin Plot')

plt.subplot(1, 3, 3)
# tips has categorical columns, so restrict corr() to the numeric ones
sns.heatmap(tips.corr(numeric_only=True), annot=True, cmap='coolwarm')
plt.title('Correlation Heatmap')

plt.tight_layout()
plt.show()

🤖 Scikit-learn - Machine Learning

Scikit-learn is THE library for classical machine learning in Python.

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.datasets import load_iris

# Load dataset
iris = load_iris()
X, y = iris.data, iris.target

# Split data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Scale features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Train model
model = LogisticRegression()
model.fit(X_train_scaled, y_train)

# Predict
y_pred = model.predict(X_test_scaled)

# Evaluate
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
print("\nClassification Report:")
print(classification_report(y_test, y_pred,
                            target_names=iris.target_names))

Common Scikit-learn Modules

sklearn.model_selection: train_test_split, cross_val_score, GridSearchCV (sketched below)
sklearn.preprocessing: StandardScaler, MinMaxScaler, LabelEncoder
sklearn.linear_model: LinearRegression, LogisticRegression, Ridge, Lasso
sklearn.tree: DecisionTreeClassifier, DecisionTreeRegressor
sklearn.ensemble: RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
sklearn.metrics: accuracy_score, confusion_matrix, roc_auc_score
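
cross_val_score and GridSearchCV appear in the list above but nowhere else in this guide, so here is a minimal sketch on the iris data already loaded (the parameter grid values are illustrative, not tuned):

from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestClassifier

# 5-fold cross-validation: a more stable estimate than a single train/test split
scores = cross_val_score(RandomForestClassifier(random_state=42), X, y, cv=5)
print(f"CV accuracy: {scores.mean():.2f} (+/- {scores.std():.2f})")

# Grid search: try every parameter combination, each scored by cross-validation
param_grid = {'n_estimators': [50, 100], 'max_depth': [None, 5]}
grid = GridSearchCV(RandomForestClassifier(random_state=42), param_grid, cv=5)
grid.fit(X_train, y_train)
print("Best params:", grid.best_params_)
print(f"Best CV score: {grid.best_score_:.2f}")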

📚 Complete ML Stack

# Install all essential libraries
"""
pip install numpy pandas matplotlib seaborn scikit-learn
pip install jupyter notebook
pip install scipy
"""

# Typical imports for ML projects
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

# Set visualization defaults
plt.style.use('seaborn-v0_8')
sns.set_palette("husl")
pd.set_option('display.max_columns', None)
pd.set_option('display.precision', 2)

🎯 Practical Example: End-to-End ML

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
import seaborn as sns

# 1. Load data
from sklearn.datasets import load_wine
wine = load_wine()
df = pd.DataFrame(wine.data, columns=wine.feature_names)
df['target'] = wine.target

# 2. Explore data
print("Shape:", df.shape)
print("\nFirst rows:\n", df.head())
print("\nStatistics:\n", df.describe())

# 3. Visualize
plt.figure(figsize=(12, 4))

plt.subplot(1, 3, 1)
df['target'].value_counts().plot(kind='bar')
plt.title('Class Distribution')

plt.subplot(1, 3, 2)
sns.boxplot(data=df[['alcohol', 'malic_acid', 'ash']])
plt.title('Feature Distributions')

plt.subplot(1, 3, 3)
sns.heatmap(df.corr(), cmap='coolwarm', center=0)
plt.title('Correlation Matrix')

plt.tight_layout()
plt.show()

# 4. Prepare data
X = df.drop('target', axis=1)
y = df['target']

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# 5. Scale features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# 6. Train model
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train_scaled, y_train)

# 7. Evaluate
y_pred = model.predict(X_test_scaled)
accuracy = accuracy_score(y_test, y_pred)

print(f"\nAccuracy: {accuracy:.2%}")

# 8. Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.title('Confusion Matrix')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()

# 9. Feature Importance
importances = pd.DataFrame({
    'feature': wine.feature_names,
    'importance': model.feature_importances_
}).sort_values('importance', ascending=False)

plt.figure(figsize=(10, 6))
plt.barh(importances['feature'][:10], importances['importance'][:10])
plt.xlabel('Importance')
plt.title('Top 10 Feature Importances')
plt.gca().invert_yaxis()
plt.tight_layout()
plt.show()
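
One step this walkthrough stops short of is saving the trained model for reuse. A minimal sketch with joblib (the filename is arbitrary); note that the scaler must be saved alongside the model, since new data has to pass through the same transformation:

import joblib

# 10. Persist the fitted model and scaler together
joblib.dump({'model': model, 'scaler': scaler}, 'wine_rf.joblib')

# Later, in another session:
artifacts = joblib.load('wine_rf.joblib')
reloaded_pred = artifacts['model'].predict(artifacts['scaler'].transform(X_test))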