# (removed scraped page residue: "36 lines / 1.1 KiB / Python" metadata and pipe separators)
# Data Preprocessing
#
# Loads Data.csv, imputes missing numeric values, one-hot encodes the first
# (categorical) column, label-encodes the target, splits into train/test
# sets, and standardizes the features.

# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Import the dataset
# NOTE(review): assumes Data.csv has features in every column but the last,
# with the first column categorical and columns 1-2 numeric — confirm.
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Handle missing data
# FIX: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22.
# SimpleImputer is the drop-in replacement; strategy='mean' is column-wise,
# which is what the old axis=0 meant. missing_values=np.nan replaces the
# old string sentinel 'NaN'.
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])

# Encode categorical variables
# FIX: OneHotEncoder(categorical_features=[0]) was removed in scikit-learn
# 0.22. ColumnTransformer now selects which column(s) to encode, and it
# accepts string categories directly, so the old LabelEncoder pre-pass on
# X[:, 0] is no longer needed. remainder='passthrough' keeps the other
# columns, matching the old behavior of encoding column 0 in place.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
ct = ColumnTransformer(
    [('onehot', OneHotEncoder(), [0])],
    remainder='passthrough',
)
X = np.asarray(ct.fit_transform(X), dtype=float)
# Label-encode the target vector (e.g. yes/no -> 1/0)
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)

# Split the dataset into a Training set and a Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0,
)

# Feature Scaling
# Fit the scaler on the training set only, then apply the same transform to
# the test set — fitting on the test set would leak its statistics.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)