import numpy as np
import pandas as pd
from scipy.spatial import cKDTree
from sklearn.neighbors import BallTree
def hopkins(data_frame, sampling_size):
    """Assess the clusterability of a dataset.

    A score around 0.5 expresses no clusterability, while a score
    tending to 0 expresses a high cluster tendency.

    Parameters
    ----------
    data_frame : numpy array or pandas DataFrame
        The input dataset.
    sampling_size : int
        The number of observations sampled from the dataset to
        evaluate the statistic.

    Returns
    -------
    score : float
        The hopkins score of the dataset (between 0 and 1).

    Raises
    ------
    ValueError
        If ``sampling_size`` exceeds the number of observations, or if
        the denominator of the statistic is zero.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from pyclustertend import hopkins
    >>> X = datasets.load_iris().data
    >>> hopkins(X,150)
    0.16
    """
    if isinstance(data_frame, np.ndarray):
        data_frame = pd.DataFrame(data_frame)

    if sampling_size > data_frame.shape[0]:
        raise ValueError(
            'The sampling size is bigger than the number of observations in D')

    # Sample n observations from D : P
    data_frame_sample = data_frame.sample(n=sampling_size)

    # Distance of each sampled point to its nearest neighbour in D : X.
    # k=2 because a sampled point is present in the tree and is its own
    # nearest neighbour at distance 0; column 1 is the true neighbour.
    tree = cKDTree(data_frame.values)
    dist, _ = tree.query(data_frame_sample.values, k=2)
    sample_distances_to_nearest_neighbours = dist[:, 1]

    # Randomly simulate n points with the same range as D : Q.
    # Broadcasting low/high draws one uniform value per column, so any
    # number of columns (including a single one) is supported.
    min_per_column = data_frame.min().values
    max_per_column = data_frame.max().values
    uniformly_selected_observations = np.random.uniform(
        min_per_column, max_per_column,
        (sampling_size, data_frame.shape[1]))

    # Distance of each simulated point to its nearest neighbour in D : Y.
    # k=1 returns a 1-D array of distances.
    dist, _ = tree.query(uniformly_selected_observations, k=1)
    uniform_distances_to_nearest_neighbours = dist

    # Hopkins score : X / (X + Y), returned as a plain float.
    x = float(np.sum(sample_distances_to_nearest_neighbours))
    y = float(np.sum(uniform_distances_to_nearest_neighbours))
    if x + y == 0:
        raise ValueError('The denominator of the hopkins statistics is null')
    return x / (x + y)