
I need to compute Information Gain scores for >100k features across >10k documents for text classification. The code below works, but on the full dataset it is very slow: it takes more than an hour on a laptop. The dataset is 20newsgroups and I am using scikit-learn; the chi2 function provided in scikit-learn runs extremely fast by comparison.
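
(For reference, the chi2 baseline being compared against is a single call; X here stands for the sparse document-term matrix and y for the label vector, the same inputs as in the function below:)

from sklearn.feature_selection import chi2

# chi-squared scores and p-values for every feature; runs in seconds on the same data
chi2_scores, p_values = chi2(X, y)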

Any idea how to compute Information Gain faster for such dataset?

import numpy as np


def information_gain(x, y):

    def _entropy(values):
        # Shannon entropy of a vector of class labels.
        counts = np.bincount(values)
        probs = counts[np.nonzero(counts)] / float(len(values))
        return -np.sum(probs * np.log(probs))

    def _information_gain(feature, y):
        # Split documents into those where the feature occurs and those where it does not.
        feature_set_indices = np.nonzero(feature)[1]
        feature_not_set_indices = [i for i in feature_range if i not in feature_set_indices]
        entropy_x_set = _entropy(y[feature_set_indices])
        entropy_x_not_set = _entropy(y[feature_not_set_indices])

        return entropy_before - (((len(feature_set_indices) / float(feature_size)) * entropy_x_set)
                                 + ((len(feature_not_set_indices) / float(feature_size)) * entropy_x_not_set))

    feature_size = x.shape[0]          # number of documents
    feature_range = range(0, feature_size)
    entropy_before = _entropy(y)
    information_gain_scores = []

    for feature in x.T:
        information_gain_scores.append(_information_gain(feature, y))
    return information_gain_scores, []
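
For reference, the score the function above computes for each feature f is just the standard information gain of a present/absent split, with H the Shannon entropy over the class labels:

$$\mathrm{IG}(f) = H(Y) - \Big[ P(f{=}1)\, H(Y \mid f{=}1) + P(f{=}0)\, H(Y \mid f{=}0) \Big], \qquad H(Y) = -\sum_{c} p(c)\,\log p(c)$$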

EDIT:

I merged the internal functions and ran cProfile as below (on a dataset limited to ~15k features and ~1k documents):

cProfile.runctx(
    """for feature in x.T:
    feature_set_indices = np.nonzero(feature)[1]
    feature_not_set_indices = [i for i in feature_range if i not in feature_set_indices]

    values = y[feature_set_indices]
    counts = np.bincount(values)
    probs = counts[np.nonzero(counts)] / float(len(values))
    entropy_x_set = - np.sum(probs * np.log(probs))

    values = y[feature_not_set_indices]
    counts = np.bincount(values)
    probs = counts[np.nonzero(counts)] / float(len(values))
    entropy_x_not_set = - np.sum(probs * np.log(probs))

    result = entropy_before - (((len(feature_set_indices) / float(feature_size)) * entropy_x_set)
                             + ((len(feature_not_set_indices) / float(feature_size)) * entropy_x_not_set))
    information_gain_scores.append(result)""",
    globals(), locals())

Top 20 results, sorted by tottime:

ncalls  tottime percall cumtime percall filename:lineno(function)
1       60.27   60.27   65.48   65.48   <string>:1(<module>)
16171   1.362   0   2.801   0   csr.py:313(_get_row_slice)
16171   0.523   0   0.892   0   coo.py:201(_check)
16173   0.394   0   0.89    0   compressed.py:101(check_format)
210235  0.297   0   0.297   0   {numpy.core.multiarray.array}
16173   0.287   0   0.331   0   compressed.py:631(prune)
16171   0.197   0   1.529   0   compressed.py:534(tocoo)
16173   0.165   0   1.263   0   compressed.py:20(__init__)
16171   0.139   0   1.669   0   base.py:415(nonzero)
16171   0.124   0   1.201   0   coo.py:111(__init__)
32342   0.123   0   0.123   0   {method 'max' of 'numpy.ndarray' objects}
48513   0.117   0   0.218   0   sputils.py:93(isintlike)
32342   0.114   0   0.114   0   {method 'sum' of 'numpy.ndarray' objects}
16171   0.106   0   3.081   0   csr.py:186(__getitem__)
32342   0.105   0   0.105   0   {numpy.lib._compiled_base.bincount}
32344   0.09    0   0.094   0   base.py:59(set_shape)
210227  0.088   0   0.088   0   {isinstance}
48513   0.081   0   1.777   0   fromnumeric.py:1129(nonzero)
32342   0.078   0   0.078   0   {method 'min' of 'numpy.ndarray' objects}
97032   0.066   0   0.153   0   numeric.py:167(asarray)

It looks like most of the time is spent in _get_row_slice. I am not entirely sure about the first row; it seems to cover the whole block I passed to cProfile.runctx, and I don't understand why there is such a big gap between the first line (tottime=60.27) and the second one (tottime=1.362). Where was the difference spent? Is it possible to check this in cProfile?

Basically, the problem seems to be the sparse matrix operations (slicing, getting elements). The solution would probably be to calculate Information Gain using matrix algebra, the way chi2 is implemented in scikit-learn, but I have no idea how to express this calculation in terms of matrix operations. Does anyone have an idea?

p_b_garcia
    My advice, independent of the question: reduce the feature set _before_ you compute information gain, by simpler means that are cheaper to compute. For example, many ngrams (which I suppose are your features) will only appear once or twice in the corpus and should be excluded beforehand, reducing your feature set greatly. – kutschkem Mar 26 '15 at 09:34
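
A minimal sketch of that pre-filtering, assuming the features come from scikit-learn's CountVectorizer (min_df is its parameter for dropping rare terms; `documents` is a placeholder for the raw texts):

from sklearn.feature_extraction.text import CountVectorizer

# min_df=2 drops every ngram that occurs in fewer than 2 documents,
# which typically removes a large fraction of the vocabulary up front.
vectorizer = CountVectorizer(ngram_range=(1, 2), min_df=2)
X = vectorizer.fit_transform(documents)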

3 Answers


I don't know whether this still helps, since a year has passed, but I now happen to be faced with the same task for text classification. I rewrote your code using the nonzero() method provided for sparse matrices: I scan nz, count the corresponding y values, and calculate the entropies.

The following code needs only seconds to run on the news20 dataset (loaded using the libsvm sparse matrix format).

import numpy as np


def information_gain(X, y):

    def _calIg():
        # Information gain of the current feature, using the per-class counts
        # accumulated for the documents in which the feature occurs (classCnt).
        entropy_x_set = 0
        entropy_x_not_set = 0
        for c in classCnt:
            probs = classCnt[c] / float(featureTot)
            entropy_x_set = entropy_x_set - probs * np.log(probs)
            probs = (classTotCnt[c] - classCnt[c]) / float(tot - featureTot)
            entropy_x_not_set = entropy_x_not_set - probs * np.log(probs)
        for c in classTotCnt:
            if c not in classCnt:
                probs = classTotCnt[c] / float(tot - featureTot)
                entropy_x_not_set = entropy_x_not_set - probs * np.log(probs)
        return entropy_before - ((featureTot / float(tot)) * entropy_x_set
                             +  ((tot - featureTot) / float(tot)) * entropy_x_not_set)

    tot = X.shape[0]
    classTotCnt = {}
    entropy_before = 0
    for i in y:
        if i not in classTotCnt:
            classTotCnt[i] = 1
        else:
            classTotCnt[i] = classTotCnt[i] + 1
    for c in classTotCnt:
        probs = classTotCnt[c] / float(tot)
        entropy_before = entropy_before - probs * np.log(probs)

    # nz[0] holds feature indices and nz[1] the matching document indices,
    # grouped by feature, so one pass can accumulate the per-class counts.
    nz = X.T.nonzero()
    pre = 0
    classCnt = {}
    featureTot = 0
    information_gain = []
    for i in range(0, len(nz[0])):
        if (i != 0 and nz[0][i] != pre):
            # Features with no nonzero entries get a score of 0.
            for notappear in range(pre+1, nz[0][i]):
                information_gain.append(0)
            ig = _calIg()
            information_gain.append(ig)
            pre = nz[0][i]
            classCnt = {}
            featureTot = 0
        featureTot = featureTot + 1
        yclass = y[nz[1][i]]
        if yclass not in classCnt:
            classCnt[yclass] = 1
        else:
            classCnt[yclass] = classCnt[yclass] + 1
    ig = _calIg()
    information_gain.append(ig)

    return np.asarray(information_gain)
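
A minimal usage sketch, assuming the news20 data sits locally in libsvm format (the file name is a placeholder):

import numpy as np
from sklearn.datasets import load_svmlight_file

X, y = load_svmlight_file("news20.scale")        # X: CSR document-term matrix, y: class labels
scores = information_gain(X, y.astype(int))
top20 = np.argsort(scores)[::-1][:20]            # indices of the 20 highest-IG features
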
junjiek
  • Sir please help me to understand X and y here. Thanks. – Pappu Jha Oct 08 '15 at 05:55
  • X is a matrix containing the features per instance, where every row represents an instance and the columns represent the features. y represents the target classes. See for example http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.chi2.html for more information about the interface. – www.data-blogger.com Jan 07 '16 at 12:53
  • @Kevin- I tried your code but I am getting the following error TypeError: only integer arrays with one element can be converted to an index. I passed the tfidf-matrix (weights) as the X variable and the training labels as the Y variable. Am I passing the wrong X variables? – OAK Jan 11 '16 at 18:25
  • @junjiek thank you for sharing your code with us. Do you mind explaining how this works for 20 newsgroups? Information gain should be applied to the words, to calculate each word's information gain with respect to each class, but your matrix is based on (documents, features). Do we need to load 20 newsgroups in a specific way to be compatible with your code? – sariii Nov 27 '18 at 20:43

Here is a version that uses matrix operations. The IG for a feature is a mean over its class-specific scores.

import numpy as np
from scipy.sparse import issparse
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import check_array
from sklearn.utils.extmath import safe_sparse_dot


def ig(X, y):

    # The four terms of the mutual-information sum between the feature indicator
    # and the class indicator: t1 covers P(f,c), t2 P(~f,~c), t3 P(~f,c), t4 P(f,~c).
    def get_t1(fc, c, f):
        t = np.log2(fc/(c * f))
        t[~np.isfinite(t)] = 0
        return np.multiply(fc, t)

    def get_t2(fc, c, f):
        t = np.log2((1-f-c+fc)/((1-c)*(1-f)))
        t[~np.isfinite(t)] = 0
        return np.multiply((1-f-c+fc), t)

    def get_t3(c, f, class_count, observed, total):
        nfc = (class_count - observed)/total
        t = np.log2(nfc/(c*(1-f)))
        t[~np.isfinite(t)] = 0
        return np.multiply(nfc, t)

    def get_t4(c, f, feature_count, observed, total):
        fnc = (feature_count - observed)/total
        t = np.log2(fnc/((1-c)*f))
        t[~np.isfinite(t)] = 0
        return np.multiply(fnc, t)

    X = check_array(X, accept_sparse='csr')
    if np.any((X.data if issparse(X) else X) < 0):
        raise ValueError("Input X must be non-negative.")

    Y = LabelBinarizer().fit_transform(y)
    if Y.shape[1] == 1:
        Y = np.append(1 - Y, Y, axis=1)

    # counts

    observed = safe_sparse_dot(Y.T, X)          # n_classes * n_features
    total = observed.sum(axis=0).reshape(1, -1).sum()
    feature_count = X.sum(axis=0).reshape(1, -1)
    class_count = (X.sum(axis=1).reshape(1, -1) * Y).T

    # probs

    f = feature_count / feature_count.sum()
    c = class_count / float(class_count.sum())
    fc = observed / total

    # the feature score is averaged over classes
    scores = (get_t1(fc, c, f) +
            get_t2(fc, c, f) +
            get_t3(c, f, class_count, observed, total) +
            get_t4(c, f, feature_count, observed, total)).mean(axis=0)

    scores = np.asarray(scores).reshape(-1)

    return scores, []

On a dataset with 1000 instances and 1000 unique features, this implementation is >100x faster than the one without matrix operations.
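
For example, a minimal sketch of such a comparison, using a random sparse count matrix (the sizes and density are placeholders):

import numpy as np
from scipy import sparse
from sklearn.feature_selection import chi2

rng = np.random.RandomState(0)
X = sparse.random(1000, 1000, density=0.01, format='csr', random_state=rng)  # fake doc-term counts
y = rng.randint(0, 20, size=1000)                                            # fake class labels

ig_scores, _ = ig(X, y)          # one IG score per feature, shape (1000,)
chi2_scores, _ = chi2(X, y)      # the scikit-learn baseline mentioned in the question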

vpekar

It is this line, feature_not_set_indices = [i for i in feature_range if i not in feature_set_indices], that takes 90% of the time; try changing it to a set operation.
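
A minimal sketch of that change (np.setdiff1d expresses the same set difference in vectorized form):

import numpy as np

# Membership tests against a set are O(1) each, instead of O(n) against an array/list.
feature_set = set(feature_set_indices)
feature_not_set_indices = [i for i in feature_range if i not in feature_set]

# Or fully vectorized:
feature_not_set_indices = np.setdiff1d(np.arange(feature_size), feature_set_indices)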

Nikolay Kostov