
I am implementing a model in TF1.15, and when I try to run the session I get the error shown below.

My dataset:

import numpy as np
import scipy.sparse as sp
from collections import OrderedDict
import tensorflow as tf

index_keys = ["batch_seg", "idnb_i", "idnb_j", "id_expand_kj",
              "id_reduce_ji", "id3dnb_i", "id3dnb_j", "id3dnb_k"]

class Qm9Data():
    """ Qm9 dataset"""
    def __init__(self, filename, cutoff, target_keys):
        data_dict = np.load(filename, allow_pickle=True)
        self.cutoff = cutoff
        self.target_keys = target_keys
        for key in ['id', 'N', 'Z', 'R']:
            if key in data_dict:
                setattr(self, key, data_dict[key])
            else:
                setattr(self, key, None)
        self.targets = np.stack([data_dict[key] for key in self.target_keys], axis=1)

        if self.N is None:
            self.N = np.zeros(len(self.targets), dtype=np.int32)
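        # Prefix sums of atom counts: molecule i owns the atom range
        # N_cumsum[i]:N_cumsum[i + 1] in the flat Z and R arrays.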
        self.N_cumsum = np.concatenate([[0], np.cumsum(self.N)])

        assert self.R is not None
        self.batch_seg = []
        self.idnb_i = []
        self.idnb_j = []
        self.id_expand_kj = []
        self.id_reduce_ji = []
        self.id3dnb_i = []
        self.id3dnb_j = []
        self.id3dnb_k = []

        for idx in range(len(self.targets)):
            if isinstance(idx, (int, np.integer)):
                idx = [idx]

            data = {}
            data['targets'] = np.array(self.targets[idx])
            data['id'] = self.id[idx]
            data['N'] = self.N[idx]
            # data['batch_seg'] = np.repeat(np.arange(len(idx), dtype=np.int32), data['N'])
            self.batch_seg.append(np.repeat(np.arange(len(idx), dtype=np.int32), data['N']))
            adj_matrices = []

            data['Z'] = np.zeros(np.sum(data['N']), dtype=np.int32)
            data['R'] = np.zeros([np.sum(data['N']), 3], dtype=np.float32)

            nend = 0
            for k, i in enumerate(idx):
                n = data['N'][k]  # number of atoms
                nstart = nend
                nend = nstart + n

                if self.Z is not None:
                    data['Z'][nstart:nend] = self.Z[self.N_cumsum[i]:self.N_cumsum[i + 1]]

                R = self.R[self.N_cumsum[i]:self.N_cumsum[i + 1]]
                data['R'][nstart:nend] = R

                Dij = np.linalg.norm(R[:, None, :] - R[None, :, :], axis=-1)
                adj_matrices.append(sp.csr_matrix(Dij <= self.cutoff))
                adj_matrices[-1] -= sp.eye(n, dtype=bool)

            # Entry x,y is edge x<-y (!)
            adj_matrix = self._bmat_fast(adj_matrices)
            # Entry x,y is edgeid x<-y (!)
            atomids_to_edgeid = sp.csr_matrix(
                (np.arange(adj_matrix.nnz), adj_matrix.indices, adj_matrix.indptr),
                shape=adj_matrix.shape)
            edgeid_to_target, edgeid_to_source = adj_matrix.nonzero()

            # Target (i) and source (j) nodes of edges
            # data['idnb_i'] = edgeid_to_target
            self.idnb_i.append(edgeid_to_target)
            # data['idnb_j'] = edgeid_to_source
            self.idnb_j.append(edgeid_to_source)

            # Indices of triplets k->j->i
            ntriplets = adj_matrix[edgeid_to_source].sum(1).A1
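            # ntriplets[e] counts the edges k->j into the source node j of edge e
            # (.A1 flattens the np.matrix returned by .sum(1)); the id3ynb arrays
            # then enumerate one row per triplet k->j->i.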
            id3ynb_i = np.repeat(edgeid_to_target, ntriplets)
            id3ynb_j = np.repeat(edgeid_to_source, ntriplets)
            id3ynb_k = adj_matrix[edgeid_to_source].nonzero()[1]

            # Indices of triplets that are not i->j->i
            id3_y_to_d, = (id3ynb_i != id3ynb_k).nonzero()
            # data['id3dnb_i'] = id3ynb_i[id3_y_to_d]
            self.id3dnb_i.append(id3ynb_i[id3_y_to_d])
            # data['id3dnb_j'] = id3ynb_j[id3_y_to_d]
            self.id3dnb_j.append(id3ynb_j[id3_y_to_d])
            # data['id3dnb_k'] = id3ynb_k[id3_y_to_d]
            self.id3dnb_k.append(id3ynb_k[id3_y_to_d])

            # Edge indices for interactions
            # j->i => k->j
            # data['id_expand_kj'] = atomids_to_edgeid[edgeid_to_source, :].data[id3_y_to_d]
            self.id_expand_kj.append(atomids_to_edgeid[edgeid_to_source, :].data[id3_y_to_d])
            # j->i => k->j => j->i
            # data['id_reduce_ji'] = atomids_to_edgeid[edgeid_to_source, :].tocoo().row[id3_y_to_d]
            self.id_reduce_ji.append(atomids_to_edgeid[edgeid_to_source, :].tocoo().row[id3_y_to_d])
            # print(self.batch_seg[idx[0]],  data['idnb_i'], data['id3dnb_i'], data['id_expand_kj'])
            # exit(0)

    def __len__(self):
        return self.targets.shape[0]

    def _bmat_fast(self, mats):
        new_data = np.concatenate([mat.data for mat in mats])

        ind_offset = np.zeros(1 + len(mats))
        ind_offset[1:] = np.cumsum([mat.shape[0] for mat in mats])
        new_indices = np.concatenate(
            [mats[i].indices + ind_offset[i] for i in range(len(mats))])

        indptr_offset = np.zeros(1 + len(mats))
        indptr_offset[1:] = np.cumsum([mat.nnz for mat in mats])
        new_indptr = np.concatenate(
            [mats[i].indptr[i >= 1:] + indptr_offset[i] for i in range(len(mats))])
        return sp.csr_matrix((new_data, new_indices, new_indptr))
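
As far as I can tell, _bmat_fast just stacks the per-molecule adjacency matrices into one block-diagonal CSR matrix, so it should agree with scipy.sparse.block_diag. A small self-contained sanity check I used (my own test, not part of the model):

import numpy as np
import scipy.sparse as sp

a = sp.csr_matrix(np.array([[0, 1], [1, 0]], dtype=bool))
b = sp.csr_matrix(np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=bool))

fast = Qm9Data._bmat_fast(None, [a, b])  # self is unused in _bmat_fast
ref = sp.block_diag([a, b]).tocsr()      # reference block-diagonal stack
assert (fast != ref).nnz == 0            # no differing entries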

Session:

sess.run(optimizer,
         feed_dict={placeholders['Z']: DATA.Z[idx['train']],
                    placeholders['R']: DATA.R[idx['train']],
                    placeholders['idnb_i']: [DATA.idnb_i[i] for i in idx['train']],
                    placeholders['idnb_j']: [DATA.idnb_j[i] for i in idx['train']],
                    placeholders['id_expand_kj']: [DATA.id_expand_kj[i] for i in idx['train']],
                    placeholders['id_reduce_ji']: [DATA.id_reduce_ji[i] for i in idx['train']],
                    placeholders['id3dnb_i']: [DATA.id3dnb_i[i] for i in idx['train']],
                    placeholders['id3dnb_j']: [DATA.id3dnb_j[i] for i in idx['train']],
                    placeholders['id3dnb_k']: [DATA.id3dnb_k[i] for i in idx['train']],
                    placeholders['batch_seg']: [DATA.batch_seg[i] for i in idx['train']],
                    placeholders['targets']: DATA.targets[idx['train']]})
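
Based on the traceback, session.run converts every feed value with np.asarray(subfeed_val, dtype=subfeed_dtype), so a failing feed can be found by replaying that conversion outside the session. A debugging sketch (assuming feed_dict is the same dict passed to sess.run above):

for ph, val in feed_dict.items():
    try:
        # Same conversion that session.py performs internally
        arr = np.asarray(val, dtype=ph.dtype.as_numpy_dtype)
        print(ph.name, arr.shape, arr.dtype)
    except ValueError as e:
        print(ph.name, 'failed:', e)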

The error is raised for placeholders['targets']:

Traceback (most recent call last):
  File "train_new.py", line 260, in <module>
    placeholders['targets']: DATA.targets[idx['train']]})
  File ".local/lib/python3.7/site-packages/tensorflow_core/python/client/session.py", line 956, in run
    run_metadata_ptr)
  File ".local/lib/python3.7/site-packages/tensorflow_core/python/client/session.py", line 1149, in _run
    np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
  File "/usr/local/python3.7.5/lib/python3.7/site-packages/numpy/core/_asarray.py", line 85, in asarray
    return array(a, dtype, copy=False, order=order)
ValueError: setting an array element with a sequence.
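
For what it's worth, the np.asarray call in the traceback raises this exact error whenever the value being converted is a ragged sequence, e.g.:

import numpy as np

vals = [np.zeros(2, dtype=np.float32), np.zeros(3, dtype=np.float32)]
np.asarray(vals, dtype=np.float32)  # ValueError: setting an array element with a sequence.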