Error Description
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type float).
Details
ValueError Traceback (most recent call last)
D:\deeplearning\Temp\ipykernel_5864\2961666884.py in
26 print(train_data.shape,test_data.shape,time.time()-start)
27
---> 28 model,predictions = trainer(train_data,test_data)
29 # returns = simulate(test_data,predictions)
30 # returns.to_csv(result_folder+'/avg_daily_rets-'+str(test_year)+'.csv')
D:\deeplearning\Temp\ipykernel_5864\3168391516.py in trainer(train_data, test_data)
28 validation_split=0.2,
29 callbacks=callbacks,
---> 30 batch_size=512
31 )
32
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
817 max_queue_size=max_queue_size,
818 workers=workers,
--> 819 use_multiprocessing=use_multiprocessing)
820
821 def evaluate(self,
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
233 max_queue_size=max_queue_size,
234 workers=workers,
--> 235 use_multiprocessing=use_multiprocessing)
236
237 total_samples = _get_total_number_of_samples(training_data_adapter)
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing)
567 sample_weight_modes=sample_weight_modes,
568 shuffle=shuffle,
--> 569 distribution_strategy=distribution_strategy)
570
571 val_adapter = adapter_cls(
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in init(self, x, y, sample_weights, sample_weight_modes, batch_size, epochs, steps, shuffle, **kwargs)
355 indices_dataset = indices_dataset.flat_map(slice_batch_indices)
356
--> 357 dataset = self.slice_inputs(indices_dataset, inputs)
358
359 if shuffle == "batch":
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in slice_inputs(self, indices_dataset, inputs)
381 dataset = dataset_ops.DatasetV2.zip((
382 indices_dataset,
--> 383 dataset_ops.DatasetV2.from_tensors(inputs).repeat()
384 ))
385
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\data\ops\dataset_ops.py in from_tensors(tensors)
564 Dataset: A Dataset.
565 """
--> 566 return TensorDataset(tensors)
567
568 @staticmethod
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\data\ops\dataset_ops.py in init(self, element)
2763 def init(self, element):
2764 """See Dataset.from_tensors() for details."""
-> 2765 element = structure.normalize_element(element)
2766 self._structure = structure.type_spec_from_value(element)
2767 self._tensors = structure.to_tensor_list(self._structure, element)
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\data\util\structure.py in normalize_element(element)
111 else:
112 normalized_components.append(
--> 113 ops.convert_to_tensor(t, name="component_%d" % i))
114 return nest.pack_sequence_as(element, normalized_components)
115
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1312
1313 if ret is None:
-> 1314 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1315
1316 if ret is NotImplemented:
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\tensor_conversion_registry.py in _default_conversion_function(failed resolving arguments)
50 def _default_conversion_function(value, dtype, name, as_ref):
51 del as_ref # Unused.
---> 52 return constant_op.constant(value, dtype, name=name)
53
54
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\constant_op.py in constant(value, dtype, shape, name)
256 """
257 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 258 allow_broadcast=True)
259
260
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
264 ctx = context.context()
265 if ctx.executing_eagerly():
--> 266 t = convert_to_eager_tensor(value, ctx, dtype)
267 if shape is None:
268 return t
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
94 dtype = dtypes.as_dtype(dtype).as_datatype_enum
95 ctx.ensure_initialized()
---> 96 return ops.EagerTensor(value, ctx.device_name, dtype)
97
98
Error Description
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type float).
Details
ValueError Traceback (most recent call last)
D:\deeplearning\Temp\ipykernel_5864\2961666884.py in
26 print(train_data.shape,test_data.shape,time.time()-start)
27
---> 28 model,predictions = trainer(train_data,test_data)
29 # returns = simulate(test_data,predictions)
30 # returns.to_csv(result_folder+'/avg_daily_rets-'+str(test_year)+'.csv')
D:\deeplearning\Temp\ipykernel_5864\3168391516.py in trainer(train_data, test_data)
28 validation_split=0.2,
29 callbacks=callbacks,
---> 30 batch_size=512
31 )
32
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
817 max_queue_size=max_queue_size,
818 workers=workers,
--> 819 use_multiprocessing=use_multiprocessing)
820
821 def evaluate(self,
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
233 max_queue_size=max_queue_size,
234 workers=workers,
--> 235 use_multiprocessing=use_multiprocessing)
236
237 total_samples = _get_total_number_of_samples(training_data_adapter)
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing)
567 sample_weight_modes=sample_weight_modes,
568 shuffle=shuffle,
--> 569 distribution_strategy=distribution_strategy)
570
571 val_adapter = adapter_cls(
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in init(self, x, y, sample_weights, sample_weight_modes, batch_size, epochs, steps, shuffle, **kwargs)
355 indices_dataset = indices_dataset.flat_map(slice_batch_indices)
356
--> 357 dataset = self.slice_inputs(indices_dataset, inputs)
358
359 if shuffle == "batch":
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in slice_inputs(self, indices_dataset, inputs)
381 dataset = dataset_ops.DatasetV2.zip((
382 indices_dataset,
--> 383 dataset_ops.DatasetV2.from_tensors(inputs).repeat()
384 ))
385
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\data\ops\dataset_ops.py in from_tensors(tensors)
564 Dataset: A Dataset.
565 """
--> 566 return TensorDataset(tensors)
567
568 @staticmethod
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\data\ops\dataset_ops.py in init(self, element)
2763 def init(self, element):
2764 """See
Dataset.from_tensors()for details."""-> 2765 element = structure.normalize_element(element)
2766 self._structure = structure.type_spec_from_value(element)
2767 self._tensors = structure.to_tensor_list(self._structure, element)
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\data\util\structure.py in normalize_element(element)
111 else:
112 normalized_components.append(
--> 113 ops.convert_to_tensor(t, name="component_%d" % i))
114 return nest.pack_sequence_as(element, normalized_components)
115
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1312
1313 if ret is None:
-> 1314 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1315
1316 if ret is NotImplemented:
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\tensor_conversion_registry.py in _default_conversion_function(failed resolving arguments)
50 def _default_conversion_function(value, dtype, name, as_ref):
51 del as_ref # Unused.
---> 52 return constant_op.constant(value, dtype, name=name)
53
54
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\constant_op.py in constant(value, dtype, shape, name)
256 """
257 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 258 allow_broadcast=True)
259
260
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
264 ctx = context.context()
265 if ctx.executing_eagerly():
--> 266 t = convert_to_eager_tensor(value, ctx, dtype)
267 if shape is None:
268 return t
C:\ProgramData\miniconda3\envs\tf\lib\site-packages\tensorflow_core\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
94 dtype = dtypes.as_dtype(dtype).as_datatype_enum
95 ctx.ensure_initialized()
---> 96 return ops.EagerTensor(value, ctx.device_name, dtype)
97
98