diff --git a/examples/Sionna_Ray_Tracing_Diffraction.ipynb b/examples/Sionna_Ray_Tracing_Diffraction.ipynb index 10ea1d4c2..7886e0117 100644 --- a/examples/Sionna_Ray_Tracing_Diffraction.ipynb +++ b/examples/Sionna_Ray_Tracing_Diffraction.ipynb @@ -30,6 +30,7 @@ "source": [ "## Table of Contents\n", "* [Background Information](#Background-Information)\n", + "* [Wedge vs Edge](#Wedge-vs-Edge)\n", "* [GPU Configuration and Imports](#GPU-Configuration-and-Imports)\n", "* [Experiments with a Simple Wedge](#Experiments-with-a-Simple-Wedge)\n", "* [Coverage Maps with Diffraction](#Coverage-Maps-with-Diffraction)\n", @@ -92,6 +93,22 @@ "We will explore in this notebook these effects in detail and also validate the UTD implementation in Sionna RT as a by-product." ] }, + { + "cell_type": "markdown", + "id": "318a9c54-4bd1-4bb5-b615-6ab2d8143b21", + "metadata": {}, + "source": [ + "# Wedge vs Edge\n", + "\n", + "First, it is important to know the difference between a *wedge* and an *edge*, and why we distinguish between them.\n", + "\n", + "Sionna defines a *wedge* as the line segment between two primitives, i.e., the common segment of two triangles. For example, a cubic building would have 12 wedges.\n", + "\n", + "For primitives that have one or more line segments that are not shared with another primitive, Sionna refers to such line segments as *edges*. See [`sionna.rt.scene.floor_wall`](https://nvlabs.github.io/sionna/api/rt.html#sionna.rt.scene.floor_wall) for an example scene.\n", + "\n", + "By default, Sionna does not simulate diffraction on edges (`edge_diffraction=False`), to avoid problems such as diffraction on the exterior edges of the ground surface (modelled as a rectangular plane)." 
+ ] + }, { "cell_type": "markdown", "id": "d77f8e5b-f32c-4752-a0cf-258c69638b28", diff --git a/sionna/rt/previewer.py b/sionna/rt/previewer.py index e367efb3e..4c89757c5 100644 --- a/sionna/rt/previewer.py +++ b/sionna/rt/previewer.py @@ -122,15 +122,14 @@ def plot_radio_devices(self, show_orientations=False): """ scene = self._scene sc, tx_positions, rx_positions, _ = scene_scale(scene) - tr_color = [0.160, 0.502, 0.725] - rc_color = [0.153, 0.682, 0.375] + transmitter_colors = [transmitter.color.numpy() for + transmitter in scene.transmitters.values()] + receiver_colors = [receiver.color.numpy() for + receiver in scene.receivers.values()] # Radio emitters, shown as points p = np.array(list(tx_positions.values()) + list(rx_positions.values())) - albedo = np.array( - [tr_color] * len(scene.transmitters) - + [rc_color] * len(scene.receivers) - ) + albedo = np.array(transmitter_colors + receiver_colors) if p.shape[0] > 0: # Radio devices are not persistent @@ -142,14 +141,14 @@ def plot_radio_devices(self, show_orientations=False): head_length = 0.15 * line_length zeros = np.zeros((1, 3)) - for devices, color in [(scene.transmitters.values(), tr_color), - (scene.receivers.values(), rc_color)]: + for devices in [scene.transmitters.values(), + scene.receivers.values()]: if len(devices) == 0: continue - color = f'rgb({", ".join([str(int(v * 255)) for v in color])})' starts, ends = [], [] for rd in devices: # Arrow line + color = f'rgb({", ".join([str(int(v)) for v in rd.color])})' starts.append(rd.position) endpoint = rd.position + rotate([line_length, 0., 0.], rd.orientation) diff --git a/sionna/rt/radio_device.py b/sionna/rt/radio_device.py index 955f41180..ee1e0c64f 100644 --- a/sionna/rt/radio_device.py +++ b/sionna/rt/radio_device.py @@ -43,6 +43,10 @@ class RadioDevice(OrientedObject): :class:`~sionna.rt.Receiver`, or :class:`~sionna.rt.Camera` to look at. If set to `None`, then ``orientation`` is used to orientate the device. 
+ color : [3], float + Defines the RGB (red, green, blue) ``color`` parameter for the device as displayed in the previewer and renderer. + Each RGB component must have a value within the range :math:`\in [0,1]`. + trainable_position : bool Determines if the ``position`` is a trainable variable or not. Defaults to `False`. @@ -61,6 +65,7 @@ def __init__(self, position, orientation=(0.,0.,0.), look_at=None, + color=(0,0,0), trainable_position=False, trainable_orientation=False, dtype=tf.complex64): @@ -73,6 +78,8 @@ def __init__(self, self._position = tf.Variable(tf.zeros([3], self._rdtype)) self._orientation = tf.Variable(tf.zeros([3], self._rdtype)) + self.color = color + self.trainable_position = trainable_position self.trainable_orientation = trainable_orientation @@ -183,3 +190,22 @@ def look_at(self, target): beta = theta-PI/2 # Rotation around y-axis gamma = 0.0 # Rotation around x-axis self.orientation = (alpha, beta, gamma) + + @property + def color(self): + r""" + [3], float : Get/set the RGB (red, green, blue) color for the device as displayed in the previewer and renderer. + Each RGB component must have a value within the range :math:`\in [0,1]`. + """ + return self._color + + @color.setter + def color(self, new_color): + new_color = tf.cast(new_color, dtype=self._rdtype) + if not (tf.rank(new_color) == 1 and new_color.shape[0] == 3): + msg = "Color must be shaped as [r,g,b] (rank=1 and shape=[3])" + raise ValueError(msg) + if tf.reduce_any(new_color < 0.) or tf.reduce_any(new_color > 1.): + msg = "Color components must be in the range [0,1]" + raise ValueError(msg) + self._color = new_color diff --git a/sionna/rt/receiver.py b/sionna/rt/receiver.py index 95617707c..4ced574a5 100644 --- a/sionna/rt/receiver.py +++ b/sionna/rt/receiver.py @@ -34,6 +34,11 @@ class Receiver(RadioDevice): :class:`~sionna.rt.Receiver`, or :class:`~sionna.rt.Camera` to look at. If set to `None`, then ``orientation`` is used to orientate the device. 
+ color : [3], float + Defines the RGB (red, green, blue) ``color`` parameter for the device as displayed in the previewer and renderer. + Each RGB component must have a value within the range :math:`\in [0,1]`. + Defaults to `[0.153, 0.682, 0.375]`. + trainable_position : bool Determines if the ``position`` is a trainable variable or not. Defaults to `False`. @@ -52,6 +57,7 @@ def __init__(self, position, orientation=(0.,0.,0.), look_at=None, + color=(0.153, 0.682, 0.375), trainable_position=False, trainable_orientation=False, dtype=tf.complex64): @@ -61,6 +67,7 @@ def __init__(self, position=position, orientation=orientation, look_at=look_at, + color=color, trainable_position=trainable_position, trainable_orientation=trainable_orientation, dtype=dtype) diff --git a/sionna/rt/renderer.py b/sionna/rt/renderer.py index 71478c609..06e7dc4de 100644 --- a/sionna/rt/renderer.py +++ b/sionna/rt/renderer.py @@ -268,13 +268,17 @@ def results_to_mitsuba_scene(scene, paths, show_paths, show_devices, 'type': 'scene', } sc, tx_positions, rx_positions, _ = scene_scale(scene) + transmitter_colors = [transmitter.color.numpy() for + transmitter in scene.transmitters.values()] + receiver_colors = [receiver.color.numpy() for + receiver in scene.receivers.values()] - # --- Radio devices, shown as spheres (blue: transmitter, green: receiver) + # --- Radio devices, shown as spheres if show_devices: radius = max(0.0025 * sc, 1) - for source, color in ((tx_positions, [0.160, 0.502, 0.725]), - (rx_positions, [0.153, 0.682, 0.375])): - for k, p in source.items(): + for source, color in ((tx_positions, transmitter_colors), + (rx_positions, receiver_colors)): + for index, (k, p) in enumerate(source.items()): key = 'rd-' + k assert key not in objects objects[key] = { @@ -283,7 +287,7 @@ def results_to_mitsuba_scene(scene, paths, show_paths, show_devices, 'radius': radius, 'light': { 'type': 'area', - 'radiance': {'type': 'rgb', 'value': color}, + 'radiance': {'type': 'rgb', 'value': 
color[index]}, }, } diff --git a/sionna/rt/transmitter.py b/sionna/rt/transmitter.py index 2cdc78ada..af4409101 100644 --- a/sionna/rt/transmitter.py +++ b/sionna/rt/transmitter.py @@ -34,6 +34,11 @@ class Transmitter(RadioDevice): :class:`~sionna.rt.Receiver`, or :class:`~sionna.rt.Camera` to look at. If set to `None`, then ``orientation`` is used to orientate the device. + color : [3], float + Defines the RGB (red, green, blue) ``color`` parameter for the device as displayed in the previewer and renderer. + Each RGB component must have a value within the range :math:`\in [0,1]`. + Defaults to `[0.160, 0.502, 0.725]`. + trainable_position : bool Determines if the ``position`` is a trainable variable or not. Defaults to `False`. @@ -52,6 +57,7 @@ def __init__(self, position, orientation=(0.,0.,0.), look_at=None, + color=(0.160, 0.502, 0.725), trainable_position=False, trainable_orientation=False, dtype=tf.complex64): @@ -61,6 +67,7 @@ def __init__(self, position=position, orientation=orientation, look_at=look_at, + color=color, trainable_position=trainable_position, trainable_orientation=trainable_orientation, dtype=dtype) diff --git a/sionna/utils/misc.py b/sionna/utils/misc.py index 3415167d8..d7668b4c4 100644 --- a/sionna/utils/misc.py +++ b/sionna/utils/misc.py @@ -410,7 +410,8 @@ def sim_ber(mc_fun, graph_mode=None, verbose=True, forward_keyboard_interrupt=True, - dtype=tf.complex64): + dtype=tf.complex64, + callback=None): """Simulates until target number of errors is reached and returns BER/BLER. The simulation continues with the next SNR point if either @@ -420,7 +421,7 @@ def sim_ber(mc_fun, Input ----- - mc_fun: + mc_fun: callable Callable that yields the transmitted bits `b` and the receiver's estimate `b_hat` for a given ``batch_size`` and ``ebno_db``. If ``soft_estimates`` is True, b_hat is interpreted as @@ -470,6 +471,22 @@ def sim_ber(mc_fun, dtype: tf.complex64 Datatype of the model / function to be used (``mc_fun``). 
+ callback: callable + Defaults to `None`. If specified, ``callback`` + will be called after each Monte-Carlo step. Can be used for + logging or advanced early stopping. + Input signature of ``callback`` must match `callback(mc_iter, + ebno_dbs, bit_errors, block_errors, nb_bits, nb_blocks)` where + ``mc_iter`` denotes the number of processed batches for the current + SNR, ``ebno_dbs`` is the current SNR point, ``bit_errors`` the number + of bit errors, ``block_errors`` the number of block errors, ``nb_bits`` + the number of simulated bits, ``nb_blocks`` the number of simulated + blocks. If ``callback`` returns `sim_ber.CALLBACK_NEXT_SNR`, early + stopping is detected and the simulation will continue with the next SNR + point. If ``callback`` returns `sim_ber.CALLBACK_STOP`, the simulation + is stopped immediately. If ``callback`` returns `sim_ber.CALLBACK_CONTINUE`, + the simulation continues. + Output ------ (ber, bler) : @@ -567,7 +584,8 @@ def _print_progress(is_final, rt, idx_snr, idx_it, header_text=None): "reached max iter ", # status=1; spacing for impr. layout "no errors - early stop", # status=2 "reached target bit errors", # status=3 - "reached target block errors"] # status=4 + "reached target block errors", # status=4 + "callback triggered stopping"] # status=5 # check inputs for consistency assert isinstance(early_stop, bool), "early_stop must be bool." 
@@ -658,6 +676,18 @@ def _print_progress(is_final, rt, idx_snr, idx_it, header_text=None): nb_blocks = tf.tensor_scatter_nd_add( nb_blocks, [[i]], tf.cast([block_n], tf.int64)) + cb_state = sim_ber.CALLBACK_CONTINUE + if callback is not None: + cb_state = callback (ii, ebno_dbs[i], bit_errors[i], + block_errors[i], nb_bits[i], + nb_blocks[i]) + if cb_state in (sim_ber.CALLBACK_STOP, + sim_ber.CALLBACK_NEXT_SNR): + # stop runtime timer + runtime[i] = time.perf_counter() - runtime[i] + status[i] = 5 # change internal status for summary + break # stop, this SNR point has been simulated + # print progress summary if verbose: # print summary header during first iteration @@ -714,6 +744,14 @@ def _print_progress(is_final, rt, idx_snr, idx_it, header_text=None): print("\nSimulation stopped as no error occurred " \ f"@ EbNo = {ebno_dbs[i].numpy():.1f} dB.\n") break + # allow callback to end the entire simulation + if cb_state is sim_ber.CALLBACK_STOP: + # runtime timer was already stopped in the inner loop + status[i] = 5 # change internal status for summary + if verbose: + print("\nSimulation stopped by callback function " \ + f"@ EbNo = {ebno_dbs[i].numpy():.1f} dB.\n") + break + # Stop if KeyboardInterrupt is detected and set remaining SNR points to -1 except KeyboardInterrupt as e: @@ -745,6 +783,9 @@ def _print_progress(is_final, rt, idx_snr, idx_it, header_text=None): return ber, bler +sim_ber.CALLBACK_CONTINUE = None +sim_ber.CALLBACK_STOP = 2 +sim_ber.CALLBACK_NEXT_SNR = 1 def complex_normal(shape, var=1.0, dtype=tf.complex64): r"""Generates a tensor of complex normal random variables. 
diff --git a/test/unit/rt/test_radio_device.py b/test/unit/rt/test_radio_device.py index 999bef41b..7c89e4259 100644 --- a/test/unit/rt/test_radio_device.py +++ b/test/unit/rt/test_radio_device.py @@ -113,7 +113,7 @@ def test_look_at(self): theta_r = tf.squeeze(paths.theta_r) phi_r = tf.squeeze(paths.phi_r) - # Compute AODs and AoAs in LCS of the transmitter and receiver + # Compute AODs and AoAs in LCS of the transmitter and receiver theta_prime_t, phi_prime_t = theta_prime_phi_prime(tx.orientation, theta_t, phi_t) theta_prime_r, phi_prime_r = theta_prime_phi_prime(rx.orientation, theta_r, phi_r) @@ -124,7 +124,33 @@ def test_look_at(self): self.assertTrue(np.abs(phi_prime_r)<1e-5) # Compute channel impulse response and make - # sure that it matches the theoretical + # sure that it matches the theoretical a = tf.squeeze(paths.a) a_db = 20*np.log10(np.abs(a.numpy())) self.assertTrue(np.abs(a_db-a_theo_db)< 1e-4) + + def test_default_coloring(self): + """Test default coloring of radio devices""" + scene = load_scene() + tx = Transmitter("tx", [1,2,-3], [0, 0, 0]) + rx = Receiver("rx", [0,0,0], [0, 0, 0]) + scene.add(tx) + scene.add(rx) + + color_tx = tf.cast((0.160, 0.502, 0.725), tf.float32) + color_rx = tf.cast((0.153, 0.682, 0.375), tf.float32) + self.assertTrue(tf.reduce_all(list(scene.transmitters.values())[0].color==color_tx)) + self.assertTrue(tf.reduce_all(list(scene.receivers.values())[0].color==color_rx)) + + def test_custom_coloring(self): + """Test custom coloring of radio devices""" + color_tx = tf.cast((0.8, 0., 0.), tf.float32) + color_rx = tf.cast((1., 1., 0.), tf.float32) + scene = load_scene() + tx = Transmitter("tx", [1,2,-3], [0, 0, 0], color=color_tx) + rx = Receiver("rx", [0,0,0], [0, 0, 0], color=color_rx) + scene.add(tx) + scene.add(rx) + + self.assertTrue(tf.reduce_all(list(scene.transmitters.values())[0].color==color_tx)) + self.assertTrue(tf.reduce_all(list(scene.receivers.values())[0].color==color_rx))