Line data Source code
1 : /* SPDX-License-Identifier: Apache-2.0 */
2 : /**
3 : * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
4 : *
5 : * @file ml-api-inference-pipeline.c
6 : * @date 11 March 2019
7 : * @brief NNStreamer/Pipeline(main) C-API Wrapper.
8 : * This allows to construct and control NNStreamer pipelines.
9 : * @see https://github.com/nnstreamer/nnstreamer
10 : * @author MyungJoo Ham <myungjoo.ham@samsung.com>
11 : * @bug Thread safety for ml_tensors_data should be addressed.
12 : */
13 :
14 : #include <string.h>
15 : #include <glib.h>
16 : #include <gst/gstbuffer.h>
17 : #include <gst/app/app.h> /* To push data to pipeline */
18 : #include <nnstreamer_plugin_api.h>
19 : #include <tensor_if.h>
20 : #include <tensor_typedef.h>
21 : #include <tensor_filter_custom_easy.h>
22 :
23 : #include <nnstreamer.h>
24 : #include <nnstreamer-tizen-internal.h>
25 :
26 : #include "ml-api-internal.h"
27 : #include "ml-api-inference-internal.h"
28 : #include "ml-api-inference-pipeline-internal.h"
29 :
30 :
/**
 * @brief Common prologue for element-handle APIs: validates the handle and
 *        acquires both the pipeline lock and the element lock.
 * @note Declares `name` (the handle cast to ml_pipeline_common_elem *),
 *       `p` (its pipeline), `elem` (its element) and `ret` in the caller's
 *       scope. When the handle is not found in elem->handles it sets ret
 *       and jumps to unlock_return, so this macro must always be paired
 *       with handle_exit() in the same function body.
 */
#define handle_init(name, h) \
  ml_pipeline_common_elem *name= (h); \
  ml_pipeline *p; \
  ml_pipeline_element *elem; \
  int ret = ML_ERROR_NONE; \
  check_feature_state (ML_FEATURE_INFERENCE); \
  if ((h) == NULL) { \
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, \
        "The parameter, %s, (handle) is invalid (NULL). Please provide a valid handle.", \
        #h); \
  } \
  p = name->pipe; \
  elem = name->element; \
  if (p == NULL) \
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, \
        "Internal error. The contents of parameter, %s, (handle), is invalid. The pipeline entry (%s->pipe) is NULL. The handle (%s) is either not properly created or application threads may have touched its contents.", \
        #h, #h, #h); \
  if (elem == NULL) \
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, \
        "Internal error. The contents of parameter, %s, (handle), is invalid. The element entry (%s->element) is NULL. The handle (%s) is either not properly created or application threads may have touched its contents.", \
        #h, #h, #h); \
  if (elem->pipe == NULL) \
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, \
        "Internal error. The contents of parameter, %s, (handle), is invalid. The pipeline entry of the element entry (%s->element->pipe) is NULL. The handle (%s) is either not properly created or application threads may have touched its contents.", \
        #h, #h, #h); \
  g_mutex_lock (&p->lock); \
  g_mutex_lock (&elem->lock); \
  if (NULL == g_list_find (elem->handles, name)) { \
    _ml_error_report \
        ("Internal error. The handle name, %s, does not exists in the list of %s->element->handles.", \
        #h, #h); \
    ret = ML_ERROR_INVALID_PARAMETER; \
    goto unlock_return; \
  }
65 :
/**
 * @brief Common epilogue for element-handle APIs started with handle_init():
 *        hosts the unlock_return label, releases the element and pipeline
 *        locks in reverse acquisition order, and returns ret.
 */
#define handle_exit(h) \
  unlock_return: \
  g_mutex_unlock (&elem->lock); \
  g_mutex_unlock (&p->lock); \
  return ret;
71 :
/**
 * @brief The enumeration for custom data type.
 */
typedef enum
{
  PIPE_CUSTOM_TYPE_NONE,        /**< No custom data attached. */
  PIPE_CUSTOM_TYPE_IF,          /**< Custom condition handle of tensor_if. */
  PIPE_CUSTOM_TYPE_FILTER,      /**< Custom-easy filter handle of tensor_filter. */

  PIPE_CUSTOM_TYPE_MAX          /**< Sentinel, keep this last. */
} pipe_custom_type_e;
83 :
/**
 * @brief The struct for custom data.
 */
typedef struct
{
  pipe_custom_type_e type;      /**< Which kind of custom handle this entry holds. */
  gchar *name;                  /**< Registered name; owned and freed with this entry. */
  gpointer handle;              /**< Custom tensor_if / custom-easy filter handle. */
} pipe_custom_data_s;
93 :
94 : static void ml_pipeline_custom_filter_ref (ml_custom_easy_filter_h custom);
95 : static void ml_pipeline_custom_filter_unref (ml_custom_easy_filter_h custom);
96 : static void ml_pipeline_if_custom_ref (ml_pipeline_if_h custom);
97 : static void ml_pipeline_if_custom_unref (ml_pipeline_if_h custom);
98 :
99 : /**
100 : * @brief Global lock for pipeline functions.
101 : */
102 : G_LOCK_DEFINE_STATIC (g_ml_pipe_lock);
103 :
104 : /**
105 : * @brief The list of custom data. This should be managed with lock.
106 : */
107 : static GList *g_ml_custom_data = NULL;
108 :
109 : /**
110 : * @brief Finds a position of custom data in the list.
111 : * @note This function should be called with lock.
112 : */
113 : static GList *
114 0 : pipe_custom_find_link (const pipe_custom_type_e type, const gchar * name)
115 : {
116 : pipe_custom_data_s *data;
117 : GList *link;
118 :
119 0 : g_return_val_if_fail (name != NULL, NULL);
120 :
121 0 : link = g_ml_custom_data;
122 0 : while (link) {
123 0 : data = (pipe_custom_data_s *) link->data;
124 :
125 0 : if (data->type == type && g_str_equal (data->name, name))
126 0 : break;
127 :
128 0 : link = link->next;
129 : }
130 :
131 0 : return link;
132 : }
133 :
134 : /**
135 : * @brief Finds custom data matched with data type and name.
136 : */
137 : static pipe_custom_data_s *
138 0 : pipe_custom_find_data (const pipe_custom_type_e type, const gchar * name)
139 : {
140 : pipe_custom_data_s *data;
141 : GList *link;
142 :
143 0 : G_LOCK (g_ml_pipe_lock);
144 :
145 0 : link = pipe_custom_find_link (type, name);
146 0 : data = (link != NULL) ? (pipe_custom_data_s *) link->data : NULL;
147 :
148 0 : G_UNLOCK (g_ml_pipe_lock);
149 0 : return data;
150 : }
151 :
152 : /**
153 : * @brief Adds new custom data into the list.
154 : */
155 : static void
156 0 : pipe_custom_add_data (const pipe_custom_type_e type, const gchar * name,
157 : gpointer handle)
158 : {
159 : pipe_custom_data_s *data;
160 :
161 0 : data = g_new0 (pipe_custom_data_s, 1);
162 0 : data->type = type;
163 0 : data->name = g_strdup (name);
164 0 : data->handle = handle;
165 :
166 0 : G_LOCK (g_ml_pipe_lock);
167 0 : g_ml_custom_data = g_list_prepend (g_ml_custom_data, data);
168 0 : G_UNLOCK (g_ml_pipe_lock);
169 0 : }
170 :
171 : /**
172 : * @brief Removes custom data from the list.
173 : */
174 : static void
175 0 : pipe_custom_remove_data (const pipe_custom_type_e type, const gchar * name)
176 : {
177 : pipe_custom_data_s *data;
178 : GList *link;
179 :
180 0 : G_LOCK (g_ml_pipe_lock);
181 :
182 0 : link = pipe_custom_find_link (type, name);
183 0 : if (link) {
184 0 : data = (pipe_custom_data_s *) link->data;
185 :
186 0 : g_ml_custom_data = g_list_delete_link (g_ml_custom_data, link);
187 :
188 0 : g_clear_pointer (&data->name, g_free);
189 0 : g_free (data);
190 : }
191 :
192 0 : G_UNLOCK (g_ml_pipe_lock);
193 0 : }
194 :
195 : /**
196 : * @brief The callback function called when the element node with custom data is released.
197 : */
198 : static int
199 0 : pipe_custom_destroy_cb (void *handle, void *user_data)
200 : {
201 : pipe_custom_data_s *custom_data;
202 :
203 0 : custom_data = (pipe_custom_data_s *) handle;
204 0 : if (custom_data == NULL)
205 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
206 : "The parameter, handle, is NULL. It should be a valid internal object. This is possibly a bug in ml-api-inference-pipeline.c along with tensor-if or tensor-filter/custom function. Please report to https://github.com/nnstreamer/nnstreamer/issues");
207 :
208 0 : switch (custom_data->type) {
209 0 : case PIPE_CUSTOM_TYPE_IF:
210 0 : ml_pipeline_if_custom_unref (custom_data->handle);
211 0 : break;
212 0 : case PIPE_CUSTOM_TYPE_FILTER:
213 0 : ml_pipeline_custom_filter_unref (custom_data->handle);
214 0 : break;
215 0 : default:
216 0 : break;
217 : }
218 :
219 0 : return ML_ERROR_NONE;
220 : }
221 :
222 : /**
223 : * @brief Internal function to create a referable element in a pipeline
224 : */
225 : static ml_pipeline_element *
226 0 : construct_element (GstElement * e, ml_pipeline * p, const char *name,
227 : ml_pipeline_element_e t)
228 : {
229 0 : ml_pipeline_element *ret = g_new0 (ml_pipeline_element, 1);
230 :
231 0 : if (ret == NULL)
232 0 : _ml_error_report_return (NULL,
233 : "Failed to allocate memory for the pipeline.");
234 :
235 0 : ret->element = e;
236 0 : ret->pipe = p;
237 0 : ret->name = g_strdup (name);
238 0 : ret->type = t;
239 0 : ret->handles = NULL;
240 0 : ret->src = NULL;
241 0 : ret->sink = NULL;
242 0 : ret->maxid = 0;
243 0 : ret->handle_id = 0;
244 0 : ret->is_media_stream = FALSE;
245 0 : ret->is_flexible_tensor = FALSE;
246 0 : g_mutex_init (&ret->lock);
247 0 : gst_tensors_info_init (&ret->tensors_info);
248 :
249 0 : return ret;
250 : }
251 :
252 : /**
253 : * @brief Internal function to get the tensors info from the element caps.
254 : */
255 : static gboolean
256 0 : get_tensors_info_from_caps (const GstCaps * caps, GstTensorsInfo * info,
257 : gboolean * is_flexible)
258 : {
259 : GstTensorsConfig config;
260 0 : gboolean found = FALSE;
261 :
262 0 : found = gst_tensors_config_from_caps (&config, caps, TRUE);
263 :
264 0 : if (found) {
265 0 : gst_tensors_info_free (info);
266 0 : gst_tensors_info_copy (info, &config.info);
267 :
268 0 : if (is_flexible) {
269 0 : *is_flexible = gst_tensors_config_is_flexible (&config);
270 : }
271 :
272 0 : gst_tensors_config_free (&config);
273 : }
274 :
275 0 : return found;
276 : }
277 :
/**
 * @brief Handle a sink element for registered ml_pipeline_sink_cb
 * @param e The sink GstElement emitting the buffer.
 * @param b The arrived GstBuffer; its memories are mapped read-only and
 *          exposed to the registered callbacks zero-copy.
 * @param user_data The ml_pipeline_element of the sink node.
 */
static void
cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
{
  ml_pipeline_element *elem = user_data;

  /** @todo CRITICAL if the pipeline is being killed, don't proceed! */
  GstMemory *mem[ML_TENSOR_SIZE_LIMIT];
  GstMapInfo map[ML_TENSOR_SIZE_LIMIT];
  guint i, num_tensors;
  GList *l;
  ml_tensors_data_s *_data = NULL;
  GstTensorsInfo gst_info;
  int status;

  gst_tensors_info_init (&gst_info);
  gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b);

  /* Set tensor data. The handle for tensors-info in data should be added. */
  status =
      _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data);
  if (status != ML_ERROR_NONE) {
    _ml_loge (_ml_detail
        ("Failed to allocate memory for tensors data in sink callback, which is registered by ml_pipeline_sink_register ()."));
    return;
  }

  g_mutex_lock (&elem->lock);

  /* Map each memory read-only; data pointers below alias the mapped regions. */
  _data->num_tensors = num_tensors;
  for (i = 0; i < num_tensors; i++) {
    mem[i] = gst_tensor_buffer_get_nth_memory (b, i);
    if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) {
      _ml_loge (_ml_detail
          ("Failed to map the output in sink '%s' callback, which is registered by ml_pipeline_sink_register ()",
          elem->name));
      gst_memory_unref (mem[i]);
      /* Only the first i memories were mapped; cleanup unmaps just those. */
      num_tensors = i;
      goto error;
    }

    _data->tensors[i].data = map[i].data;
    _data->tensors[i].size = map[i].size;
  }

  /** @todo This assumes that padcap is static */
  if (elem->sink == NULL) {
    /* First buffer on this sink: cache the negotiated pad and its tensors-info. */
    gboolean found = FALSE;
    gboolean flexible = FALSE;

    /* Get the sink-pad-cap */
    elem->sink = gst_element_get_static_pad (elem->element, "sink");

    if (elem->sink) {
      /* sinkpadcap available (negotiated) */
      GstCaps *caps = gst_pad_get_current_caps (elem->sink);

      if (caps) {
        found = get_tensors_info_from_caps (caps, &elem->tensors_info,
            &flexible);
        gst_caps_unref (caps);
      }
    }

    if (found) {
      elem->is_flexible_tensor = flexible;
    } else {
      /* It is not valid */
      if (elem->sink) {
        gst_object_unref (elem->sink);
        elem->sink = NULL;
      }

      goto error;
    }
  }

  /* Prepare output and set data. */
  if (elem->is_flexible_tensor) {
    GstTensorMetaInfo meta;
    gsize hsize;

    /* handle header for flex tensor */
    for (i = 0; i < num_tensors; i++) {
      gst_tensor_meta_info_parse_header (&meta, map[i].data);
      hsize = gst_tensor_meta_info_get_header_size (&meta);

      gst_tensor_meta_info_convert (&meta,
          gst_tensors_info_get_nth_info (&gst_info, i));

      /* Skip the per-tensor meta header so callbacks see only the payload. */
      _data->tensors[i].data = map[i].data + hsize;
      _data->tensors[i].size = map[i].size - hsize;
    }
  } else {
    gst_tensors_info_copy (&gst_info, &elem->tensors_info);

    /* Compare output info and buffer if gst-buffer is not flexible. */
    if (gst_info.num_tensors != num_tensors) {
      _ml_loge (_ml_detail
          ("The sink event of [%s] cannot be handled because the number of tensors mismatches.",
          elem->name));

      /* Drop the cached pad so the caps are re-read on the next buffer. */
      gst_object_unref (elem->sink);
      elem->sink = NULL;
      goto error;
    }

    for (i = 0; i < num_tensors; i++) {
      size_t sz = gst_tensors_info_get_size (&gst_info, i);

      /* Not configured, yet. */
      if (sz == 0)
        _ml_loge (_ml_detail
            ("The caps for sink(%s) is not configured.", elem->name));

      if (sz != map[i].size) {
        _ml_loge (_ml_detail
            ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.",
            elem->name));

        gst_object_unref (elem->sink);
        elem->sink = NULL;
        goto error;
      }
    }
  }

  /* Create new output info, data handle should be updated here. */
  _ml_tensors_info_create_from_gst (&_data->info, &gst_info);

  /* Iterate e->handles, pass the data to them */
  for (l = elem->handles; l != NULL; l = l->next) {
    ml_pipeline_sink_cb callback;
    ml_pipeline_common_elem *sink = l->data;
    if (sink->callback_info == NULL)
      continue;

    callback = sink->callback_info->sink_cb;
    if (callback)
      callback (_data, _data->info, sink->callback_info->sink_pdata);

    /** @todo Measure time. Warn if it takes long. Kill if it takes too long. */
  }

error:
  g_mutex_unlock (&elem->lock);

  /* Unmap/unref only the memories that were successfully mapped above. */
  for (i = 0; i < num_tensors; i++) {
    gst_memory_unmap (mem[i], &map[i]);
    gst_memory_unref (mem[i]);
  }

  /* FALSE: destroy only the wrapper; payloads belong to the GstBuffer. */
  _ml_tensors_data_destroy_internal (_data, FALSE);
  _data = NULL;

  gst_tensors_info_free (&gst_info);
  return;
}
438 :
439 : /**
440 : * @brief Handle a appsink element for registered ml_pipeline_sink_cb
441 : */
442 : static GstFlowReturn
443 0 : cb_appsink_new_sample (GstElement * e, gpointer user_data)
444 : {
445 : GstSample *sample;
446 : GstBuffer *buffer;
447 :
448 : /* get the sample from appsink */
449 0 : sample = gst_app_sink_pull_sample (GST_APP_SINK (e));
450 0 : buffer = gst_sample_get_buffer (sample);
451 :
452 0 : cb_sink_event (e, buffer, user_data);
453 :
454 0 : gst_sample_unref (sample);
455 0 : return GST_FLOW_OK;
456 : }
457 :
458 : /**
459 : * @brief Callback for bus message.
460 : */
461 : static void
462 0 : cb_bus_sync_message (GstBus * bus, GstMessage * message, gpointer user_data)
463 : {
464 : ml_pipeline *pipe_h;
465 :
466 0 : pipe_h = (ml_pipeline *) user_data;
467 :
468 0 : if (pipe_h == NULL)
469 0 : return;
470 :
471 0 : switch (GST_MESSAGE_TYPE (message)) {
472 0 : case GST_MESSAGE_EOS:
473 0 : pipe_h->isEOS = TRUE;
474 0 : break;
475 0 : case GST_MESSAGE_STATE_CHANGED:
476 0 : if (GST_MESSAGE_SRC (message) == GST_OBJECT_CAST (pipe_h->element)) {
477 : GstState old_state, new_state;
478 :
479 0 : gst_message_parse_state_changed (message, &old_state, &new_state, NULL);
480 0 : pipe_h->pipe_state = (ml_pipeline_state_e) new_state;
481 :
482 0 : _ml_logd (_ml_detail ("The pipeline state changed from %s to %s.",
483 : gst_element_state_get_name (old_state),
484 : gst_element_state_get_name (new_state)));
485 :
486 0 : if (pipe_h->state_cb.cb) {
487 0 : pipe_h->state_cb.cb (pipe_h->pipe_state, pipe_h->state_cb.user_data);
488 : }
489 : }
490 0 : break;
491 0 : default:
492 0 : break;
493 : }
494 : }
495 :
496 : /**
497 : * @brief Clean up each element of the pipeline.
498 : */
499 : static void
500 0 : free_element_handle (gpointer data)
501 : {
502 0 : ml_pipeline_common_elem *item = (ml_pipeline_common_elem *) data;
503 : ml_pipeline_element *elem;
504 :
505 0 : if (!(item && item->callback_info)) {
506 0 : g_free (item);
507 0 : return;
508 : }
509 :
510 : /* clear callbacks */
511 0 : item->callback_info->sink_cb = NULL;
512 0 : elem = item->element;
513 0 : if (elem && elem->type == ML_PIPELINE_ELEMENT_APP_SRC) {
514 0 : GstAppSrcCallbacks appsrc_cb = { 0, };
515 0 : gst_app_src_set_callbacks (GST_APP_SRC (elem->element), &appsrc_cb,
516 : NULL, NULL);
517 : }
518 :
519 0 : g_clear_pointer (&item->callback_info, g_free);
520 0 : g_free (item);
521 : }
522 :
/**
 * @brief Private function for ml_pipeline_destroy, cleaning up nodes in namednodes
 * @param data The ml_pipeline_element to destroy (GDestroyNotify of namednodes).
 * @note For app-src nodes this pushes EOS and busy-waits (up to
 *       EOS_MESSAGE_TIME_LIMIT ms) for the bus handler to flag p->isEOS.
 */
static void
cleanup_node (gpointer data)
{
  ml_pipeline_element *e = data;

  g_mutex_lock (&e->lock);
  /** @todo CRITICAL. Stop the handle callbacks if they are running/ready */
  if (e->handle_id > 0) {
    g_signal_handler_disconnect (e->element, e->handle_id);
    e->handle_id = 0;
  }

  /* clear all handles first */
  if (e->handles)
    g_list_free_full (e->handles, free_element_handle);
  e->handles = NULL;

  if (e->type == ML_PIPELINE_ELEMENT_APP_SRC) {
    ml_pipeline *p = e->pipe;

    if (p && !p->isEOS) {
      int eos_check_cnt = 0;

      /** to push EOS event, the pipeline should be in PAUSED state */
      gst_element_set_state (p->element, GST_STATE_PAUSED);

      if (gst_app_src_end_of_stream (GST_APP_SRC (e->element)) != GST_FLOW_OK) {
        _ml_logw (_ml_detail
            ("Cleaning up a pipeline has failed to set End-Of-Stream for the pipeline element of %s",
            e->name));
      }

      /* Drop the element lock while polling; p->isEOS is set by
       * cb_bus_sync_message on another thread. */
      g_mutex_unlock (&e->lock);
      while (!p->isEOS) {
        eos_check_cnt++;
        /** check EOS every 1ms */
        g_usleep (1000);
        if (eos_check_cnt >= EOS_MESSAGE_TIME_LIMIT) {
          _ml_loge (_ml_detail
              ("Cleaning up a pipeline has requested to set End-Of-Stream. However, the pipeline has not become EOS after the timeout. It has failed to become EOS with the element of %s.",
              e->name));
          break;
        }
      }
      g_mutex_lock (&e->lock);
    }
  }

  /* Let tensor_if/tensor_filter custom data release its extra reference. */
  if (e->custom_destroy) {
    e->custom_destroy (e->custom_data, e);
  }

  g_clear_pointer (&e->name, g_free);
  g_clear_pointer (&e->src, gst_object_unref);
  g_clear_pointer (&e->sink, gst_object_unref);
  g_clear_pointer (&e->element, gst_object_unref);

  gst_tensors_info_free (&e->tensors_info);

  g_mutex_unlock (&e->lock);
  g_mutex_clear (&e->lock);

  g_free (e);
}
590 :
591 : /**
592 : * @brief Private function to release the pipeline resources
593 : */
594 : static void
595 0 : cleanup_resource (gpointer data)
596 : {
597 0 : pipeline_resource_s *res = data;
598 :
599 : /* check resource type and free data */
600 0 : if (g_str_has_prefix (res->type, "tizen")) {
601 0 : release_tizen_resource (res->handle, res->type);
602 : }
603 :
604 0 : g_clear_pointer (&res->type, g_free);
605 0 : g_free (res);
606 0 : }
607 :
608 : /**
609 : * @brief Converts predefined element in pipeline description.
610 : */
611 : static int
612 0 : convert_description (ml_pipeline_h pipe, const gchar * description,
613 : gchar ** result, gboolean is_internal)
614 : {
615 : gchar *converted;
616 0 : int status = ML_ERROR_NONE;
617 :
618 0 : g_return_val_if_fail (pipe, ML_ERROR_INVALID_PARAMETER);
619 0 : g_return_val_if_fail (description && result, ML_ERROR_INVALID_PARAMETER);
620 :
621 : /* init null */
622 0 : *result = NULL;
623 :
624 0 : converted = _ml_convert_predefined_entity (description);
625 :
626 : /* convert pre-defined element for Tizen */
627 0 : status = convert_tizen_element (pipe, &converted, is_internal);
628 :
629 0 : if (status == ML_ERROR_NONE) {
630 0 : _ml_logd (_ml_detail
631 : ("Pipeline element converted with aliases for gstreamer (Tizen element aliases): %s",
632 : converted));
633 0 : *result = converted;
634 : } else {
635 0 : g_free (converted);
636 0 : _ml_error_report_continue
637 : ("Failed to convert element: convert_tizen_element() returned %d",
638 : status);
639 : }
640 :
641 0 : return status;
642 : }
643 :
644 : /**
645 : * @brief Handle tensor-filter options.
646 : */
647 : static void
648 0 : process_tensor_filter_option (ml_pipeline_element * e)
649 : {
650 0 : gchar *fw = NULL;
651 0 : gchar *model = NULL;
652 : pipe_custom_data_s *custom_data;
653 :
654 0 : g_object_get (G_OBJECT (e->element), "framework", &fw, "model", &model, NULL);
655 :
656 0 : if (fw && g_ascii_strcasecmp (fw, "custom-easy") == 0) {
657 : /* ref to tensor-filter custom-easy handle. */
658 0 : custom_data = pipe_custom_find_data (PIPE_CUSTOM_TYPE_FILTER, model);
659 0 : if (custom_data) {
660 0 : ml_pipeline_custom_filter_ref (custom_data->handle);
661 :
662 0 : e->custom_destroy = pipe_custom_destroy_cb;
663 0 : e->custom_data = custom_data;
664 : }
665 : }
666 :
667 0 : g_free (fw);
668 0 : g_free (model);
669 0 : }
670 :
671 : /**
672 : * @brief Handle tensor-if options.
673 : */
674 : static void
675 0 : process_tensor_if_option (ml_pipeline_element * e)
676 : {
677 0 : gint cv = 0;
678 0 : gchar *cv_option = NULL;
679 : pipe_custom_data_s *custom_data;
680 :
681 0 : g_object_get (G_OBJECT (e->element), "compared-value", &cv,
682 : "compared-value-option", &cv_option, NULL);
683 :
684 0 : if (cv == 5) {
685 : /* cv is TIFCV_CUSTOM, ref to tensor-if custom handle. */
686 0 : custom_data = pipe_custom_find_data (PIPE_CUSTOM_TYPE_IF, cv_option);
687 0 : if (custom_data) {
688 0 : ml_pipeline_if_custom_ref (custom_data->handle);
689 :
690 0 : e->custom_destroy = pipe_custom_destroy_cb;
691 0 : e->custom_data = custom_data;
692 : }
693 : }
694 :
695 0 : g_free (cv_option);
696 0 : }
697 :
698 : /**
699 : * @brief Initializes the GStreamer library. This is internal function.
700 : */
701 : int
702 0 : _ml_initialize_gstreamer (void)
703 : {
704 0 : GError *err = NULL;
705 :
706 0 : if (!gst_init_check (NULL, NULL, &err)) {
707 0 : if (err) {
708 0 : _ml_error_report
709 : ("Initrializing ML-API failed: GStreamer has the following error from gst_init_check(): %s",
710 : err->message);
711 0 : g_clear_error (&err);
712 : } else {
713 0 : _ml_error_report ("Cannot initialize GStreamer. Unknown reason.");
714 : }
715 :
716 0 : return ML_ERROR_STREAMS_PIPE;
717 : }
718 :
719 0 : return ML_ERROR_NONE;
720 : }
721 :
722 : /**
723 : * @brief Checks the element is registered and available on the pipeline.
724 : */
725 : int
726 0 : ml_check_element_availability (const char *element_name, bool *available)
727 : {
728 : GstElementFactory *factory;
729 : int status;
730 :
731 0 : check_feature_state (ML_FEATURE_INFERENCE);
732 :
733 0 : if (!element_name)
734 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
735 : "The parameter, element_name, is NULL. It should be a name (string) to be queried if it exists as a GStreamer/NNStreamer element.");
736 :
737 0 : if (!available)
738 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
739 : "The parameter, available, is NULL. It should be a valid pointer to a bool entry so that the API (ml_check_element_availability) may return the queried result via \"available\" parameter. E.g., bool available; ml_check_element_availability (\"tensor_converter\", &available);");
740 :
741 0 : _ml_error_report_return_continue_iferr (_ml_initialize_gstreamer (),
742 : "Internal error of _ml_initialize_gstreamer(). Check the availability of gstreamer libraries in your system.");
743 :
744 : /* init false */
745 0 : *available = false;
746 :
747 0 : factory = gst_element_factory_find (element_name);
748 0 : if (factory) {
749 0 : GstPluginFeature *feature = GST_PLUGIN_FEATURE (factory);
750 0 : const gchar *plugin_name = gst_plugin_feature_get_plugin_name (feature);
751 :
752 : /* check restricted element */
753 0 : status = _ml_check_plugin_availability (plugin_name, element_name);
754 0 : if (status == ML_ERROR_NONE)
755 0 : *available = true;
756 :
757 0 : gst_object_unref (factory);
758 : }
759 :
760 0 : return ML_ERROR_NONE;
761 : }
762 :
/**
 * @brief Checks the availability of the plugin.
 * @param plugin_name Name of the GStreamer plugin providing the element.
 * @param element_name Name of the element being checked.
 * @return ML_ERROR_NONE if allowed, ML_ERROR_NOT_SUPPORTED if restricted by
 *         the configured allow-list, ML_ERROR_INVALID_PARAMETER on NULL input.
 * @note The restriction list is read from the nnstreamer configuration once
 *       and cached in function-static variables for the process lifetime.
 *       NOTE(review): the lazy init is not lock-protected; concurrent first
 *       calls could race on list_loaded/allowed_elements — confirm callers
 *       serialize the first invocation.
 */
int
_ml_check_plugin_availability (const char *plugin_name,
    const char *element_name)
{
  static gboolean list_loaded = FALSE;
  static gchar **allowed_elements = NULL;

  if (!plugin_name)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, plugin_name, is NULL. It should be a valid string.");

  if (!element_name)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, element_name, is NULL. It should be a valid string.");

  if (!list_loaded) {
    gboolean restricted;

    restricted =
        nnsconf_get_custom_value_bool ("element-restriction",
        "enable_element_restriction", FALSE);
    if (restricted) {
      gchar *elements;

      /* check white-list of available plugins */
      elements =
          nnsconf_get_custom_value_string ("element-restriction",
          "allowed_elements");
      if (elements) {
        /* allowed_elements intentionally lives for the whole process. */
        allowed_elements = g_strsplit_set (elements, " ,;", -1);
        g_free (elements);
      }
    }

    list_loaded = TRUE;
  }

  /* nnstreamer elements */
  if (g_str_has_prefix (plugin_name, "nnstreamer") &&
      g_str_has_prefix (element_name, "tensor_")) {
    return ML_ERROR_NONE;
  }

  if (allowed_elements &&
      find_key_strv ((const gchar **) allowed_elements, element_name) < 0) {
    _ml_error_report_return (ML_ERROR_NOT_SUPPORTED,
        "The element %s is restricted.", element_name);
  }

  return ML_ERROR_NONE;
}
817 :
818 : /**
819 : * @brief Get the ml_pipeline_element_e type from its element name
820 : */
821 : static ml_pipeline_element_e
822 0 : get_elem_type_from_name (GHashTable * table, const gchar * name)
823 : {
824 0 : gpointer value = g_hash_table_lookup (table, name);
825 0 : if (!value)
826 0 : return ML_PIPELINE_ELEMENT_UNKNOWN;
827 :
828 0 : return GPOINTER_TO_INT (value);
829 : }
830 :
/**
 * @brief Iterate elements and prepare element handle.
 * @param pipe_h The pipeline handle whose namednodes table is populated.
 * @param pipeline The parsed GStreamer pipeline to inspect.
 * @param is_internal If TRUE, skip the plugin allow-list check.
 * @return ML_ERROR_NONE, ML_ERROR_NOT_SUPPORTED for a restricted element,
 *         or ML_ERROR_OUT_OF_MEMORY on node allocation failure.
 */
static int
iterate_element (ml_pipeline * pipe_h, GstElement * pipeline,
    gboolean is_internal)
{
  GstIterator *it = NULL;
  int status = ML_ERROR_NONE;

  g_return_val_if_fail (pipe_h && pipeline, ML_ERROR_INVALID_PARAMETER);

  g_mutex_lock (&pipe_h->lock);

  it = gst_bin_iterate_elements (GST_BIN (pipeline));
  if (it != NULL) {
    gboolean done = FALSE;
    GValue item = G_VALUE_INIT;
    GObject *obj;
    gchar *name;

    /* Fill in the hashtable, "namednodes" with named Elements */
    while (!done) {
      switch (gst_iterator_next (it, &item)) {
        case GST_ITERATOR_OK:
          obj = g_value_get_object (&item);

          if (GST_IS_ELEMENT (obj)) {
            GstElement *elem = GST_ELEMENT (obj);
            GstPluginFeature *feature =
                GST_PLUGIN_FEATURE (gst_element_get_factory (elem));
            const gchar *plugin_name =
                gst_plugin_feature_get_plugin_name (feature);
            const gchar *element_name = gst_plugin_feature_get_name (feature);

            /* validate the availability of the plugin */
            if (!is_internal && _ml_check_plugin_availability (plugin_name,
                    element_name) != ML_ERROR_NONE) {
              _ml_error_report_continue
                  ("There is a pipeline element (filter) that is not allowed for applications via ML-API (privilege not granted) or now available: '%s'/'%s'.",
                  plugin_name, element_name);
              status = ML_ERROR_NOT_SUPPORTED;
              done = TRUE;
              break;
            }

            name = gst_element_get_name (elem);
            if (name != NULL) {
              ml_pipeline_element_e element_type =
                  get_elem_type_from_name (pipe_h->pipe_elm_type, element_name);

              /* check 'sync' property in sink element */
              if (element_type == ML_PIPELINE_ELEMENT_SINK ||
                  element_type == ML_PIPELINE_ELEMENT_APP_SINK) {
                gboolean sync = FALSE;

                g_object_get (G_OBJECT (elem), "sync", &sync, NULL);
                if (sync) {
                  _ml_logw (_ml_detail
                      ("It is recommended to apply 'sync=false' property to a sink element in most AI applications. Otherwise, inference results of large neural networks will be frequently dropped by the synchronization mechanism at the sink element."));
                }
              }

              if (element_type != ML_PIPELINE_ELEMENT_UNKNOWN) {
                ml_pipeline_element *e;

                /* The node keeps its own reference on the GstElement. */
                e = construct_element (gst_object_ref (elem), pipe_h, name,
                    element_type);
                if (e != NULL) {
                  if (g_str_equal (element_name, "tensor_if"))
                    process_tensor_if_option (e);
                  else if (g_str_equal (element_name, "tensor_filter"))
                    process_tensor_filter_option (e);

                  g_hash_table_insert (pipe_h->namednodes, g_strdup (name), e);
                } else {
                  /* allocation failure */
                  gst_object_unref (elem);
                  _ml_error_report_continue
                      ("Cannot allocate memory with construct_element().");
                  status = ML_ERROR_OUT_OF_MEMORY;
                  done = TRUE;
                }
              }

              g_free (name);
            }
          }

          g_value_reset (&item);
          break;
        case GST_ITERATOR_RESYNC:
        case GST_ITERATOR_ERROR:
          _ml_logw (_ml_detail
              ("There is an error or a resync-event while inspecting a pipeline. However, we can still execute the pipeline."));
          /* fallthrough */
        case GST_ITERATOR_DONE:
          done = TRUE;
      }
    }

    g_value_unset (&item);
    /** @todo CRITICAL check the validity of elem=item registered in e */
    gst_iterator_free (it);
  }

  g_mutex_unlock (&pipe_h->lock);
  return status;
}
940 :
941 : /**
942 : * @brief Internal function to create the hash table for managing internal resources
943 : */
944 : static void
945 0 : create_internal_hash (ml_pipeline * pipe_h)
946 : {
947 0 : pipe_h->namednodes =
948 0 : g_hash_table_new_full (g_str_hash, g_str_equal, g_free, cleanup_node);
949 0 : pipe_h->resources =
950 0 : g_hash_table_new_full (g_str_hash, g_str_equal, g_free, cleanup_resource);
951 :
952 0 : pipe_h->pipe_elm_type =
953 0 : g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
954 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("tensor_sink"),
955 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_SINK));
956 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("appsrc"),
957 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_APP_SRC));
958 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("appsink"),
959 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_APP_SINK));
960 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("valve"),
961 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_VALVE));
962 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("input-selector"),
963 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_SWITCH_INPUT));
964 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("output-selector"),
965 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_SWITCH_OUTPUT));
966 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("tensor_if"),
967 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_COMMON));
968 0 : g_hash_table_insert (pipe_h->pipe_elm_type, g_strdup ("tensor_filter"),
969 : GINT_TO_POINTER (ML_PIPELINE_ELEMENT_COMMON));
970 0 : }
971 :
972 : /**
973 : * @brief Internal function to construct the pipeline.
974 : * If is_internal is true, this will ignore the permission in Tizen.
975 : */
static int
construct_pipeline_internal (const char *pipeline_description,
    ml_pipeline_state_cb cb, void *user_data, ml_pipeline_h * pipe,
    gboolean is_internal)
{
  GError *err = NULL;
  GstElement *pipeline;
  gchar *description = NULL;
  int status = ML_ERROR_NONE;

  ml_pipeline *pipe_h;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (!pipe)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "ml_pipeline_construct error: parameter pipe is NULL. It should be a valid ml_pipeline_h pointer. E.g., ml_pipeline_h pipe; ml_pipeline_construct (..., &pip);");

  if (!pipeline_description)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "ml_pipeline_construct error: parameter pipeline_description is NULL. It should be a valid string of Gstreamer/NNStreamer pipeline description.");

  /* init null */
  *pipe = NULL;

  _ml_error_report_return_continue_iferr (_ml_initialize_gstreamer (),
      "ml_pipeline_construct error: it has failed to initialize gstreamer(). Please check if you have a valid GStreamer library installed in your system.");

  /* prepare pipeline handle */
  pipe_h = g_new0 (ml_pipeline, 1);
  /* NOTE(review): g_new0 aborts on OOM in GLib, so this check is defensive
   * and normally unreachable. */
  if (pipe_h == NULL)
    _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
        "ml_pipeline_construct error: failed to allocate memory for pipeline handle. Out of memory?");

  g_mutex_init (&pipe_h->lock);

  pipe_h->isEOS = FALSE;
  pipe_h->pipe_state = ML_PIPELINE_STATE_UNKNOWN;

  /* Prepare the named-node, resource, and element-type lookup tables. */
  create_internal_hash (pipe_h);

  /* convert predefined element and launch the pipeline */
  status = convert_description ((ml_pipeline_h) pipe_h, pipeline_description,
      &description, is_internal);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue
        ("ml_pipeline_construct error: failed while converting pipeline description for GStreamer w/ convert_description() function, which has returned %d",
        status);
    goto failed;
  }

  pipeline = gst_parse_launch (description, &err);
  g_free (description);

  /* gst_parse_launch may return a non-pipeline element or set err. */
  if (!GST_IS_PIPELINE (pipeline) || err) {
    _ml_error_report
        ("ml_pipeline_construct error: gst_parse_launch cannot parse and launch the given pipeline = [%s]. The error message from gst_parse_launch is '%s'.",
        pipeline_description, (err) ? err->message : "unknown reason");
    g_clear_error (&err);

    if (pipeline)
      gst_object_unref (pipeline);

    status = ML_ERROR_STREAMS_PIPE;
    goto failed;
  }

  pipe_h->element = pipeline;

  /* bus and message callback */
  pipe_h->bus = gst_element_get_bus (pipeline);
  if (pipe_h->bus == NULL) {
    _ml_error_report
        ("ml_pipeline_construct error: Failed to retrieve bus from the pipeline.");
    status = ML_ERROR_STREAMS_PIPE;
    goto failed;
  }

  gst_bus_enable_sync_message_emission (pipe_h->bus);
  pipe_h->signal_msg = g_signal_connect (pipe_h->bus, "sync-message",
      G_CALLBACK (cb_bus_sync_message), pipe_h);

  /* state change callback */
  pipe_h->state_cb.cb = cb;
  pipe_h->state_cb.user_data = user_data;

  /* iterate elements and prepare element handle */
  status = iterate_element (pipe_h, pipeline, is_internal);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue ("ml_pipeline_construct error: ...");
    goto failed;
  }

  /* finally set pipeline state to PAUSED */
  status = ml_pipeline_stop ((ml_pipeline_h) pipe_h);

  if (status == ML_ERROR_NONE) {
    /**
     * Let's wait until the pipeline state is changed to paused.
     * Otherwise, the following APIs like 'set_property' may incur
     * unintended behaviors. But, don't need to return any error
     * even if this state change is not finished within the timeout,
     * just relying on the caller.
     */
    gst_element_get_state (pipeline, NULL, NULL, 10 * GST_MSECOND);
  } else {
    _ml_error_report_continue
        ("ml_pipeline_construct error: ml_pipeline_stop has failed with %d return. The pipeline should be able to be stopped when it is constructed.",
        status);
  }

failed:
  if (status != ML_ERROR_NONE) {
    /* failed to construct the pipeline; destroy frees pipe_h and its tables */
    ml_pipeline_destroy ((ml_pipeline_h) pipe_h);
  } else {
    *pipe = pipe_h;
  }

  return status;
}
1097 :
1098 : /**
1099 : * @brief Construct the pipeline (more info in nnstreamer.h)
1100 : */
1101 : int
1102 0 : ml_pipeline_construct (const char *pipeline_description,
1103 : ml_pipeline_state_cb cb, void *user_data, ml_pipeline_h * pipe)
1104 : {
1105 : /* not an internal pipeline construction */
1106 0 : return construct_pipeline_internal (pipeline_description, cb, user_data, pipe,
1107 : FALSE);
1108 : }
1109 :
1110 : #if defined (__TIZEN__)
1111 : /**
1112 : * @brief Construct the pipeline (Tizen internal, see nnstreamer-tizen-internal.h)
1113 : */
1114 : int
1115 0 : ml_pipeline_construct_internal (const char *pipeline_description,
1116 : ml_pipeline_state_cb cb, void *user_data, ml_pipeline_h * pipe)
1117 : {
1118 : /* Tizen internal pipeline construction */
1119 0 : return construct_pipeline_internal (pipeline_description, cb, user_data, pipe,
1120 : TRUE);
1121 : }
1122 : #endif /* __TIZEN__ */
1123 :
1124 : /**
1125 : * @brief Destroy the pipeline (more info in nnstreamer.h)
1126 : */
1127 : int
1128 0 : ml_pipeline_destroy (ml_pipeline_h pipe)
1129 : {
1130 0 : ml_pipeline *p = pipe;
1131 : GstStateChangeReturn scret;
1132 : GstState state;
1133 0 : guint check_paused_cnt = 0;
1134 :
1135 0 : check_feature_state (ML_FEATURE_INFERENCE);
1136 :
1137 0 : if (p == NULL)
1138 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1139 : "The parameter, pipe, is NULL. It should be a valid ml_pipeline_h handle instance, usually created by ml_pipeline_construct().");
1140 :
1141 0 : g_mutex_lock (&p->lock);
1142 :
1143 : /* Before changing the state, remove all callbacks. */
1144 0 : p->state_cb.cb = NULL;
1145 :
1146 : /* Destroy registered callback handles and resources */
1147 0 : g_hash_table_destroy (p->namednodes);
1148 0 : g_hash_table_destroy (p->resources);
1149 0 : g_hash_table_destroy (p->pipe_elm_type);
1150 0 : p->namednodes = p->resources = p->pipe_elm_type = NULL;
1151 :
1152 0 : if (p->element) {
1153 : /* Pause the pipeline if it's playing */
1154 0 : scret = gst_element_get_state (p->element, &state, NULL, 10 * GST_MSECOND); /* 10ms */
1155 0 : if (scret != GST_STATE_CHANGE_FAILURE && state == GST_STATE_PLAYING) {
1156 0 : scret = gst_element_set_state (p->element, GST_STATE_PAUSED);
1157 0 : if (scret == GST_STATE_CHANGE_FAILURE) {
1158 0 : g_mutex_unlock (&p->lock);
1159 0 : _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
1160 : "gst_element_get_state() has failed to wait until state changed from PLAYING to PAUSED and returned GST_STATE_CHANGE_FAILURE. For the detail, please check the GStreamer log messages (or dlog messages in Tizen). It is possible that there is a filter or neural network that is taking too much time to finish.");
1161 : }
1162 : }
1163 :
1164 0 : g_mutex_unlock (&p->lock);
1165 0 : while (p->pipe_state == ML_PIPELINE_STATE_PLAYING) {
1166 0 : check_paused_cnt++;
1167 : /** check PAUSED every 1ms */
1168 0 : g_usleep (1000);
1169 0 : if (check_paused_cnt >= WAIT_PAUSED_TIME_LIMIT) {
1170 0 : _ml_error_report
1171 : ("Timeout while waiting for a state change to 'PAUSED' from a 'sync-message' signal from the pipeline. It is possible that there is a filter or neural network that is taking too much time to finish.");
1172 0 : break;
1173 : }
1174 : }
1175 0 : g_mutex_lock (&p->lock);
1176 :
1177 : /* Stop (NULL State) the pipeline */
1178 0 : scret = gst_element_set_state (p->element, GST_STATE_NULL);
1179 0 : if (scret != GST_STATE_CHANGE_SUCCESS) {
1180 0 : g_mutex_unlock (&p->lock);
1181 0 : _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
1182 : "gst_element_set_state to stop the pipeline has failed after trying to stop the pipeline with PAUSE and waiting for stopping. For the detail, please check the GStreamer log messages. It is possible that there is a filter of neural network that is taking too much time to finish.");
1183 : }
1184 :
1185 0 : if (p->bus) {
1186 0 : g_signal_handler_disconnect (p->bus, p->signal_msg);
1187 0 : gst_object_unref (p->bus);
1188 : }
1189 :
1190 0 : gst_object_unref (p->element);
1191 0 : p->element = NULL;
1192 : }
1193 :
1194 0 : g_mutex_unlock (&p->lock);
1195 0 : g_mutex_clear (&p->lock);
1196 :
1197 0 : g_free (p);
1198 0 : return ML_ERROR_NONE;
1199 : }
1200 :
1201 : /**
1202 : * @brief Get the pipeline state (more info in nnstreamer.h)
1203 : */
1204 : int
1205 0 : ml_pipeline_get_state (ml_pipeline_h pipe, ml_pipeline_state_e * state)
1206 : {
1207 0 : ml_pipeline *p = pipe;
1208 : GstState _state;
1209 : GstStateChangeReturn scret;
1210 :
1211 0 : check_feature_state (ML_FEATURE_INFERENCE);
1212 :
1213 0 : if (p == NULL)
1214 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1215 : "The parameter, pipe, is NULL. It should be a valid ml_pipeline_h handle, which is usually created by ml_pipeline_construct ().");
1216 0 : if (state == NULL)
1217 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1218 : "The parameter, state, is NULL. It should be a valid pointer of ml_pipeline_state_e. E.g., ml_pipeline_state_e state; ml_pipeline_get_state(pipe, &state);");
1219 :
1220 0 : *state = ML_PIPELINE_STATE_UNKNOWN;
1221 :
1222 0 : g_mutex_lock (&p->lock);
1223 0 : scret = gst_element_get_state (p->element, &_state, NULL, GST_MSECOND); /* Do it within 1ms! */
1224 0 : g_mutex_unlock (&p->lock);
1225 :
1226 0 : if (scret == GST_STATE_CHANGE_FAILURE)
1227 0 : _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
1228 : "Failed to get the state of the pipeline. For the detail, please check the GStreamer log messages.");
1229 :
1230 0 : *state = (ml_pipeline_state_e) _state;
1231 0 : return ML_ERROR_NONE;
1232 : }
1233 :
1234 : /****************************************************
1235 : ** NNStreamer Pipeline Start/Stop Control **
1236 : ****************************************************/
1237 : /**
1238 : * @brief Start/Resume the pipeline! (more info in nnstreamer.h)
1239 : */
int
ml_pipeline_start (ml_pipeline_h pipe)
{
  ml_pipeline *p = pipe;
  GstStateChangeReturn scret;
  int status = ML_ERROR_NONE;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (p == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, pipe, is NULL. It should be a valid ml_pipeline_h handle, which is usually created by ml_pipeline_construct ().");

  g_mutex_lock (&p->lock);

  /* check the resources when starting the pipeline */
  if (g_hash_table_size (p->resources)) {
    GHashTableIter iter;
    gpointer key, value;

    /* iterate all handle and acquire res if released */
    g_hash_table_iter_init (&iter, p->resources);
    while (g_hash_table_iter_next (&iter, &key, &value)) {
      /* Only keys prefixed "tizen" are resource-manager entries;
       * any acquisition failure aborts the start. */
      if (g_str_has_prefix (key, "tizen")) {
        status = get_tizen_resource (pipe, key);
        if (status != ML_ERROR_NONE) {
          _ml_error_report_continue
              ("Internal API _ml_tizen_get_resource () has failed: Tizen mm resource manager has failed to acquire the resource of '%s'",
              (gchar *) key);
          goto done;
        }
      }
    }
  }

  scret = gst_element_set_state (p->element, GST_STATE_PLAYING);
  if (scret == GST_STATE_CHANGE_FAILURE) {
    _ml_error_report
        ("Failed to set the state of the pipeline to PLAYING. For the detail, please check the GStreamer log messages.");
    status = ML_ERROR_STREAMS_PIPE;
  }

done:
  g_mutex_unlock (&p->lock);
  return status;
}
1286 :
1287 : /**
1288 : * @brief Pause the pipeline! (more info in nnstreamer.h)
1289 : */
1290 : int
1291 0 : ml_pipeline_stop (ml_pipeline_h pipe)
1292 : {
1293 0 : ml_pipeline *p = pipe;
1294 : GstStateChangeReturn scret;
1295 :
1296 0 : check_feature_state (ML_FEATURE_INFERENCE);
1297 :
1298 0 : if (p == NULL)
1299 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1300 : "The parameter, pipe, is NULL. It should be a valid ml_pipeline_h instance, which is usually created by ml_pipeline_construct().");
1301 :
1302 0 : g_mutex_lock (&p->lock);
1303 0 : scret = gst_element_set_state (p->element, GST_STATE_PAUSED);
1304 0 : g_mutex_unlock (&p->lock);
1305 :
1306 0 : if (scret == GST_STATE_CHANGE_FAILURE)
1307 0 : _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
1308 : "Failed to set the state of the pipeline to PAUSED. For the detail, please check the GStreamer log messages.");
1309 :
1310 0 : return ML_ERROR_NONE;
1311 : }
1312 :
1313 : /**
1314 : * @brief Clears all data and resets the running-time of the pipeline (more info in nnstreamer.h)
1315 : */
int
ml_pipeline_flush (ml_pipeline_h pipe, bool start)
{
  ml_pipeline *p = pipe;
  int status = ML_ERROR_NONE;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (p == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, pipe, is NULL. It should be a valid ml_pipeline_h instance, which is usually created by ml_pipeline_construct().");

  /* Pause first; this macro returns ml_pipeline_stop()'s error on failure. */
  _ml_error_report_return_continue_iferr (ml_pipeline_stop (pipe),
      "Failed to stop the pipeline with ml_pipeline_stop (). It has returned %d.",
      _ERRNO);

  _ml_logi ("The pipeline is stopped, clear all data from the pipeline.");

  /* send flush event to pipeline */
  g_mutex_lock (&p->lock);
  if (!gst_element_send_event (p->element, gst_event_new_flush_start ())) {
    /* Best-effort: flush failures are logged but do not change status. */
    _ml_logw ("Error occurs while sending flush_start event.");
  }

  if (!gst_element_send_event (p->element, gst_event_new_flush_stop (TRUE))) {
    _ml_logw ("Error occurs while sending flush_stop event.");
  }
  g_mutex_unlock (&p->lock);

  /* Optionally resume playback after flushing. */
  if (start && status == ML_ERROR_NONE)
    status = ml_pipeline_start (pipe);

  return status;
}
1350 :
1351 : /****************************************************
1352 : ** NNStreamer Pipeline Sink/Src Control **
1353 : ****************************************************/
1354 : /**
1355 : * @brief Register a callback for sink (more info in nnstreamer.h)
1356 : */
int
ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name,
    ml_pipeline_sink_cb cb, void *user_data, ml_pipeline_sink_h * h)
{
  ml_pipeline_element *elem;
  ml_pipeline *p = pipe;
  ml_pipeline_common_elem *sink;
  int ret = ML_ERROR_NONE;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (h == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The argument, h (ml_pipeline_sink_h), is NULL. It should be a valid pointer to a new ml_pipeline_sink_h instance. E.g., ml_pipeline_sink_h h; ml_pipeline_sink_register (...., &h);");

  /* init null */
  *h = NULL;

  if (pipe == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The argument, pipe (ml_pipeline_h), is NULL. It should be a valid ml_pipeline_h instance, usually created by ml_pipeline_construct.");

  if (sink_name == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The argument, sink_name (const char *), is NULL. It should be a valid string naming the sink handle (h).");

  if (cb == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The argument, cb (ml_pipeline_sink_cb), is NULL. It should be a call-back function called for data-sink events.");

  g_mutex_lock (&p->lock);
  elem = g_hash_table_lookup (p->namednodes, sink_name);

  if (elem == NULL) {
    _ml_error_report
        ("There is no element named [%s](sink_name) in the pipeline. Please check your pipeline description.",
        sink_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* Only tensor_sink and appsink element types accept sink callbacks. */
  if (elem->type != ML_PIPELINE_ELEMENT_SINK &&
      elem->type != ML_PIPELINE_ELEMENT_APP_SINK) {
    _ml_error_report
        ("The element [%s](sink_name) in the pipeline is not a sink element. Please supply the name of tensor_sink or appsink.",
        sink_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* The GObject signal is connected once per element; subsequent handles
   * share the existing connection. */
  if (elem->handle_id > 0) {
    /* no need to connect signal to sink element */
    _ml_logw ("Sink callback is already registered.");
  } else {
    /* set callback for new data */
    if (elem->type == ML_PIPELINE_ELEMENT_SINK) {
      /* tensor_sink */
      g_object_set (G_OBJECT (elem->element), "emit-signal", (gboolean) TRUE,
          NULL);
      elem->handle_id =
          g_signal_connect (elem->element, "new-data",
          G_CALLBACK (cb_sink_event), elem);
    } else {
      /* appsink */
      g_object_set (G_OBJECT (elem->element), "emit-signals", (gboolean) TRUE,
          NULL);
      elem->handle_id =
          g_signal_connect (elem->element, "new-sample",
          G_CALLBACK (cb_appsink_new_sample), elem);
    }

    if (elem->handle_id == 0) {
      _ml_error_report
          ("Failed to connect a signal to the element [%s](sink_name). g_signal_connect has returned NULL.",
          sink_name);
      ret = ML_ERROR_STREAMS_PIPE;
      goto unlock_return;
    }
  }

  /* NOTE(review): g_new0 aborts on OOM in GLib; these NULL checks are
   * defensive and normally unreachable. */
  sink = g_new0 (ml_pipeline_common_elem, 1);
  if (sink == NULL) {
    _ml_error_report
        ("Failed to allocate memory for the sink handle of %s. Out of memory?",
        sink_name);
    ret = ML_ERROR_OUT_OF_MEMORY;
    goto unlock_return;
  }

  sink->callback_info = g_new0 (callback_info_s, 1);
  if (sink->callback_info == NULL) {
    g_free (sink);
    _ml_error_report
        ("Failed to allocate memory for the sink handle of %s. Out of memory?",
        sink_name);
    ret = ML_ERROR_OUT_OF_MEMORY;
    goto unlock_return;
  }

  sink->pipe = p;
  sink->element = elem;
  sink->callback_info->sink_cb = cb;
  sink->callback_info->sink_pdata = user_data;
  *h = sink;

  /* Register this handle on the element under the element lock. */
  g_mutex_lock (&elem->lock);

  elem->maxid++;
  sink->id = elem->maxid;
  elem->handles = g_list_append (elem->handles, sink);

  g_mutex_unlock (&elem->lock);

unlock_return:
  g_mutex_unlock (&p->lock);
  return ret;
}
1474 :
1475 : /**
1476 : * @brief Unregister a callback for sink (more info in nnstreamer.h)
1477 : */
int
ml_pipeline_sink_unregister (ml_pipeline_sink_h h)
{
  /* handle_init validates h and brings sink/p/elem/ret into scope
   * (see the macro definition at the top of this file). */
  handle_init (sink, h);

  if (elem->handle_id > 0) {
    /* NOTE(review): this disconnects the element-level signal even if other
     * sink handles are still registered on the same element (the signal is
     * connected only once in ml_pipeline_sink_register) — verify intended. */
    g_signal_handler_disconnect (elem->element, elem->handle_id);
    elem->handle_id = 0;
  }

  /* Remove this handle from the element's handle list and free it. */
  elem->handles = g_list_remove (elem->handles, sink);
  free_element_handle (sink);

  handle_exit (h);
}
1493 :
1494 : /**
1495 : * @brief Parse tensors info of src element.
1496 : */
static int
ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
{
  GstCaps *caps = NULL;
  gboolean found = FALSE, flexible = FALSE;

  /* Lazily fetch and cache the element's "src" pad. */
  if (elem->src == NULL) {
    elem->src = gst_element_get_static_pad (elem->element, "src");
  }

  if (elem->src == NULL) {
    _ml_error_report
        ("Failed to get the src pad of the element[%s]. The designated source element does not have available src pad? For the detail, please check the GStreamer log messages.",
        elem->name);
    return ML_ERROR_STREAMS_PIPE;
  }

  /* If caps is given, use it. e.g. Use cap "image/png" when the pipeline is */
  /* given as "appsrc caps=image/png ! pngdec ! ... " */
  caps = gst_pad_get_current_caps (elem->src);
  if (!caps)
    caps = gst_pad_get_allowed_caps (elem->src);

  if (!caps) {
    _ml_logw
        ("Cannot find caps. The pipeline is not yet negotiated for src element [%s].",
        elem->name);
    /* Drop the cached pad so the next call retries from scratch. */
    gst_object_unref (elem->src);
    elem->src = NULL;
    return ML_ERROR_TRY_AGAIN;
  }

  found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible);

  if (found) {
    elem->is_flexible_tensor = flexible;
  } else {
    /* Not a recognizable tensor stream: if caps are fixed, decide whether
     * this is a plain media stream (e.g. image/video) instead. */
    if (gst_caps_is_fixed (caps)) {
      GstStructure *st = gst_caps_get_structure (caps, 0);
      elem->is_media_stream = !gst_structure_is_tensor_stream (st);
    }
  }

  gst_caps_unref (caps);
  return ML_ERROR_NONE;
}
1543 :
1544 : /**
1545 : * @brief Get a handle to operate a src (more info in nnstreamer.h)
1546 : */
int
ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name,
    ml_pipeline_src_h * h)
{
  ml_pipeline *p = pipe;
  ml_pipeline_element *elem;
  ml_pipeline_common_elem *src;
  int ret = ML_ERROR_NONE;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (h == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, h (ml_pipeline_src_h), is NULL. It should be a valid pointer to a new (or to be cleared) instance. E.g., ml_pipeline_src_h h; ml_pipeline_src_get_handle (..., &h);");

  /* init null */
  *h = NULL;

  if (pipe == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, pipe (ml_pipeline_h), is NULL. It should be a valid ml_pipeline_h instance, which is usually created by ml_pipeline_construct().");

  if (src_name == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, src_name (const char *), is NULL. This string is the name of source element (appsrc) you want to push data stream from your application threads.");

  g_mutex_lock (&p->lock);

  elem = g_hash_table_lookup (p->namednodes, src_name);

  if (elem == NULL) {
    _ml_error_report
        ("Cannot find the name, '%s': there is no element named [%s] in the given pipeline.",
        src_name, src_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  if (elem->type != ML_PIPELINE_ELEMENT_APP_SRC) {
    _ml_error_report
        ("The element designated by '%s' is not a source element (appsrc). Please provide a name of source element for ml_pipeline_src_get_handle API.",
        src_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* NOTE(review): g_new0 aborts on OOM in GLib; this check is defensive. */
  src = *h = g_new0 (ml_pipeline_common_elem, 1);
  if (src == NULL) {
    _ml_error_report
        ("Failed to allocate the src handle for %s. Out of memory?", src_name);
    ret = ML_ERROR_OUT_OF_MEMORY;
    goto unlock_return;
  }

  src->pipe = p;
  src->element = elem;

  g_mutex_lock (&elem->lock);

  elem->maxid++;
  src->id = elem->maxid;
  elem->handles = g_list_append (elem->handles, src);

  /* Return value intentionally ignored here: caps may not be negotiated yet
   * (ML_ERROR_TRY_AGAIN) and the info is re-parsed on each data push. */
  ml_pipeline_src_parse_tensors_info (elem);
  g_mutex_unlock (&elem->lock);

unlock_return:
  g_mutex_unlock (&p->lock);

  return ret;
}
1618 :
1619 : /**
1620 : * @brief Close a src node (more info in nnstreamer.h)
1621 : */
int
ml_pipeline_src_release_handle (ml_pipeline_src_h h)
{
  /* handle_init validates h and brings src/p/elem/ret into scope
   * (see the macro definition at the top of this file). */
  handle_init (src, h);

  /* Remove this handle from the element's handle list and free it. */
  elem->handles = g_list_remove (elem->handles, src);
  free_element_handle (src);

  handle_exit (h);
}
1632 :
1633 : /**
1634 : * @brief Push a data frame to a src (more info in nnstreamer.h)
1635 : */
1636 : int
1637 0 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data,
1638 : ml_pipeline_buf_policy_e policy)
1639 : {
1640 : GstBuffer *buffer;
1641 : GstMemory *mem, *tmp;
1642 : gpointer mem_data;
1643 : gsize mem_size;
1644 : GstFlowReturn gret;
1645 : GstTensorsInfo gst_info;
1646 : ml_tensors_data_s *_data;
1647 : unsigned int i;
1648 :
1649 0 : handle_init (src, h);
1650 :
1651 0 : _data = (ml_tensors_data_s *) data;
1652 0 : if (!_data) {
1653 0 : _ml_error_report
1654 : ("The given parameter, data (ml_tensors_data_h), is NULL. It should be a valid ml_tensor_data_h instance, which is usually created by ml_tensors_data_create().");
1655 0 : ret = ML_ERROR_INVALID_PARAMETER;
1656 0 : goto unlock_return;
1657 : }
1658 0 : G_LOCK_UNLESS_NOLOCK (*_data);
1659 :
1660 0 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
1661 0 : _ml_error_report
1662 : ("The number of tensors of the given data (ml_tensors_data_h) is invalid. The number of tensors of data is %u. It should be between 1 and %u.",
1663 : _data->num_tensors, ML_TENSOR_SIZE_LIMIT);
1664 0 : ret = ML_ERROR_INVALID_PARAMETER;
1665 0 : goto dont_destroy_data;
1666 : }
1667 :
1668 0 : ret = ml_pipeline_src_parse_tensors_info (elem);
1669 :
1670 0 : if (ret != ML_ERROR_NONE) {
1671 0 : if (ret == ML_ERROR_TRY_AGAIN)
1672 0 : _ml_error_report_continue
1673 : ("The pipeline is not ready to accept input streams. The input is ignored.");
1674 : else
1675 0 : _ml_error_report_continue
1676 : ("The pipeline is either not ready to accept input streams, yet, or does not have appropriate source elements to accept input streams.");
1677 0 : goto dont_destroy_data;
1678 : }
1679 :
1680 0 : if (!elem->is_media_stream && !elem->is_flexible_tensor) {
1681 0 : if (elem->tensors_info.num_tensors != _data->num_tensors) {
1682 0 : _ml_error_report
1683 : ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
1684 : elem->name, elem->tensors_info.num_tensors, _data->num_tensors);
1685 :
1686 0 : ret = ML_ERROR_INVALID_PARAMETER;
1687 0 : goto dont_destroy_data;
1688 : }
1689 :
1690 0 : for (i = 0; i < _data->num_tensors; i++) {
1691 0 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i);
1692 :
1693 0 : if (sz != _data->tensors[i].size) {
1694 0 : _ml_error_report
1695 : ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
1696 : i, _data->tensors[i].size, sz);
1697 :
1698 0 : ret = ML_ERROR_INVALID_PARAMETER;
1699 0 : goto dont_destroy_data;
1700 : }
1701 : }
1702 : }
1703 :
1704 : /* Create buffer to be pushed from buf[] */
1705 0 : buffer = gst_buffer_new ();
1706 0 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info);
1707 :
1708 0 : for (i = 0; i < _data->num_tensors; i++) {
1709 : GstTensorInfo *_gst_tensor_info =
1710 0 : gst_tensors_info_get_nth_info (&gst_info, i);
1711 0 : mem_data = _data->tensors[i].data;
1712 0 : mem_size = _data->tensors[i].size;
1713 :
1714 0 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
1715 : mem_data, mem_size, 0, mem_size, mem_data,
1716 : (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) ? g_free : NULL);
1717 :
1718 : /* flex tensor, append header. */
1719 0 : if (elem->is_flexible_tensor) {
1720 : GstTensorMetaInfo meta;
1721 :
1722 0 : gst_tensor_info_convert_to_meta (_gst_tensor_info, &meta);
1723 :
1724 0 : mem = gst_tensor_meta_info_append_header (&meta, tmp);
1725 0 : gst_memory_unref (tmp);
1726 : }
1727 :
1728 0 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info);
1729 : /** @todo Verify that gst_buffer_append lists tensors/gstmem in the correct order */
1730 : }
1731 :
1732 0 : gst_tensors_info_free (&gst_info);
1733 :
1734 : /* Unlock if it's not auto-free. We do not know when it'll be freed. */
1735 0 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE)
1736 0 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1737 :
1738 : /* Push the data! */
1739 0 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer);
1740 :
1741 : /* Free data ptr if buffer policy is auto-free */
1742 0 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) {
1743 0 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1744 0 : _ml_tensors_data_destroy_internal (_data, FALSE);
1745 0 : _data = NULL;
1746 : }
1747 :
1748 0 : if (gret == GST_FLOW_FLUSHING) {
1749 0 : _ml_logw
1750 : ("The pipeline is not in PAUSED/PLAYING. The input may be ignored.");
1751 0 : ret = ML_ERROR_TRY_AGAIN;
1752 0 : } else if (gret == GST_FLOW_EOS) {
1753 0 : _ml_logw ("THe pipeline is in EOS state. The input is ignored.");
1754 0 : ret = ML_ERROR_STREAMS_PIPE;
1755 : }
1756 :
1757 0 : goto unlock_return;
1758 :
1759 0 : dont_destroy_data:
1760 0 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1761 :
1762 0 : handle_exit (h);
1763 : }
1764 :
1765 : /**
1766 : * @brief Internal function to fetch ml_pipeline_src_callbacks_s pointer
1767 : */
1768 : static ml_pipeline_src_callbacks_s *
1769 0 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data)
1770 : {
1771 0 : ml_pipeline_src_callbacks_s *src_cb = NULL;
1772 :
1773 0 : if (src_h->callback_info) {
1774 0 : src_cb = &src_h->callback_info->src_cb;
1775 0 : *data = src_h->callback_info->src_pdata;
1776 : }
1777 :
1778 0 : return src_cb;
1779 : }
1780 :
1781 : /**
1782 : * @brief Internal function for appsrc callback - need_data.
1783 : */
1784 : static void
1785 0 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data)
1786 : {
1787 : ml_pipeline_common_elem *src_h;
1788 0 : ml_pipeline_src_callbacks_s *src_cb = NULL;
1789 0 : void *pdata = NULL;
1790 :
1791 0 : src_h = (ml_pipeline_common_elem *) user_data;
1792 0 : if (!src_h)
1793 0 : return;
1794 :
1795 0 : src_cb = get_app_src_callback (src_h, &pdata);
1796 0 : if (src_cb && src_cb->need_data)
1797 0 : src_cb->need_data (src_h, length, pdata);
1798 : }
1799 :
1800 : /**
1801 : * @brief Internal function for appsrc callback - enough_data.
1802 : */
1803 : static void
1804 0 : _pipe_src_cb_enough_data (GstAppSrc * src, gpointer user_data)
1805 : {
1806 : ml_pipeline_common_elem *src_h;
1807 0 : ml_pipeline_src_callbacks_s *src_cb = NULL;
1808 0 : void *pdata = NULL;
1809 :
1810 0 : src_h = (ml_pipeline_common_elem *) user_data;
1811 0 : if (!src_h)
1812 0 : return;
1813 :
1814 0 : src_cb = get_app_src_callback (src_h, &pdata);
1815 0 : if (src_cb && src_cb->enough_data)
1816 0 : src_cb->enough_data (src_h, pdata);
1817 : }
1818 :
1819 : /**
1820 : * @brief Internal function for appsrc callback - seek_data.
1821 : */
1822 : static gboolean
1823 0 : _pipe_src_cb_seek_data (GstAppSrc * src, guint64 offset, gpointer user_data)
1824 : {
1825 : ml_pipeline_common_elem *src_h;
1826 0 : ml_pipeline_src_callbacks_s *src_cb = NULL;
1827 0 : void *pdata = NULL;
1828 :
1829 0 : src_h = (ml_pipeline_common_elem *) user_data;
1830 0 : if (!src_h)
1831 0 : return TRUE;
1832 :
1833 0 : src_cb = get_app_src_callback (src_h, &pdata);
1834 0 : if (src_cb && src_cb->seek_data)
1835 0 : src_cb->seek_data (src_h, offset, pdata);
1836 :
1837 0 : return TRUE;
1838 : }
1839 :
1840 : /**
1841 : * @brief Register callbacks for src events (more info in nnstreamer.h)
1842 : */
int
ml_pipeline_src_set_event_cb (ml_pipeline_src_h src_handle,
    ml_pipeline_src_callbacks_s * cb, void *user_data)
{
  GstAppSrcCallbacks appsrc_cb = { 0, };

  /* handle_init validates src_handle and brings src/p/elem/ret into scope. */
  handle_init (src, src_handle);

  if (cb == NULL) {
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* Lazily allocate callback storage on first registration.
   * NOTE(review): g_new0 aborts on OOM in GLib; the check is defensive. */
  if (src->callback_info == NULL)
    src->callback_info = g_new0 (callback_info_s, 1);
  if (src->callback_info == NULL) {
    _ml_error_report
        ("Failed to allocate memory of the callback info for %s. Out of memory?",
        elem->name);
    ret = ML_ERROR_OUT_OF_MEMORY;
    goto unlock_return;
  }

  /* Store the user's callbacks; the _pipe_src_cb_* trampolines relay
   * appsrc events to them with src_handle as context. */
  src->callback_info->src_cb = *cb;
  src->callback_info->src_pdata = user_data;

  appsrc_cb.need_data = _pipe_src_cb_need_data;
  appsrc_cb.enough_data = _pipe_src_cb_enough_data;
  appsrc_cb.seek_data = _pipe_src_cb_seek_data;

  gst_app_src_set_callbacks (GST_APP_SRC (elem->element), &appsrc_cb,
      src_handle, NULL);

  handle_exit (src_handle);
}
1878 :
1879 : /**
1880 : * @brief Gets a handle for the tensors metadata of given src node.
1881 : */
1882 : int
1883 0 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info)
1884 : {
1885 0 : handle_init (src, h);
1886 :
1887 0 : if (info == NULL) {
1888 0 : _ml_error_report
1889 : ("The parameter, info (ml_tensors_info_h *), is NULL. It should be a valid pointer to a ml_tensors_info_h instance, which is usually created by ml_tensors_info_create().");
1890 0 : ret = ML_ERROR_INVALID_PARAMETER;
1891 0 : goto unlock_return;
1892 : }
1893 :
1894 0 : ret = ml_pipeline_src_parse_tensors_info (elem);
1895 :
1896 0 : if (ret == ML_ERROR_NONE) {
1897 0 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info);
1898 : } else {
1899 0 : _ml_error_report_continue
1900 : ("ml_pipeline_src_parse_tensors_info () has returned error; it cannot fetch input tensor info (metadata of input stream) for the given ml_pipeline_src_h handle (h). ml_pipeline_src_get_tensors_info () cannot continue.");
1901 : }
1902 :
1903 0 : handle_exit (h);
1904 : }
1905 :
1906 : /****************************************************
1907 : ** NNStreamer Pipeline Switch/Valve Control **
1908 : ****************************************************/
1909 :
1910 : /**
1911 : * @brief Get a handle to operate a selector (more info in nnstreamer.h)
1912 : */
1913 : int
1914 0 : ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name,
1915 : ml_pipeline_switch_e * type, ml_pipeline_switch_h * h)
1916 : {
1917 : ml_pipeline_element *elem;
1918 0 : ml_pipeline *p = pipe;
1919 : ml_pipeline_common_elem *swtc;
1920 0 : int ret = ML_ERROR_NONE;
1921 :
1922 0 : check_feature_state (ML_FEATURE_INFERENCE);
1923 :
1924 0 : if (h == NULL)
1925 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1926 : "The parameter, h (ml_pipeline_switch_h *), is NULL. It should be a new or to-be-cleared instance of ml_pipeline_switch_h. E.g., ml_pipeline_switch_h h; ml_pipeline_switch_get_handle (..., &h);");
1927 :
1928 : /* init null */
1929 0 : *h = NULL;
1930 :
1931 0 : if (pipe == NULL)
1932 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1933 : "The parameter, pipe (ml_pipeline_h), is NULL. It should be a valid ml_pipeline_h pipeline instance, which is usually created by ml_pipeline_construct().");
1934 :
1935 0 : if (switch_name == NULL)
1936 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1937 : "The parameter, switch_name, is NULL. It should be a valid string of the corresponding name of a switch element.");
1938 :
1939 0 : g_mutex_lock (&p->lock);
1940 0 : elem = g_hash_table_lookup (p->namednodes, switch_name);
1941 :
1942 0 : if (elem == NULL) {
1943 0 : _ml_error_report
1944 : ("The parameter, switch_name (%s), is invalid. An element with the name, '%s', cannot be found in the supplied pipeline (pipe)",
1945 : switch_name, switch_name);
1946 0 : ret = ML_ERROR_INVALID_PARAMETER;
1947 0 : goto unlock_return;
1948 : }
1949 :
1950 0 : if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_INPUT) {
1951 0 : if (type)
1952 0 : *type = ML_PIPELINE_SWITCH_INPUT_SELECTOR;
1953 0 : } else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT) {
1954 0 : if (type)
1955 0 : *type = ML_PIPELINE_SWITCH_OUTPUT_SELECTOR;
1956 : } else {
1957 0 : _ml_error_report
1958 : ("An element with the given name, '%s', is found; however, it is not a 'switch' element. A switch-handle cannot be fetched from a non-switch element. It should be either input-selector or output-selector.",
1959 : switch_name);
1960 0 : ret = ML_ERROR_INVALID_PARAMETER;
1961 0 : goto unlock_return;
1962 : }
1963 :
1964 0 : swtc = *h = g_new0 (ml_pipeline_common_elem, 1);
1965 0 : if (swtc == NULL) {
1966 0 : _ml_error_report
1967 : ("Failed to allocate memory of the switch handle, %s. Out of memory?",
1968 : switch_name);
1969 0 : ret = ML_ERROR_OUT_OF_MEMORY;
1970 0 : goto unlock_return;
1971 : }
1972 :
1973 0 : swtc->pipe = p;
1974 0 : swtc->element = elem;
1975 :
1976 0 : g_mutex_lock (&elem->lock);
1977 :
1978 0 : elem->maxid++;
1979 0 : swtc->id = elem->maxid;
1980 0 : elem->handles = g_list_append (elem->handles, swtc);
1981 :
1982 0 : g_mutex_unlock (&elem->lock);
1983 :
1984 0 : unlock_return:
1985 0 : g_mutex_unlock (&p->lock);
1986 0 : return ret;
1987 : }
1988 :
1989 : /**
1990 : * @brief Close the given switch handle (more info in nnstreamer.h)
1991 : */
int
ml_pipeline_switch_release_handle (ml_pipeline_switch_h h)
{
  /* handle_init validates 'h' and declares 'swtc', 'p', 'elem', 'ret'. */
  handle_init (swtc, h);

  /* Unregister from the owning element, then free the handle wrapper. */
  elem->handles = g_list_remove (elem->handles, swtc);
  free_element_handle (swtc);

  handle_exit (h);
}
2002 :
2003 : /**
2004 : * @brief Control the switch (more info in nnstreamer.h)
2005 : */
2006 : int
2007 0 : ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name)
2008 : {
2009 : GstPad *active_pad, *new_pad;
2010 : gchar *active_name;
2011 :
2012 0 : handle_init (swtc, h);
2013 :
2014 0 : if (pad_name == NULL) {
2015 0 : _ml_error_report
2016 : ("The parameter, pad_name (const char *), is NULL. It should be a valid name of a pad (GSTPAD) in the given switch, h.");
2017 0 : ret = ML_ERROR_INVALID_PARAMETER;
2018 0 : goto unlock_return;
2019 : }
2020 :
2021 0 : g_object_get (G_OBJECT (elem->element), "active-pad", &active_pad, NULL);
2022 0 : active_name = gst_pad_get_name (active_pad);
2023 :
2024 0 : if (g_strcmp0 (pad_name, active_name) == 0) {
2025 0 : _ml_logi ("Switch is called, but there is no effective changes: %s->%s.",
2026 : active_name, pad_name);
2027 0 : g_free (active_name);
2028 0 : gst_object_unref (active_pad);
2029 :
2030 0 : goto unlock_return;
2031 : }
2032 :
2033 0 : g_free (active_name);
2034 0 : gst_object_unref (active_pad);
2035 :
2036 0 : new_pad = gst_element_get_static_pad (elem->element, pad_name);
2037 0 : if (new_pad == NULL) {
2038 : /* Not Found! */
2039 0 : _ml_error_report
2040 : ("Cannot find the pad, [%s], from the switch, [%s]. Please check the pad name. You may use ml_pipeline_switch_pad_list() to fetch the valid pad names.",
2041 : pad_name, elem->name);
2042 0 : ret = ML_ERROR_INVALID_PARAMETER;
2043 0 : goto unlock_return;
2044 : }
2045 :
2046 0 : g_object_set (G_OBJECT (elem->element), "active-pad", new_pad, NULL);
2047 0 : gst_object_unref (new_pad);
2048 :
2049 0 : _ml_logi ("Switched to [%s] successfully at switch [%s].", pad_name,
2050 : elem->name);
2051 :
2052 0 : handle_exit (h);
2053 : }
2054 :
2055 : /**
2056 : * @brief Gets the pad names of a switch.
2057 : */
2058 : int
2059 0 : ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list)
2060 : {
2061 : GstIterator *it;
2062 0 : GValue item = G_VALUE_INIT;
2063 0 : gboolean done = FALSE;
2064 0 : GList *dllist = NULL;
2065 : GstPad *pad;
2066 0 : int counter = 0;
2067 :
2068 0 : handle_init (swtc, h);
2069 :
2070 0 : if (list == NULL) {
2071 0 : _ml_error_report
2072 : ("The parameter, list (char ***), is NULL. It should be a valid pointer to store a list of strings. E.g., char **list; ml_pipeline_switch_get_pad_list (h, &list);");
2073 0 : ret = ML_ERROR_INVALID_PARAMETER;
2074 0 : goto unlock_return;
2075 : }
2076 :
2077 : /* init null */
2078 0 : *list = NULL;
2079 :
2080 0 : if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_INPUT)
2081 0 : it = gst_element_iterate_sink_pads (elem->element);
2082 0 : else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT)
2083 0 : it = gst_element_iterate_src_pads (elem->element);
2084 : else {
2085 0 : _ml_error_report
2086 : ("The element, [%s], is supposed to be input/output switch, but it is not. Internal data structure is broken.",
2087 : elem->name);
2088 0 : ret = ML_ERROR_STREAMS_PIPE;
2089 0 : goto unlock_return;
2090 : }
2091 :
2092 0 : while (!done) {
2093 0 : switch (gst_iterator_next (it, &item)) {
2094 0 : case GST_ITERATOR_OK:
2095 0 : pad = GST_PAD (g_value_get_object (&item));
2096 0 : dllist = g_list_append (dllist, gst_pad_get_name (pad));
2097 0 : counter++;
2098 0 : g_value_reset (&item);
2099 0 : break;
2100 0 : case GST_ITERATOR_RESYNC:
2101 0 : g_list_free_full (dllist, g_free); /* This frees all strings as well */
2102 0 : dllist = NULL;
2103 0 : counter = 0;
2104 0 : gst_iterator_resync (it);
2105 0 : break;
2106 0 : case GST_ITERATOR_ERROR:
2107 0 : _ml_error_report
2108 : ("Cannot access the list of pad properly of a switch, [%s]. Internal data structure is broken?",
2109 : elem->name);
2110 0 : ret = ML_ERROR_STREAMS_PIPE;
2111 0 : break;
2112 0 : case GST_ITERATOR_DONE:
2113 0 : done = TRUE;
2114 0 : break;
2115 : }
2116 : }
2117 :
2118 0 : gst_iterator_free (it);
2119 :
2120 : /* There has been no error with that "while" loop. */
2121 0 : if (ret == ML_ERROR_NONE) {
2122 0 : int i = 0;
2123 : GList *l;
2124 :
2125 0 : *list = g_malloc0 (sizeof (char *) * (counter + 1));
2126 0 : if (*list == NULL) {
2127 0 : _ml_error_report
2128 : ("Failed to allocate memory for pad list (parameter list). Out of memory?");
2129 0 : ret = ML_ERROR_OUT_OF_MEMORY;
2130 0 : g_list_free_full (dllist, g_free);
2131 0 : goto unlock_return;
2132 : }
2133 :
2134 0 : for (l = dllist; l != NULL; l = l->next) {
2135 0 : (*list)[i] = l->data; /* Allocated by gst_pad_get_name(). Caller has to free it */
2136 0 : i++;
2137 :
2138 0 : if (i > counter) {
2139 0 : g_list_free_full (dllist, g_free); /* This frees all strings as well */
2140 0 : g_clear_pointer (list, g_free);
2141 :
2142 0 : _ml_error_report
2143 : ("Internal data inconsistency. This could be a bug in nnstreamer. Switch [%s].",
2144 : elem->name);
2145 0 : ret = ML_ERROR_STREAMS_PIPE;
2146 0 : goto unlock_return;
2147 : }
2148 : }
2149 : }
2150 0 : g_list_free (dllist); /* This does not free the strings.. fortunately. */
2151 :
2152 0 : handle_exit (h);
2153 : }
2154 :
2155 : /**
2156 : * @brief Get a handle to operate a Valve (more info in nnstreamer.h)
2157 : */
int
ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name,
    ml_pipeline_valve_h * h)
{
  ml_pipeline_element *elem;
  ml_pipeline *p = pipe;
  ml_pipeline_common_elem *valve;
  int ret = ML_ERROR_NONE;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (h == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, h (ml_pipeline_valve_h), is NULL. It should be a valid pointer of ml_pipeline_valve_h. E.g., ml_pipeline_valve_h h; ml_pipeline_valve_get_handle (..., &h);");

  /* init null */
  *h = NULL;

  if (pipe == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, pipe (ml_pipeline_h), is NULL. It should be a valid ml_pipeline_h instance, which is usually created by ml_pipeline_construct().");

  if (valve_name == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, valve_name (const char *), is NULL. It should be a valid string of the valve name.");

  /* Look the named element up under the pipeline lock. */
  g_mutex_lock (&p->lock);
  elem = g_hash_table_lookup (p->namednodes, valve_name);

  if (elem == NULL) {
    _ml_error_report
        ("Cannot find the valve with the given name, '%s', in the pipeline. There is no element in the pipeline with such a name. Please check if you have a value with the appropriate name.",
        valve_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* The named element exists but must actually be a valve. */
  if (elem->type != ML_PIPELINE_ELEMENT_VALVE) {
    _ml_error_report
        ("Cannot find the value with the given name, '%s', in the pipeline. There is an element with such a name; however, the element is not a valve. Please correct the names of element in the pipeline.",
        valve_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  valve = *h = g_new0 (ml_pipeline_common_elem, 1);
  if (valve == NULL) {
    _ml_error_report
        ("Cannot allocate memory for valve handle of %s. Out of memory?",
        valve_name);
    ret = ML_ERROR_OUT_OF_MEMORY;
    goto unlock_return;
  }

  valve->pipe = p;
  valve->element = elem;

  /* Register the new handle on the element under the element lock. */
  g_mutex_lock (&elem->lock);

  elem->maxid++;
  valve->id = elem->maxid;
  elem->handles = g_list_append (elem->handles, valve);

  g_mutex_unlock (&elem->lock);

unlock_return:
  g_mutex_unlock (&p->lock);
  return ret;
}
2227 :
2228 : /**
2229 : * @brief Close the given valve handle (more info in nnstreamer.h)
2230 : */
int
ml_pipeline_valve_release_handle (ml_pipeline_valve_h h)
{
  /* handle_init validates 'h' and declares 'valve', 'p', 'elem', 'ret'. */
  handle_init (valve, h);

  /* Unregister from the owning element, then free the handle wrapper. */
  elem->handles = g_list_remove (elem->handles, valve);
  free_element_handle (valve);

  handle_exit (h);
}
2241 :
2242 : /**
2243 : * @brief Control the valve with the given handle (more info in nnstreamer.h)
2244 : */
2245 : int
2246 0 : ml_pipeline_valve_set_open (ml_pipeline_valve_h h, bool open)
2247 : {
2248 0 : gboolean drop = FALSE;
2249 0 : handle_init (valve, h);
2250 :
2251 0 : g_object_get (G_OBJECT (elem->element), "drop", &drop, NULL);
2252 :
2253 0 : if ((open != false) != (drop != FALSE)) {
2254 : /* Nothing to do */
2255 0 : _ml_logi ("Valve is called, but there is no effective changes");
2256 0 : goto unlock_return;
2257 : }
2258 :
2259 0 : drop = (open) ? FALSE : TRUE;
2260 0 : g_object_set (G_OBJECT (elem->element), "drop", drop, NULL);
2261 :
2262 0 : handle_exit (h);
2263 : }
2264 :
2265 : /********************************************************
2266 : ** NNStreamer Element Property Control in Pipeline **
2267 : ********************************************************/
2268 : /**
2269 : * @brief Gets an element handle in NNStreamer pipelines to control its properties.
2270 : */
int
ml_pipeline_element_get_handle (ml_pipeline_h pipe, const char *element_name,
    ml_pipeline_element_h * elem_h)
{
  int ret = ML_ERROR_NONE;
  ml_pipeline_element *elem;
  ml_pipeline_common_elem *common_elem;
  ml_pipeline *p = pipe;

  /* Check input parameter */
  if (pipe == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, pipe (ml_pipeline_h), is NULL. It should be a valid ml_pipeline_h instance, which is usually created by ml_pipeline_construct().");

  if (element_name == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, element_name (const char *), is NULL. It should be a valid string of the element name to be searched.");

  if (elem_h == NULL)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, elem_h (ml_pipeline_element_h), is NULL. It should be a valid pointer of ml_pipeline_element_h. E.g., ml_pipeline_element_h eh; ml_pipeline_element_get_handle (..., &eh);");
  /* Clear the output so callers never observe a stale value on error. */
  *elem_h = NULL;

  g_mutex_lock (&p->lock);

  /* 1. Search element in lookup table first */
  elem = g_hash_table_lookup (p->namednodes, element_name);
  if (elem == NULL) {
    /* 2. Search element in pipeline itself */
    GstElement *gst_elem;

    gst_elem = gst_bin_get_by_name (GST_BIN (p->element), element_name);
    if (gst_elem == NULL) {
      _ml_error_report
          ("Cannot find the element with the given name, '%s', in the pipeline. There is no element in the pipeline with such a name. Please check if you have an element with the appropriate name.",
          element_name);
      ret = ML_ERROR_INVALID_PARAMETER;
      goto unlock_return;
    }

    /* Caching for next search */
    elem = construct_element (gst_elem, pipe, element_name,
        ML_PIPELINE_ELEMENT_COMMON);
    if (elem == NULL) {
      _ml_error_report
          ("Cannot allocate memory for element handle of %s. Out of memory?",
          element_name);
      ret = ML_ERROR_OUT_OF_MEMORY;
      goto unlock_return;
    }
    /* The table takes ownership of the duplicated key string. */
    g_hash_table_insert (p->namednodes, g_strdup (element_name), elem);
  }

  /* Type checking */
  if (elem->type == ML_PIPELINE_ELEMENT_UNKNOWN) {
    _ml_error_report
        ("There is an element named [%s] in the pipeline, but its type is unknown. It is possible that the app thread has touched ML-API's internal data structure.",
        element_name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  common_elem = *elem_h = g_new0 (ml_pipeline_common_elem, 1);
  if (common_elem == NULL) {
    _ml_error_report
        ("Failed to allocate the internal handler for %s. Out of memory?",
        element_name);
    ret = ML_ERROR_OUT_OF_MEMORY;
    goto unlock_return;
  }

  common_elem->pipe = p;
  common_elem->element = elem;

  /* Register the new handle on the element under the element lock. */
  g_mutex_lock (&elem->lock);
  elem->maxid++;
  common_elem->id = elem->maxid;
  elem->handles = g_list_append (elem->handles, common_elem);
  g_mutex_unlock (&elem->lock);

unlock_return:
  g_mutex_unlock (&p->lock);
  return ret;
}
2355 :
2356 : /**
2357 : * @brief Releases the given element handle.
2358 : */
int
ml_pipeline_element_release_handle (ml_pipeline_element_h elem_h)
{
  /* handle_init validates 'elem_h' and declares 'common_elem', 'p', 'elem', 'ret'. */
  handle_init (common_elem, elem_h);

  /* Unregister from the owning element, then free the handle wrapper. */
  elem->handles = g_list_remove (elem->handles, common_elem);
  free_element_handle (common_elem);

  handle_exit (elem_h);
}
2369 :
2370 : /**
2371 : * @brief Check property existence and its type.
2372 : */
2373 : static bool
2374 0 : ml_pipeline_element_check_property (GObjectClass * klass,
2375 : const char *property_name, const GType type)
2376 : {
2377 0 : GParamSpec *pspec = NULL;
2378 :
2379 : /* Check property existence */
2380 0 : pspec = g_object_class_find_property (klass, property_name);
2381 0 : if (pspec == NULL) {
2382 0 : _ml_logw ("The property name [%s] does not exist.", property_name);
2383 0 : return FALSE;
2384 : }
2385 :
2386 : /* Compare property's type with given type */
2387 0 : if (!((pspec->value_type == type) ||
2388 0 : (type == G_TYPE_ENUM && G_TYPE_IS_ENUM (pspec->value_type)) ||
2389 0 : (type == G_TYPE_INT64 && pspec->value_type == G_TYPE_LONG) ||
2390 0 : (type == G_TYPE_UINT64 && pspec->value_type == G_TYPE_ULONG) ||
2391 0 : (type == G_TYPE_INT && G_TYPE_IS_ENUM (pspec->value_type)) ||
2392 0 : (type == G_TYPE_UINT && G_TYPE_IS_ENUM (pspec->value_type)) ||
2393 0 : (type == G_TYPE_DOUBLE && pspec->value_type == G_TYPE_FLOAT))) {
2394 0 : _ml_logw ("The type of property name [%s] is '%s'", property_name,
2395 : g_type_name (pspec->value_type));
2396 0 : return FALSE;
2397 : }
2398 0 : return TRUE;
2399 : }
2400 :
2401 : /**
2402 : * @brief Sets the value of given element's property in NNStreamer pipelines.
2403 : */
static int
ml_pipeline_element_set_property (ml_pipeline_element_h elem_h,
    const char *property_name, gpointer value, GType type)
{
  handle_init (common_elem, elem_h);

  /* Check the input parameter */
  if (property_name == NULL) {
    _ml_error_report
        ("The parameter, property_name (const char *), is NULL. It should be a valid string of property name.");
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* Check property existence & its type */
  if (!ml_pipeline_element_check_property (G_OBJECT_GET_CLASS (elem->element),
          property_name, type)) {
    _ml_error_report
        ("The property ('%s') of the element, '%s', cannot be checked. It looks like this property does not exist in this element.",
        property_name, elem->name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* Set property. 'value' is either a pointer-encoded scalar (int/uint/
   * bool/enum/string) or, for 64-bit and floating types, the ADDRESS of
   * the value, since those may not fit in a pointer; dereference
   * accordingly before handing it to the g_object_set varargs. */
  if (type == G_TYPE_DOUBLE || type == G_TYPE_FLOAT) {
    /* Float properties also take a double here: C varargs promote
     * float to double anyway, and the callers always pass a double. */
    g_object_set (G_OBJECT (elem->element), property_name,
        *(double *) value, NULL);
  } else if (type == G_TYPE_INT64) {
    g_object_set (G_OBJECT (elem->element), property_name,
        *(int64_t *) value, NULL);
  } else if (type == G_TYPE_UINT64) {
    g_object_set (G_OBJECT (elem->element), property_name,
        *(uint64_t *) value, NULL);
  } else {
    g_object_set (G_OBJECT (elem->element), property_name, value, NULL);
  }

  handle_exit (elem_h);
}
2444 :
2445 : /**
2446 : * @brief Gets the value of given element's property in NNStreamer pipelines.
2447 : */
static int
ml_pipeline_element_get_property (ml_pipeline_element_h elem_h,
    const char *property_name, GType type, gpointer pvalue)
{
  handle_init (common_elem, elem_h);

  /* Check the input parameter */
  if (property_name == NULL) {
    _ml_error_report
        ("The parameter, property_name (const char *), is NULL. It should be a valid string of the property name of an element.");
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  if (pvalue == NULL) {
    _ml_error_report
        ("The parameter, pvalue (gpointer / a pointer of a value), is NULL. It should be a valid gpointer (a pointer of a value). E.g., char *str; ... ml_pipeline_get_property_string (... &str); ... int32_t val; ... ml_pipeline_get_property_int32 (..., &val);");
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* Check property existence & its type */
  if (!ml_pipeline_element_check_property (G_OBJECT_GET_CLASS (elem->element),
          property_name, type)) {
    _ml_error_report
        ("Cannot check the property ('%s') or the element ('%s'). Please check if you have the corresponding element in the pipeline.",
        property_name, elem->name);
    ret = ML_ERROR_INVALID_PARAMETER;
    goto unlock_return;
  }

  /* Get property: g_object_get writes the result into the storage that
   * 'pvalue' points to; the caller must pass storage matching 'type'. */
  g_object_get (G_OBJECT (elem->element), property_name, pvalue, NULL);

  handle_exit (elem_h);
}
2484 :
2485 : /**
2486 : * @brief Sets the boolean value of element's property in NNStreamer pipelines.
2487 : */
int
ml_pipeline_element_set_property_bool (ml_pipeline_element_h elem_h,
    const char *property_name, const int32_t value)
{
  /* Scalar fits in a pointer: encoded with GINT_TO_POINTER. */
  return ml_pipeline_element_set_property (elem_h, property_name,
      GINT_TO_POINTER (value), G_TYPE_BOOLEAN);
}

/**
 * @brief Sets the string value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_string (ml_pipeline_element_h elem_h,
    const char *property_name, const char *value)
{
  return ml_pipeline_element_set_property (elem_h, property_name,
      (gpointer) value, G_TYPE_STRING);
}

/**
 * @brief Sets the integer value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_int32 (ml_pipeline_element_h elem_h,
    const char *property_name, const int32_t value)
{
  return ml_pipeline_element_set_property (elem_h, property_name,
      GINT_TO_POINTER (value), G_TYPE_INT);
}

/**
 * @brief Sets the integer 64bit value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_int64 (ml_pipeline_element_h elem_h,
    const char *property_name, const int64_t value)
{
  /* 64-bit values may not fit in a pointer: pass by address instead; the
   * address of the parameter is valid for the duration of the call. */
  return ml_pipeline_element_set_property (elem_h, property_name,
      (gpointer) (&value), G_TYPE_INT64);
}

/**
 * @brief Sets the unsigned integer value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_uint32 (ml_pipeline_element_h elem_h,
    const char *property_name, const uint32_t value)
{
  return ml_pipeline_element_set_property (elem_h, property_name,
      GUINT_TO_POINTER (value), G_TYPE_UINT);
}

/**
 * @brief Sets the unsigned integer 64bit value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_uint64 (ml_pipeline_element_h elem_h,
    const char *property_name, const uint64_t value)
{
  /* 64-bit values may not fit in a pointer: pass by address instead. */
  return ml_pipeline_element_set_property (elem_h, property_name,
      (gpointer) (&value), G_TYPE_UINT64);
}

/**
 * @brief Sets the floating point value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_double (ml_pipeline_element_h elem_h,
    const char *property_name, const double value)
{
  /* Floating values are always passed by address and read as double. */
  return ml_pipeline_element_set_property (elem_h, property_name,
      (gpointer) (&value), G_TYPE_DOUBLE);
}

/**
 * @brief Sets the enumeration value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_set_property_enum (ml_pipeline_element_h elem_h,
    const char *property_name, const uint32_t value)
{
  return ml_pipeline_element_set_property (elem_h, property_name,
      GUINT_TO_POINTER (value), G_TYPE_ENUM);
}
2572 :
2573 : /**
2574 : * @brief Gets the boolean value of element's property in NNStreamer pipelines.
2575 : */
int
ml_pipeline_element_get_property_bool (ml_pipeline_element_h elem_h,
    const char *property_name, int32_t * value)
{
  /* 'value' receives the result; it must point to valid caller storage. */
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_BOOLEAN, (gpointer) value);
}

/**
 * @brief Gets the string value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_string (ml_pipeline_element_h elem_h,
    const char *property_name, char **value)
{
  /* NOTE(review): per GObject conventions g_object_get returns a copy of
   * string properties, so the caller would own and free *value — confirm. */
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_STRING, (gpointer) value);
}

/**
 * @brief Gets the integer value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_int32 (ml_pipeline_element_h elem_h,
    const char *property_name, int32_t * value)
{
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_INT, (gpointer) value);
}

/**
 * @brief Gets the integer 64bit value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_int64 (ml_pipeline_element_h elem_h,
    const char *property_name, int64_t * value)
{
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_INT64, (gpointer) value);
}

/**
 * @brief Gets the unsigned integer value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_uint32 (ml_pipeline_element_h elem_h,
    const char *property_name, uint32_t * value)
{
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_UINT, (gpointer) value);
}

/**
 * @brief Gets the unsigned integer 64bit value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_uint64 (ml_pipeline_element_h elem_h,
    const char *property_name, uint64_t * value)
{
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_UINT64, (gpointer) value);
}

/**
 * @brief Gets the floating point value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_double (ml_pipeline_element_h elem_h,
    const char *property_name, double *value)
{
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_DOUBLE, (gpointer) value);
}

/**
 * @brief Gets the enumeration value of element's property in NNStreamer pipelines.
 */
int
ml_pipeline_element_get_property_enum (ml_pipeline_element_h elem_h,
    const char *property_name, uint32_t * value)
{
  return ml_pipeline_element_get_property (elem_h, property_name,
      G_TYPE_ENUM, (gpointer) value);
}
2660 :
2661 : /**
2662 : * @brief Gets the element of pipeline itself (GstElement).
2663 : */
2664 : GstElement *
2665 0 : _ml_pipeline_get_gst_pipeline (ml_pipeline_h pipe)
2666 : {
2667 0 : ml_pipeline *p = (ml_pipeline *) pipe;
2668 0 : GstElement *element = NULL;
2669 :
2670 0 : if (p) {
2671 0 : g_mutex_lock (&p->lock);
2672 :
2673 0 : element = p->element;
2674 0 : if (element)
2675 0 : gst_object_ref (element);
2676 :
2677 0 : g_mutex_unlock (&p->lock);
2678 : }
2679 :
2680 0 : return element;
2681 : }
2682 :
2683 : /**
2684 : * @brief Gets the element in pipeline (GstElement).
2685 : */
2686 : GstElement *
2687 0 : _ml_pipeline_get_gst_element (ml_pipeline_element_h handle)
2688 : {
2689 0 : ml_pipeline_common_elem *e = (ml_pipeline_common_elem *) handle;
2690 0 : GstElement *element = NULL;
2691 :
2692 0 : if (e && e->element) {
2693 0 : ml_pipeline_element *elem = e->element;
2694 :
2695 0 : g_mutex_lock (&elem->lock);
2696 :
2697 0 : element = elem->element;
2698 0 : if (element)
2699 0 : gst_object_ref (element);
2700 :
2701 0 : g_mutex_unlock (&elem->lock);
2702 : }
2703 :
2704 0 : return element;
2705 : }
2706 :
2707 : /**
2708 : * @brief Increases ref count of custom-easy filter.
2709 : */
2710 : static void
2711 0 : ml_pipeline_custom_filter_ref (ml_custom_easy_filter_h custom)
2712 : {
2713 0 : ml_custom_filter_s *c = (ml_custom_filter_s *) custom;
2714 :
2715 0 : if (c) {
2716 0 : g_mutex_lock (&c->lock);
2717 0 : c->ref_count++;
2718 0 : g_mutex_unlock (&c->lock);
2719 : }
2720 0 : }
2721 :
2722 : /**
2723 : * @brief Decreases ref count of custom-easy filter.
2724 : */
2725 : static void
2726 0 : ml_pipeline_custom_filter_unref (ml_custom_easy_filter_h custom)
2727 : {
2728 0 : ml_custom_filter_s *c = (ml_custom_filter_s *) custom;
2729 :
2730 0 : if (!c)
2731 0 : return;
2732 :
2733 0 : g_mutex_lock (&c->lock);
2734 0 : if (c->ref_count > 0)
2735 0 : c->ref_count--;
2736 0 : g_mutex_unlock (&c->lock);
2737 : }
2738 :
2739 : /**
2740 : * @brief Releases custom filter handle.
2741 : */
2742 : static void
2743 0 : ml_pipeline_custom_free_handle (ml_custom_filter_s * custom)
2744 : {
2745 0 : if (custom) {
2746 0 : g_mutex_lock (&custom->lock);
2747 :
2748 0 : g_clear_pointer (&custom->name, g_free);
2749 0 : ml_tensors_info_destroy (custom->in_info);
2750 0 : ml_tensors_info_destroy (custom->out_info);
2751 :
2752 0 : g_mutex_unlock (&custom->lock);
2753 0 : g_mutex_clear (&custom->lock);
2754 :
2755 0 : g_free (custom);
2756 : }
2757 0 : }
2758 :
2759 : /**
2760 : * @brief Invoke callback for custom-easy filter.
2761 : */
static int
ml_pipeline_custom_invoke (void *data, const GstTensorFilterProperties * prop,
    const GstTensorMemory * in, GstTensorMemory * out)
{
  int status;
  ml_custom_filter_s *c;
  ml_tensors_data_h in_data, out_data;
  ml_tensors_data_s *_data;
  guint i;

  in_data = out_data = NULL;
  c = (ml_custom_filter_s *) data;

  /* internal error? */
  if (!c || !c->cb)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "Internal error of callback function, ml_pipeline_custom_invoke. Its internal data structure is broken.");

  /* Serialize invocations of the same custom filter. */
  g_mutex_lock (&c->lock);

  /* prepare invoke: wrap the gst-provided memory into ml_tensors_data
   * containers without allocating tensor payloads. */
  status = _ml_tensors_data_create_no_alloc (c->in_info, &in_data);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue ("_ml_tensors_data_create_no_alloc has failed.");
    goto done;
  }

  /* Point the input container at the incoming tensor memory (no copy). */
  _data = (ml_tensors_data_s *) in_data;
  for (i = 0; i < _data->num_tensors; i++)
    _data->tensors[i].data = in[i].data;

  status = _ml_tensors_data_create_no_alloc (c->out_info, &out_data);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue ("_ml_tensors_data_create_no_alloc has failed.");
    goto done;
  }

  /* Point the output container at the caller-provided output memory. */
  _data = (ml_tensors_data_s *) out_data;
  for (i = 0; i < _data->num_tensors; i++)
    _data->tensors[i].data = out[i].data;

  /* call invoke callback */
  status = c->cb (in_data, out_data, c->pdata);

done:
  g_mutex_unlock (&c->lock);
  /* NOTE: DO NOT free tensor data */
  /* Destroy only the wrapper structures; the payloads are owned by gst. */
  _ml_tensors_data_destroy_internal (in_data, FALSE);
  _ml_tensors_data_destroy_internal (out_data, FALSE);

  return status;
}
2814 :
2815 : /**
2816 : * @brief Registers a custom filter.
2817 : */
2818 : int
2819 0 : ml_pipeline_custom_easy_filter_register (const char *name,
2820 : const ml_tensors_info_h in, const ml_tensors_info_h out,
2821 : ml_custom_easy_invoke_cb cb, void *user_data,
2822 : ml_custom_easy_filter_h * custom)
2823 : {
2824 0 : int status = ML_ERROR_NONE;
2825 : ml_custom_filter_s *c;
2826 : GstTensorsInfo in_info, out_info;
2827 :
2828 0 : check_feature_state (ML_FEATURE_INFERENCE);
2829 :
2830 0 : if (!name)
2831 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2832 : "The parameter, name (const char *), is NULL. It should be a valid string of the filter name.");
2833 0 : if (!cb)
2834 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2835 : "The parameter, cb (ml_custom_easy_invoke_cb), is NULL. It should be a valid call-back struct containing function pointer and its related data.");
2836 0 : if (!custom)
2837 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2838 : "The parameter, custom (ml_custom_easy_filter_h *), is NULL. It should be a valid pointer of ml_custom_easy_filter. E.g., ml_custom_easy_filter_h custom; ml_pipeline_custom_easy_filter_register (..., &custom);");
2839 :
2840 : /* init null */
2841 0 : *custom = NULL;
2842 :
2843 0 : if (!ml_tensors_info_is_valid (in))
2844 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2845 : "The parameter, in (const ml_tensors_info_h), is not valid. ml_tensors_info_is_valid(in) has returned FALSE. Please check if its cloned/fetched from a valid object or if you have configured it properly.");
2846 0 : if (!ml_tensors_info_is_valid (out))
2847 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2848 : "The parameter, out (const ml_tensors_info_h), is not valid. ml_tensors_info_is_valid(in) has returned FALSE. Please check if its cloned/fetched from a valid object or if you have configured it properly.");
2849 :
2850 : /* create and init custom handle */
2851 0 : if ((c = g_new0 (ml_custom_filter_s, 1)) == NULL)
2852 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
2853 : "Cannot allocate memory. Out of memory?");
2854 :
2855 0 : g_mutex_init (&c->lock);
2856 :
2857 : /** no need to acquire c->lock as its created locally */
2858 0 : c->name = g_strdup (name);
2859 0 : c->ref_count = 0;
2860 0 : c->cb = cb;
2861 0 : c->pdata = user_data;
2862 0 : ml_tensors_info_create_extended (&c->in_info);
2863 0 : ml_tensors_info_create_extended (&c->out_info);
2864 :
2865 0 : status = ml_tensors_info_clone (c->in_info, in);
2866 0 : if (status != ML_ERROR_NONE) {
2867 0 : _ml_error_report_continue
2868 : ("ml_tensors_info_clone has failed with %d. Cannot fetch input tensor-info (metadata).",
2869 : status);
2870 0 : goto exit;
2871 : }
2872 :
2873 0 : status = ml_tensors_info_clone (c->out_info, out);
2874 0 : if (status != ML_ERROR_NONE) {
2875 0 : _ml_error_report_continue
2876 : ("ml_tensors_info_clone has filed with %d. Cannot fetch output tensor-info (metadata).",
2877 : status);
2878 0 : goto exit;
2879 : }
2880 :
2881 : /* register custom filter */
2882 0 : _ml_tensors_info_copy_from_ml (&in_info, c->in_info);
2883 0 : _ml_tensors_info_copy_from_ml (&out_info, c->out_info);
2884 :
2885 0 : status = NNS_custom_easy_register (name, ml_pipeline_custom_invoke, c,
2886 : &in_info, &out_info);
2887 0 : if (status != 0) {
2888 0 : char buf[255] = { 0 };
2889 0 : if (status == -EINVAL) {
2890 0 : status = ML_ERROR_INVALID_PARAMETER;
2891 0 : strncpy (buf, "invalid parameters are given.", 254);
2892 0 : } else if (status == -ENOMEM) {
2893 0 : status = ML_ERROR_OUT_OF_MEMORY;
2894 0 : strncpy (buf, "out of memory. cannot allocate.", 254);
2895 : } else {
2896 0 : status = ML_ERROR_UNKNOWN;
2897 0 : strncpy (buf, "unknown error.", 254);
2898 : }
2899 0 : _ml_error_report
2900 : ("Failed to register custom filter %s with NNStreamer API, NNS_custom_easy_register(). It has returned %d, which means '%s'.",
2901 : name, status, buf);
2902 : }
2903 :
2904 0 : exit:
2905 0 : if (status == ML_ERROR_NONE) {
2906 0 : pipe_custom_add_data (PIPE_CUSTOM_TYPE_FILTER, name, c);
2907 0 : *custom = c;
2908 : } else {
2909 0 : ml_pipeline_custom_free_handle (c);
2910 : }
2911 :
2912 0 : return status;
2913 : }
2914 :
2915 : /**
2916 : * @brief Unregisters the custom filter.
2917 : */
2918 : int
2919 0 : ml_pipeline_custom_easy_filter_unregister (ml_custom_easy_filter_h custom)
2920 : {
2921 : ml_custom_filter_s *c;
2922 0 : int status = ML_ERROR_NONE;
2923 :
2924 0 : check_feature_state (ML_FEATURE_INFERENCE);
2925 :
2926 0 : if (!custom)
2927 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2928 : "The parameter, custom (ml_custom_easy_filter_h), is NULL. It should be a valid ml_custom_easy_filter_h instance, usually created by ml_pipeline_custom_easy_filter_register().");
2929 :
2930 0 : c = (ml_custom_filter_s *) custom;
2931 0 : g_mutex_lock (&c->lock);
2932 :
2933 0 : if (c->ref_count > 0) {
2934 0 : _ml_error_report
2935 : ("Failed to unregister custom filter %s, it is used in the pipeline. Its reference counter value is %u.",
2936 : c->name, c->ref_count);
2937 0 : status = ML_ERROR_INVALID_PARAMETER;
2938 0 : goto done;
2939 : }
2940 :
2941 0 : status = NNS_custom_easy_unregister (c->name);
2942 0 : if (status != 0) {
2943 0 : _ml_error_report
2944 : ("Failed to unregister custom filter %s. It is possible that this is already unregistered or not registered.",
2945 : c->name);
2946 0 : status = ML_ERROR_INVALID_PARAMETER;
2947 0 : goto done;
2948 : }
2949 :
2950 0 : done:
2951 0 : g_mutex_unlock (&c->lock);
2952 :
2953 0 : if (status == ML_ERROR_NONE) {
2954 0 : pipe_custom_remove_data (PIPE_CUSTOM_TYPE_FILTER, c->name);
2955 0 : ml_pipeline_custom_free_handle (c);
2956 : }
2957 :
2958 0 : return status;
2959 : }
2960 :
2961 : /**
2962 : * @brief Increases ref count of tensor_if custom condition.
2963 : */
2964 : static void
2965 0 : ml_pipeline_if_custom_ref (ml_pipeline_if_h custom)
2966 : {
2967 0 : ml_if_custom_s *c = (ml_if_custom_s *) custom;
2968 :
2969 0 : if (c) {
2970 0 : g_mutex_lock (&c->lock);
2971 0 : c->ref_count++;
2972 0 : g_mutex_unlock (&c->lock);
2973 : }
2974 0 : }
2975 :
2976 : /**
2977 : * @brief Decreases ref count of tensor_if custom condition.
2978 : */
2979 : static void
2980 0 : ml_pipeline_if_custom_unref (ml_pipeline_if_h custom)
2981 : {
2982 0 : ml_if_custom_s *c = (ml_if_custom_s *) custom;
2983 :
2984 0 : if (c) {
2985 0 : g_mutex_lock (&c->lock);
2986 0 : if (c->ref_count > 0)
2987 0 : c->ref_count--;
2988 0 : g_mutex_unlock (&c->lock);
2989 : }
2990 0 : }
2991 :
/**
 * @brief Callback for tensor_if custom condition.
 * @details Adapter invoked by the tensor_if element: wraps the incoming
 *          tensor memories into an ml_tensors_data handle (pointer-only, no
 *          payload copy) plus an ml_tensors_info created from @a info, then
 *          runs the user's condition callback under the handle lock.
 * @param[in] info Tensor metadata of the incoming frame.
 * @param[in] input Tensor memory chunks owned by the pipeline.
 * @param[in] data The ml_if_custom_s registered via nnstreamer_if_custom_register.
 * @param[out] result Set by the user callback (the if-condition verdict).
 * @return TRUE when the user callback returned ML_ERROR_NONE; FALSE on any
 *         internal failure or callback error.
 */
static gboolean
ml_pipeline_if_custom (const GstTensorsInfo * info,
    const GstTensorMemory * input, void *data, gboolean * result)
{
  int status = 0;
  guint i;
  ml_if_custom_s *c;
  ml_tensors_data_h in_data = NULL;
  ml_tensors_data_s *_data;
  ml_tensors_info_h ml_info = NULL;
  gboolean ret = FALSE;

  c = (ml_if_custom_s *) data;

  /* internal error? */
  if (!c || !c->cb)
    _ml_error_report_return (FALSE,
        "Internal error: the parameter, data, is not valid. App thread might have touched internal data structure.");

  status = _ml_tensors_info_create_from_gst (&ml_info, info);
  if (status != ML_ERROR_NONE)
    _ml_error_report_return_continue (FALSE,
        "Cannot create tensors-info from the parameter, info (const GstTensorsInfo). _ml_tensors_info_create_from_gst has returned %d.",
        status);
  status = _ml_tensors_data_create_no_alloc (ml_info, &in_data);
  if (status != ML_ERROR_NONE) {
    /* ml_info was created above, so fall through to the cleanup path. */
    _ml_error_report_continue
        ("Cannot create data entry from the given metadata, info (const GstTensorMemory, although we could create tensor-info from info. _ml_tensors_data_create_no_alloc() has returned %d.",
        status);
    goto done;
  }

  /* Borrow the pipeline-owned buffers; only pointers are assigned. */
  _data = (ml_tensors_data_s *) in_data;
  for (i = 0; i < _data->num_tensors; i++)
    _data->tensors[i].data = input[i].data;

  /* call invoke callback */
  g_mutex_lock (&c->lock);
  status = c->cb (in_data, ml_info, result, c->pdata);
  g_mutex_unlock (&c->lock);

  ret = (status == ML_ERROR_NONE);
  if (!ret)
    _ml_error_report
        ("The callback function of if-statement has returned error: %d.",
        status);

done:
  /* Destroy wrappers only; payloads belong to the pipeline (FALSE = no free). */
  ml_tensors_info_destroy (ml_info);
  _ml_tensors_data_destroy_internal (in_data, FALSE);

  return ret;
}
3048 :
3049 : /**
3050 : * @brief Releases tensor_if custom condition.
3051 : */
3052 : static void
3053 0 : ml_pipeline_if_custom_free (ml_if_custom_s * custom)
3054 : {
3055 0 : if (custom) {
3056 0 : g_mutex_lock (&custom->lock);
3057 :
3058 0 : g_clear_pointer (&custom->name, g_free);
3059 :
3060 0 : g_mutex_unlock (&custom->lock);
3061 0 : g_mutex_clear (&custom->lock);
3062 :
3063 0 : g_free (custom);
3064 : }
3065 0 : }
3066 :
/**
 * @brief Registers the tensor_if custom callback.
 * @details Creates an ml_if_custom_s handle for the given name and callback
 *          and registers it with nnstreamer_if_custom_register(). On failure
 *          the handle is released and *if_custom stays NULL.
 * @param[in] name Name of the tensor_if custom condition (must be unique;
 *            'any'/'auto' are rejected by the underlying API).
 * @param[in] cb User callback deciding the if-condition per frame.
 * @param[in] user_data Opaque pointer handed back to @a cb.
 * @param[out] if_custom The created handle on success (release with
 *             ml_pipeline_tensor_if_custom_unregister()).
 * @return ML_ERROR_NONE on success, otherwise an ml error code.
 */
int
ml_pipeline_tensor_if_custom_register (const char *name,
    ml_pipeline_if_custom_cb cb, void *user_data, ml_pipeline_if_h * if_custom)
{
  int status = ML_ERROR_NONE;
  ml_if_custom_s *c;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (!name)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, name (const char *), is NULL. It should be a valid string of the tensor_if element in your pipeline.");
  if (!cb)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, cb (ml_pipeline_if_custom_cb callback function pointer), is NULL. It should be a valid function pointer that determines if the 'if' statement is TRUE or FALSE from the given tensor data frame.");
  if (!if_custom)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, if_custom (ml_pipeline_if_h *), is NULL. It should be a valid pointer to the pipeline-if handle instance. E.g., ml_pipeline_if_h h; ml_pipeline_tensor_if_custom_register (..., &h);");

  /* init null */
  *if_custom = NULL;

  /* create and init custom handle */
  if ((c = g_try_new0 (ml_if_custom_s, 1)) == NULL)
    _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
        "Cannot allocate memory. Out of memory?");

  g_mutex_init (&c->lock);

  /* NOTE(review): the lock is held across nnstreamer_if_custom_register(),
   * unlike the custom-easy sibling which builds the handle unlocked since it
   * is still local. Presumably harmless here, but confirm the registration
   * path cannot call back into ml_pipeline_if_custom (which takes the same
   * lock) before this function returns. */
  g_mutex_lock (&c->lock);
  c->name = g_strdup (name);
  c->ref_count = 0;
  c->cb = cb;
  c->pdata = user_data;

  status = nnstreamer_if_custom_register (name, ml_pipeline_if_custom, c);
  if (status != 0) {
    /* Translate errno-style results into ML error codes with diagnostics. */
    if (status == -ENOMEM) {
      _ml_error_report
          ("Failed to register tensor_if custom condition %s because nnstreamer_if_custom_register has failed to allocate memory. Out of memory?",
          name);
      status = ML_ERROR_OUT_OF_MEMORY;
    } else if (status == -EINVAL) {
      _ml_error_report
          ("Failed to register tensor_if custom condition %s because nnstreamer_if_custom_register has reported that an invalid parameter is given to the API call. Please check if the given name is 0-length or duplicated (already registered), memory is full, or the name is not allowed ('any', 'auto' are not allowed).",
          name);
      status = ML_ERROR_INVALID_PARAMETER;
    } else {
      _ml_error_report
          ("Failed to register tensor_if custom condition %s because nnstreamer_if_custom_register has returned unknown error.",
          name);
      status = ML_ERROR_UNKNOWN;
    }
  }
  g_mutex_unlock (&c->lock);

  if (status == ML_ERROR_NONE) {
    pipe_custom_add_data (PIPE_CUSTOM_TYPE_IF, name, c);
    *if_custom = c;
  } else {
    ml_pipeline_if_custom_free (c);
  }

  return status;
}
3135 :
3136 : /**
3137 : * @brief Unregisters the tensor_if custom callback.
3138 : */
3139 : int
3140 0 : ml_pipeline_tensor_if_custom_unregister (ml_pipeline_if_h if_custom)
3141 : {
3142 : ml_if_custom_s *c;
3143 0 : int status = ML_ERROR_NONE;
3144 :
3145 0 : check_feature_state (ML_FEATURE_INFERENCE);
3146 :
3147 0 : if (!if_custom)
3148 0 : return ML_ERROR_INVALID_PARAMETER;
3149 :
3150 0 : c = (ml_if_custom_s *) if_custom;
3151 0 : g_mutex_lock (&c->lock);
3152 :
3153 0 : if (c->ref_count > 0) {
3154 0 : _ml_error_report
3155 : ("Failed to unregister custom condition %s, it is used in the pipeline.",
3156 : c->name);
3157 0 : status = ML_ERROR_INVALID_PARAMETER;
3158 0 : goto done;
3159 : }
3160 :
3161 0 : status = nnstreamer_if_custom_unregister (c->name);
3162 0 : if (status != 0) {
3163 0 : if (status == -EINVAL)
3164 0 : _ml_error_report
3165 : ("Failed to unregister tensor_if custom condition %s. It appears that it is already unregistered or not yet registered.",
3166 : c->name);
3167 : else
3168 0 : _ml_error_report
3169 : ("Failed to unregister tensor_if custom condition %s with unknown reason. Internal error?",
3170 : c->name);
3171 0 : status = ML_ERROR_STREAMS_PIPE;
3172 0 : goto done;
3173 : }
3174 :
3175 0 : done:
3176 0 : g_mutex_unlock (&c->lock);
3177 :
3178 0 : if (status == ML_ERROR_NONE) {
3179 0 : pipe_custom_remove_data (PIPE_CUSTOM_TYPE_IF, c->name);
3180 0 : ml_pipeline_if_custom_free (c);
3181 : }
3182 :
3183 0 : return status;
3184 : }
|