Line data Source code
1 : /* SPDX-License-Identifier: Apache-2.0 */
2 : /**
3 : * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
4 : *
5 : * @file ml-api-inference-single.c
6 : * @date 29 Aug 2019
7 : * @brief NNStreamer/Single C-API Wrapper.
8 : * This allows invoking individual input frames with NNStreamer.
9 : * @see https://github.com/nnstreamer/nnstreamer
10 : * @author MyungJoo Ham <myungjoo.ham@samsung.com>
11 : * @author Parichay Kapoor <pk.kapoor@samsung.com>
12 : * @bug No known bugs except for NYI items
13 : */
14 :
15 : #include <string.h>
16 : #include <nnstreamer-single.h>
17 : #include <nnstreamer-tizen-internal.h> /* Tizen platform header */
18 : #include <nnstreamer_internal.h>
19 : #include <nnstreamer_plugin_api_util.h>
20 : #include <tensor_filter_single.h>
21 :
22 : #include "ml-api-inference-internal.h"
23 : #include "ml-api-internal.h"
24 : #include "ml-api-inference-single-internal.h"
25 :
26 : #define ML_SINGLE_MAGIC 0xfeedfeed
27 :
28 : /**
29 : * @brief Default time to wait for an output in milliseconds (0 waits indefinitely for the output).
30 : */
31 : #define SINGLE_DEFAULT_TIMEOUT 0
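/* Illustrative note (not part of the original file): applications may override
 * this default per handle with the public API, e.g.:
 *   ml_single_set_timeout (single, 1000);  // wait up to 1000 ms per invoke
 */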
32 :
33 : /**
34 : * @brief Global lock for single shot API
35 : * @details This lock ensures that ml_single_close is thread safe. All other API
36 : * functions use the mutex from the single handle. However, for close,
37 : * the single handle's mutex cannot be used because the handle is
38 : * destroyed during close.
39 : * @note This mutex is automatically initialized as it is statically declared
40 : */
41 : G_LOCK_DEFINE_STATIC (magic);
42 :
43 : /**
44 : * @brief Get valid handle after magic verification
45 : * @note handle's mutex (single_h->mutex) is acquired after this
46 : * @param[out] single_h The handle properly cast: (ml_single *).
47 : * @param[in] single The handle to be validated: (void *).
48 : * @param[in] reset Set TRUE if the handle is to be reset (magic = 0).
49 : */
50 : #define ML_SINGLE_GET_VALID_HANDLE_LOCKED(single_h, single, reset) do { \
51 : G_LOCK (magic); \
52 : single_h = (ml_single *) single; \
53 : if (G_UNLIKELY(single_h->magic != ML_SINGLE_MAGIC)) { \
54 : _ml_error_report \
55 : ("The given param, %s (ml_single_h), is invalid. It is not a single_h instance or the user thread has modified it.", \
56 : #single); \
57 : G_UNLOCK (magic); \
58 : return ML_ERROR_INVALID_PARAMETER; \
59 : } \
60 : if (G_UNLIKELY(reset)) \
61 : single_h->magic = 0; \
62 : g_mutex_lock (&single_h->mutex); \
63 : G_UNLOCK (magic); \
64 : } while (0)
65 :
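/**
 * Usage sketch (illustrative, not part of the original file): a typical API
 * entry locks and unlocks the handle with the macro pair above.
 * @code
 * static int
 * example_api (ml_single_h single)
 * {
 *   ml_single *single_h;
 *   int status = ML_ERROR_NONE;
 *
 *   ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
 *   // ... operate on single_h under single_h->mutex ...
 *   ML_SINGLE_HANDLE_UNLOCK (single_h);
 *   return status;
 * }
 * @endcode
 */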
66 : /**
67 : * @brief This is for the symmetry with ML_SINGLE_GET_VALID_HANDLE_LOCKED
68 : * @param[in] single_h The casted handle (ml_single *).
69 : */
70 : #define ML_SINGLE_HANDLE_UNLOCK(single_h) g_mutex_unlock (&single_h->mutex);
71 :
72 : /** define string names for input/output */
73 : #define INPUT_STR "input"
74 : #define OUTPUT_STR "output"
75 : #define TYPE_STR "type"
76 : #define NAME_STR "name"
77 :
78 : /** concat string from #define */
79 : #define CONCAT_MACRO_STR(STR1,STR2) STR1 STR2
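/* Illustrative example: CONCAT_MACRO_STR (INPUT_STR, TYPE_STR) expands to
 * "input" "type", which the compiler concatenates to "inputtype". */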
80 :
81 : /** States for invoke thread */
82 : typedef enum
83 : {
84 : IDLE = 0, /**< ready to accept next input */
85 : RUNNING, /**< running an input, cannot accept more input */
86 : JOIN_REQUESTED /**< should join the thread, will exit soon */
87 : } thread_state;
88 :
89 : /**
90 : * @brief The name of sub-plugin for defined neural net frameworks.
91 : * @note The sub-plugins for Android (e.g., snap) are not declared here.
92 : */
93 : static const char *ml_nnfw_subplugin_name[] = {
94 : [ML_NNFW_TYPE_ANY] = "any", /* DO NOT use this name ('any') to get the sub-plugin */
95 : [ML_NNFW_TYPE_CUSTOM_FILTER] = "custom",
96 : [ML_NNFW_TYPE_TENSORFLOW_LITE] = "tensorflow-lite",
97 : [ML_NNFW_TYPE_TENSORFLOW] = "tensorflow",
98 : [ML_NNFW_TYPE_NNFW] = "nnfw",
99 : [ML_NNFW_TYPE_MVNC] = "movidius-ncsdk2",
100 : [ML_NNFW_TYPE_OPENVINO] = "openvino",
101 : [ML_NNFW_TYPE_VIVANTE] = "vivante",
102 : [ML_NNFW_TYPE_EDGE_TPU] = "edgetpu",
103 : [ML_NNFW_TYPE_ARMNN] = "armnn",
104 : [ML_NNFW_TYPE_SNPE] = "snpe",
105 : [ML_NNFW_TYPE_PYTORCH] = "pytorch",
106 : [ML_NNFW_TYPE_NNTR_INF] = "nntrainer",
107 : [ML_NNFW_TYPE_VD_AIFW] = "vd_aifw",
108 : [ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine",
109 : [ML_NNFW_TYPE_MXNET] = "mxnet",
110 : [ML_NNFW_TYPE_TVM] = "tvm",
111 : [ML_NNFW_TYPE_ONNX_RUNTIME] = "onnxruntime",
112 : [ML_NNFW_TYPE_NCNN] = "ncnn",
113 : [ML_NNFW_TYPE_TENSORRT] = "tensorrt",
114 : [ML_NNFW_TYPE_QNN] = "qnn",
115 : [ML_NNFW_TYPE_LLAMACPP] = "llamacpp",
116 : [ML_NNFW_TYPE_TIZEN_HAL] = "tizen-hal",
117 : NULL
118 : };
119 :
120 : /** ML single api data structure for handle */
121 : typedef struct
122 : {
123 : GTensorFilterSingleClass *klass; /**< tensor filter class structure*/
124 : GTensorFilterSingle *filter; /**< tensor filter element */
125 : GstTensorsInfo in_info; /**< info about input */
126 : GstTensorsInfo out_info; /**< info about output */
127 : ml_nnfw_type_e nnfw; /**< nnfw type for this filter */
128 : guint magic; /**< code to verify valid handle */
129 :
130 : GThread *thread; /**< thread for invoking */
131 : GMutex mutex; /**< mutex for synchronization */
132 : GCond cond; /**< condition for synchronization */
133 : ml_tensors_data_h input; /**< input received from user */
134 : ml_tensors_data_h output; /**< output to be sent back to user */
135 : guint timeout; /**< timeout for invoking */
136 : thread_state state; /**< current state of the thread */
137 : gboolean free_output; /**< true if output tensors are allocated in single-shot */
138 : int status; /**< status of processing */
139 : gboolean invoking; /**< invoke running flag */
140 : ml_tensors_data_h in_tensors; /**< input tensor wrapper for processing */
141 : ml_tensors_data_h out_tensors; /**< output tensor wrapper for processing */
142 :
143 : GList *destroy_data_list; /**< data to be freed by filter */
144 : gboolean invoke_dynamic; /**< true to invoke flexible tensor */
145 : gboolean invoke_async; /**< true to invoke and return result asynchronously */
146 : ml_tensors_data_cb invoke_async_cb; /**< Callback function to be called when the sub-plugin generates an output asynchronously. */
147 : void *invoke_async_pdata; /**< Private data to be passed to async callback. */
148 : } ml_single;
149 :
150 : /**
151 : * @brief Internal function to get the nnfw type.
152 : */
153 : ml_nnfw_type_e
154 0 : _ml_get_nnfw_type_by_subplugin_name (const char *name)
155 : {
156 0 : ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY;
157 0 : int idx = -1;
158 :
159 0 : if (name == NULL)
160 0 : return ML_NNFW_TYPE_ANY;
161 :
162 0 : idx = find_key_strv (ml_nnfw_subplugin_name, name);
163 0 : if (idx < 0) {
164 : /* check sub-plugin for android */
165 0 : if (g_ascii_strcasecmp (name, "snap") == 0)
166 0 : nnfw_type = ML_NNFW_TYPE_SNAP;
167 : else
168 0 : _ml_error_report ("Cannot find nnfw, %s is an invalid name.",
169 : _STR_NULL (name));
170 : } else {
171 0 : nnfw_type = (ml_nnfw_type_e) idx;
172 : }
173 :
174 0 : return nnfw_type;
175 : }
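/* Illustrative examples (not part of the original file):
 *   _ml_get_nnfw_type_by_subplugin_name ("tensorflow-lite")
 *       == ML_NNFW_TYPE_TENSORFLOW_LITE
 *   _ml_get_nnfw_type_by_subplugin_name ("snap") == ML_NNFW_TYPE_SNAP
 */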
176 :
177 : /**
178 : * @brief Internal function to get the sub-plugin name.
179 : */
180 : const char *
181 0 : _ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw)
182 : {
183 : /* check sub-plugin for android */
184 0 : if (nnfw == ML_NNFW_TYPE_SNAP)
185 0 : return "snap";
186 :
187 0 : return ml_nnfw_subplugin_name[nnfw];
188 : }
189 :
190 : /**
191 : * @brief Convert c-api based hw to internal representation
192 : */
193 : accl_hw
194 0 : _ml_nnfw_to_accl_hw (const ml_nnfw_hw_e hw)
195 : {
196 0 : switch (hw) {
197 0 : case ML_NNFW_HW_ANY:
198 0 : return ACCL_DEFAULT;
199 0 : case ML_NNFW_HW_AUTO:
200 0 : return ACCL_AUTO;
201 0 : case ML_NNFW_HW_CPU:
202 0 : return ACCL_CPU;
203 : #if defined (__aarch64__) || defined (__arm__)
204 0 : case ML_NNFW_HW_CPU_NEON:
205 0 : return ACCL_CPU_NEON;
206 : #else
207 : case ML_NNFW_HW_CPU_SIMD:
208 : return ACCL_CPU_SIMD;
209 : #endif
210 0 : case ML_NNFW_HW_GPU:
211 0 : return ACCL_GPU;
212 0 : case ML_NNFW_HW_NPU:
213 0 : return ACCL_NPU;
214 0 : case ML_NNFW_HW_NPU_MOVIDIUS:
215 0 : return ACCL_NPU_MOVIDIUS;
216 0 : case ML_NNFW_HW_NPU_EDGE_TPU:
217 0 : return ACCL_NPU_EDGE_TPU;
218 0 : case ML_NNFW_HW_NPU_VIVANTE:
219 0 : return ACCL_NPU_VIVANTE;
220 0 : case ML_NNFW_HW_NPU_SLSI:
221 0 : return ACCL_NPU_SLSI;
222 0 : case ML_NNFW_HW_NPU_SR:
223 : /** @todo how to get srcn npu */
224 0 : return ACCL_NPU_SR;
225 0 : default:
226 0 : return ACCL_AUTO;
227 : }
228 : }
229 :
230 : /**
231 : * @brief Checks the availability of the given execution environments with custom option.
232 : */
233 : int
234 0 : ml_check_nnfw_availability_full (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
235 : const char *custom, bool *available)
236 : {
237 0 : const char *fw_name = NULL;
238 :
239 0 : check_feature_state (ML_FEATURE_INFERENCE);
240 :
241 0 : if (!available)
242 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
243 : "The parameter, available (bool *), is NULL. It should be a valid pointer of bool. E.g., bool a; ml_check_nnfw_availability_full (..., &a);");
244 :
245 : /* init false */
246 0 : *available = false;
247 :
248 0 : if (nnfw == ML_NNFW_TYPE_ANY)
249 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
250 : "The parameter, nnfw (ml_nnfw_type_e), is ML_NNFW_TYPE_ANY. It should specify the framework to be probed for the hardware availability.");
251 :
252 0 : fw_name = _ml_get_nnfw_subplugin_name (nnfw);
253 :
254 0 : if (fw_name) {
255 0 : if (nnstreamer_filter_find (fw_name) != NULL) {
256 0 : accl_hw accl = _ml_nnfw_to_accl_hw (hw);
257 :
258 0 : if (gst_tensor_filter_check_hw_availability (fw_name, accl, custom)) {
259 0 : *available = true;
260 : } else {
261 0 : _ml_logi ("%s is supported but not with the specified hardware.",
262 : fw_name);
263 : }
264 : } else {
265 0 : _ml_logi ("%s is not supported.", fw_name);
266 : }
267 : } else {
268 0 : _ml_logw ("Cannot get the name of sub-plugin for given nnfw.");
269 : }
270 :
271 0 : return ML_ERROR_NONE;
272 : }
273 :
274 : /**
275 : * @brief Checks the availability of the given execution environments.
276 : */
277 : int
278 0 : ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
279 : bool *available)
280 : {
281 0 : return ml_check_nnfw_availability_full (nnfw, hw, NULL, available);
282 : }
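/**
 * Usage sketch (illustrative): probe availability before opening a model.
 * @code
 * bool available = false;
 * int status = ml_check_nnfw_availability (ML_NNFW_TYPE_TENSORFLOW_LITE,
 *     ML_NNFW_HW_ANY, &available);
 * if (status == ML_ERROR_NONE && available) {
 *   // safe to call ml_single_open () with this framework
 * }
 * @endcode
 */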
283 :
284 : /**
285 : * @brief Helper function to setup tensor memory (input or output)
286 : */
287 : static int
288 0 : __setup_tensor_memory (ml_tensors_data_h *tensor_handle,
289 : const GstTensorsInfo *gst_info)
290 : {
291 : ml_tensors_data_s *tensors;
292 : ml_tensors_info_h info;
293 : guint i;
294 0 : int status = ML_ERROR_NONE;
295 :
296 0 : if (!tensor_handle || !gst_info)
297 0 : return ML_ERROR_INVALID_PARAMETER;
298 :
299 0 : tensors = (ml_tensors_data_s *) * tensor_handle;
300 :
301 : /* Setup tensor buffer */
302 0 : if (tensors) {
303 : /* Reuse existing tensor: update info */
304 0 : _ml_tensors_info_free (tensors->info);
305 0 : _ml_tensors_info_copy_from_gst (tensors->info, gst_info);
306 : } else {
307 : /* Create new tensor */
308 0 : status = _ml_tensors_info_create_from_gst (&info, gst_info);
309 0 : if (status != ML_ERROR_NONE)
310 0 : return status;
311 :
312 0 : status = _ml_tensors_data_create_no_alloc (info, tensor_handle);
313 0 : ml_tensors_info_destroy (info);
314 :
315 0 : if (status != ML_ERROR_NONE)
316 0 : return status;
317 :
318 0 : tensors = (ml_tensors_data_s *) * tensor_handle;
319 : }
320 :
321 : /* Setup tensor array */
322 0 : tensors->num_tensors = gst_info->num_tensors;
323 0 : for (i = 0; i < tensors->num_tensors; i++) {
324 : /** memory will be allocated by tensor_filter_single */
325 0 : tensors->tensors[i].data = NULL;
326 0 : tensors->tensors[i].size = gst_tensors_info_get_size (gst_info, i);
327 : }
328 :
329 0 : return ML_ERROR_NONE;
330 : }
331 :
332 : /**
333 : * @brief setup input and output tensor memory to pass to the tensor_filter.
334 : * @note this tensor memory wrapper will be reused for each invoke.
335 : */
336 : static void
337 0 : __setup_in_out_tensors (ml_single *single_h)
338 : {
339 : int status;
340 :
341 : /* Setup input buffer */
342 0 : status = __setup_tensor_memory (&single_h->in_tensors, &single_h->in_info);
343 0 : if (status != ML_ERROR_NONE) {
344 0 : _ml_error_report ("Failed to setup input tensor memory: %d", status);
345 0 : return;
346 : }
347 :
348 : /* Setup output buffer */
349 0 : status = __setup_tensor_memory (&single_h->out_tensors, &single_h->out_info);
350 0 : if (status != ML_ERROR_NONE) {
351 0 : _ml_error_report ("Failed to setup output tensor memory: %d", status);
352 0 : return;
353 : }
354 : }
355 :
356 : /**
357 : * @brief To call the framework to destroy the allocated output data
358 : */
359 : static inline void
360 0 : __destroy_notify (gpointer data_h, gpointer single_data)
361 : {
362 : ml_single *single_h;
363 : ml_tensors_data_s *data;
364 :
365 0 : data = (ml_tensors_data_s *) data_h;
366 0 : single_h = (ml_single *) single_data;
367 :
368 0 : if (G_LIKELY (single_h->filter)) {
369 0 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
370 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors);
371 : }
372 : }
373 :
374 : /* reset callback function */
375 0 : data->destroy = NULL;
376 0 : }
377 :
378 : /**
379 : * @brief Wrapper function for __destroy_notify
380 : */
381 : static int
382 0 : ml_single_destroy_notify_cb (void *handle, void *user_data)
383 : {
384 0 : ml_tensors_data_h data = (ml_tensors_data_h) handle;
385 0 : ml_single_h single = (ml_single_h) user_data;
386 : ml_single *single_h;
387 0 : int status = ML_ERROR_NONE;
388 :
389 0 : if (G_UNLIKELY (!single))
390 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
391 : "Failed to destroy data buffer. Callback function argument from _ml_tensors_data_destroy_internal is invalid. The given 'user_data' is NULL. It appears to be an internal error of ML-API or the user thread has touched private data structure.");
392 0 : if (G_UNLIKELY (!data))
393 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
394 : "Failed to destroy data buffer. Callback function argument from _ml_tensors_data_destroy_internal is invalid. The given 'handle' is NULL. It appears to be an internal error of ML-API or the user thread has touched private data structure.");
395 :
396 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
397 :
398 0 : if (G_UNLIKELY (!single_h->filter)) {
399 0 : status = ML_ERROR_INVALID_PARAMETER;
400 0 : _ml_error_report
401 : ("Failed to destroy the data buffer. The handle instance (single_h) is invalid. It appears to be an internal error of ML-API of the user thread has touched private data structure.");
402 0 : goto exit;
403 : }
404 :
405 0 : single_h->destroy_data_list =
406 0 : g_list_remove (single_h->destroy_data_list, data);
407 0 : __destroy_notify (data, single_h);
408 :
409 0 : exit:
410 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
411 :
412 0 : return status;
413 : }
414 :
415 : /**
416 : * @brief setup the destroy notify for the allocated output data.
417 : * @note this stores the data entry in the single handle's list.
418 : * @note this has no overhead if the output allocation is performed by the
419 : * tensor filter element itself rather than by the framework.
420 : */
421 : static void
422 0 : set_destroy_notify (ml_single * single_h, ml_tensors_data_s * data,
423 : gboolean add)
424 : {
425 0 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
426 0 : data->destroy = ml_single_destroy_notify_cb;
427 0 : data->user_data = single_h;
428 0 : add = TRUE;
429 : }
430 :
431 0 : if (add) {
432 0 : single_h->destroy_data_list = g_list_append (single_h->destroy_data_list,
433 : (gpointer) data);
434 : }
435 0 : }
436 :
437 : /**
438 : * @brief Internal function to call subplugin's invoke
439 : */
440 : static inline int
441 0 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
442 : gboolean alloc_output)
443 : {
444 : ml_tensors_data_s *in_data, *out_data;
445 0 : int status = ML_ERROR_NONE;
446 :
447 0 : in_data = (ml_tensors_data_s *) in;
448 0 : out_data = (ml_tensors_data_s *) out;
449 :
450 : /* Prevent error case when input or output is null in invoke thread. */
451 0 : if (!in_data || !out_data) {
452 0 : _ml_error_report ("Failed to invoke a model, invalid data handle.");
453 0 : return ML_ERROR_STREAMS_PIPE;
454 : }
455 :
456 : /* Call the subplugin's invoke. */
457 0 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
458 0 : out_data->tensors, alloc_output)) {
459 0 : const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw);
460 0 : _ml_error_report
461 : ("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).",
462 : fw_name, fw_name, fw_name);
463 0 : status = ML_ERROR_STREAMS_PIPE;
464 : }
465 :
466 0 : return status;
467 : }
468 :
469 : /**
470 : * @brief Internal function to post-process given output.
471 : * @note Do not call this if single_h->free_output is false (output data is not allocated in single-shot).
472 : */
473 : static inline void
474 0 : __process_output (ml_single * single_h, ml_tensors_data_h output)
475 : {
476 : ml_tensors_data_s *out_data;
477 :
478 0 : if (g_list_find (single_h->destroy_data_list, output)) {
479 : /**
480 : * Caller of the invoke thread has returned back with timeout.
481 : * So, free the memory allocated by the invoke as there is no receiver.
482 : */
483 0 : single_h->destroy_data_list =
484 0 : g_list_remove (single_h->destroy_data_list, output);
485 0 : ml_tensors_data_destroy (output);
486 : } else {
487 0 : out_data = (ml_tensors_data_s *) output;
488 0 : set_destroy_notify (single_h, out_data, FALSE);
489 : }
490 0 : }
491 :
492 : /**
493 : * @brief thread to execute calls to invoke
494 : *
495 : * @details The thread behavior is detailed as below:
496 : * - Starting with IDLE state, the thread waits for an input or change
497 : * in state externally.
498 : * - If state is not RUNNING, exit this thread, else process the
499 : * request.
500 : * - Process input, call invoke, process output. Any error in this
501 : * state sets the status to be used by ml_single_invoke().
502 : * - State is set back to IDLE and thread moves back to start.
503 : *
504 : * State changes performed by this function when:
505 : * RUNNING -> IDLE - processing is finished.
506 : * JOIN_REQUESTED -> IDLE - close is requested.
507 : *
508 : * @note An error while processing an input is provided back to the requesting
509 : * function; further processing by invoke_thread is not affected.
510 : */
511 : static void *
512 0 : invoke_thread (void *arg)
513 : {
514 : ml_single *single_h;
515 : ml_tensors_data_h input, output;
516 0 : gboolean alloc_output = FALSE;
517 :
518 0 : single_h = (ml_single *) arg;
519 :
520 0 : g_mutex_lock (&single_h->mutex);
521 :
522 0 : while (single_h->state <= RUNNING) {
523 0 : int status = ML_ERROR_NONE;
524 :
525 : /** wait for data */
526 0 : while (single_h->state != RUNNING) {
527 0 : g_cond_wait (&single_h->cond, &single_h->mutex);
528 0 : if (single_h->state == JOIN_REQUESTED)
529 0 : goto exit;
530 : }
531 :
532 0 : input = single_h->input;
533 0 : output = single_h->output;
534 : /* Set null to prevent double-free. */
535 0 : single_h->input = single_h->output = NULL;
536 :
537 0 : single_h->invoking = TRUE;
538 0 : alloc_output = single_h->free_output;
539 0 : g_mutex_unlock (&single_h->mutex);
540 0 : status = __invoke (single_h, input, output, alloc_output);
541 0 : g_mutex_lock (&single_h->mutex);
542 : /* Clear input data after invoke is done. */
543 0 : ml_tensors_data_destroy (input);
544 0 : single_h->invoking = FALSE;
545 :
546 0 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
547 0 : if (alloc_output) {
548 0 : single_h->destroy_data_list =
549 0 : g_list_remove (single_h->destroy_data_list, output);
550 0 : ml_tensors_data_destroy (output);
551 : }
552 :
553 0 : if (single_h->state == JOIN_REQUESTED)
554 0 : goto exit;
555 0 : goto wait_for_next;
556 : }
557 :
558 0 : if (alloc_output)
559 0 : __process_output (single_h, output);
560 :
561 : /** loop over to wait for the next element */
562 0 : wait_for_next:
563 0 : single_h->status = status;
564 0 : if (single_h->state == RUNNING)
565 0 : single_h->state = IDLE;
566 0 : g_cond_broadcast (&single_h->cond);
567 : }
568 :
569 0 : exit:
570 : /* Do not set IDLE if JOIN_REQUESTED */
571 0 : if (single_h->state == JOIN_REQUESTED) {
572 : /* Release input and output data */
573 0 : if (single_h->input)
574 0 : ml_tensors_data_destroy (single_h->input);
575 :
576 0 : if (alloc_output && single_h->output) {
577 0 : single_h->destroy_data_list =
578 0 : g_list_remove (single_h->destroy_data_list, single_h->output);
579 0 : ml_tensors_data_destroy (single_h->output);
580 : }
581 :
582 0 : single_h->input = single_h->output = NULL;
583 0 : g_cond_broadcast (&single_h->cond);
584 0 : } else if (single_h->state == RUNNING)
585 0 : single_h->state = IDLE;
586 0 : g_mutex_unlock (&single_h->mutex);
587 0 : return NULL;
588 : }
589 :
590 : /**
591 : * @brief Internal callback to handle the output of an asynchronous invoke.
592 : */
593 : static int
594 0 : ml_single_async_cb (GstTensorMemory * data, GstTensorsInfo * info,
595 : void *user_data)
596 : {
597 0 : ml_single_h single = (ml_single_h) user_data;
598 : ml_single *single_h;
599 0 : ml_tensors_info_h _info = NULL;
600 0 : ml_tensors_data_h _data = NULL;
601 : ml_tensors_data_s *_data_s;
602 : ml_tensors_data_cb async_cb;
603 : void *async_pdata;
604 : unsigned int i;
605 0 : int ret = ML_ERROR_NONE;
606 :
607 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
608 0 : async_cb = single_h->invoke_async_cb;
609 0 : async_pdata = single_h->invoke_async_pdata;
610 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
611 :
612 0 : if (!async_cb) {
613 : /* No callback registered; nothing to do. (Is the internal state changing?) */
614 0 : goto done;
615 : }
616 :
617 0 : ret = _ml_tensors_info_create_from_gst (&_info, info);
618 0 : if (ret != ML_ERROR_NONE) {
619 0 : _ml_error_report
620 : ("Cannot handle tensor data stream. Failed to create ml information.");
621 0 : goto done;
622 : }
623 :
624 0 : ret = _ml_tensors_data_create_no_alloc (_info, &_data);
625 0 : if (ret != ML_ERROR_NONE) {
626 0 : _ml_error_report
627 : ("Cannot handle tensor data stream. Failed to create ml data.");
628 0 : goto done;
629 : }
630 :
631 0 : _data_s = (ml_tensors_data_s *) _data;
632 0 : for (i = 0; i < info->num_tensors; ++i) {
633 0 : _data_s->tensors[i].data = data[i].data;
634 0 : _data_s->tensors[i].size = data[i].size;
635 : }
636 :
637 0 : ret = async_cb (_data, async_pdata);
638 0 : if (ret != ML_ERROR_NONE) {
639 0 : _ml_error_report
640 : ("Cannot handle tensor data stream. The callback function returns error '%d'.",
641 : ret);
642 : }
643 :
644 0 : done:
645 0 : if (_info) {
646 0 : ml_tensors_info_destroy (_info);
647 : }
648 :
649 0 : if (_data) {
650 0 : _ml_tensors_data_destroy_internal (_data, FALSE);
651 : }
652 :
653 0 : return (ret == ML_ERROR_NONE) ? 0 : -1;
654 : }
655 :
656 : /**
657 : * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model, and gets the updated output data information.
658 : * @details Note that a model/framework may not support setting such information.
659 : * @since_tizen 6.0
660 : * @param[in] single The model handle.
661 : * @param[in] in_info The handle of input tensors information.
662 : * @param[out] out_info The handle of output tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
663 : * @return @c 0 on success. Otherwise a negative error value.
664 : * @retval #ML_ERROR_NONE Successful
665 : * @retval #ML_ERROR_NOT_SUPPORTED This implies that the given framework does not support dynamic dimensions.
666 : * Use ml_single_get_input_info() and ml_single_get_output_info() instead for this framework.
667 : * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
668 : */
669 : static int
670 0 : ml_single_update_info (ml_single_h single,
671 : const ml_tensors_info_h in_info, ml_tensors_info_h * out_info)
672 : {
673 0 : if (!single)
674 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
675 : "The parameter, single (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
676 0 : if (!in_info)
677 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
678 : "The parameter, in_info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensors_info_h, usually created by ml_tensors_info_create() and configured by the application.");
679 0 : if (!out_info)
680 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
681 : "The parameter, out_info (ml_tensors_info_h *), is NULL. It should be a valid pointer to an instance ml_tensors_info_h, usually created by ml_tensors_info_h(). Note that out_info is supposed to be overwritten by this API call.");
682 :
683 : /* init null */
684 0 : *out_info = NULL;
685 :
686 0 : _ml_error_report_return_continue_iferr (ml_single_set_input_info (single,
687 : in_info),
688 : "Configuring the neural network model with the given input information has failed with %d error code. The given input information ('in_info' parameter) might be invalid or the given neural network cannot accept it as its input data.",
689 : _ERRNO);
690 :
691 0 : __setup_in_out_tensors (single);
692 0 : _ml_error_report_return_continue_iferr (ml_single_get_output_info (single,
693 : out_info),
694 : "Fetching output info after configuring input information has failed with %d error code.",
695 : _ERRNO);
696 :
697 0 : return ML_ERROR_NONE;
698 : }
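/**
 * Usage sketch (illustrative): the public flow this internal helper serves,
 * i.e., reconfiguring input info and fetching the resulting output info.
 * @code
 * ml_tensors_info_h in_info, out_info;
 * // ... create and fill in_info with the new dimensions ...
 * status = ml_single_set_input_info (single, in_info);
 * if (status == ML_ERROR_NONE)
 *   status = ml_single_get_output_info (single, &out_info);
 * @endcode
 */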
699 :
700 : /**
701 : * @brief Internal function to get the gst info from tensor-filter.
702 : */
703 : static void
704 0 : ml_single_get_gst_info (ml_single * single_h, gboolean is_input,
705 : GstTensorsInfo * gst_info)
706 : {
707 : const gchar *prop_prefix, *prop_name, *prop_type;
708 : gchar *val;
709 : guint num;
710 :
711 0 : if (is_input) {
712 0 : prop_prefix = INPUT_STR;
713 0 : prop_type = CONCAT_MACRO_STR (INPUT_STR, TYPE_STR);
714 0 : prop_name = CONCAT_MACRO_STR (INPUT_STR, NAME_STR);
715 : } else {
716 0 : prop_prefix = OUTPUT_STR;
717 0 : prop_type = CONCAT_MACRO_STR (OUTPUT_STR, TYPE_STR);
718 0 : prop_name = CONCAT_MACRO_STR (OUTPUT_STR, NAME_STR);
719 : }
720 :
721 0 : gst_tensors_info_init (gst_info);
722 :
723 : /* get dimensions */
724 0 : g_object_get (single_h->filter, prop_prefix, &val, NULL);
725 0 : num = gst_tensors_info_parse_dimensions_string (gst_info, val);
726 0 : g_free (val);
727 :
728 : /* set the number of tensors */
729 0 : gst_info->num_tensors = num;
730 :
731 : /* get types */
732 0 : g_object_get (single_h->filter, prop_type, &val, NULL);
733 0 : num = gst_tensors_info_parse_types_string (gst_info, val);
734 0 : g_free (val);
735 :
736 0 : if (gst_info->num_tensors != num) {
737 0 : _ml_logw ("The number of tensor type is mismatched in filter.");
738 : }
739 :
740 : /* get names */
741 0 : g_object_get (single_h->filter, prop_name, &val, NULL);
742 0 : num = gst_tensors_info_parse_names_string (gst_info, val);
743 0 : g_free (val);
744 :
745 0 : if (gst_info->num_tensors != num) {
746 0 : _ml_logw ("The number of tensor name is mismatched in filter.");
747 : }
748 :
749 0 : if (single_h->invoke_dynamic) {
750 : /* flexible tensor stream */
751 0 : gst_info->format = _NNS_TENSOR_FORMAT_FLEXIBLE;
752 :
753 : /** @todo Consider multiple input tensors while invoking a model. */
754 0 : if (gst_info->num_tensors == 0) {
755 0 : gst_info->num_tensors = 1;
756 : }
757 : }
758 0 : }
759 :
760 : /**
761 : * @brief Internal function to set the gst info in tensor-filter.
762 : */
763 : static int
764 0 : ml_single_set_gst_info (ml_single * single_h, const GstTensorsInfo * in_info)
765 : {
766 : GstTensorsInfo out_info;
767 0 : int status = ML_ERROR_NONE;
768 0 : int ret = -EINVAL;
769 :
770 0 : gst_tensors_info_init (&out_info);
771 0 : ret = single_h->klass->set_input_info (single_h->filter, in_info, &out_info);
772 0 : if (ret == 0) {
773 0 : gst_tensors_info_free (&single_h->in_info);
774 0 : gst_tensors_info_free (&single_h->out_info);
775 0 : gst_tensors_info_copy (&single_h->in_info, in_info);
776 0 : gst_tensors_info_copy (&single_h->out_info, &out_info);
777 :
778 0 : __setup_in_out_tensors (single_h);
779 0 : } else if (ret == -ENOENT) {
780 0 : status = ML_ERROR_NOT_SUPPORTED;
781 : } else {
782 0 : status = ML_ERROR_INVALID_PARAMETER;
783 : }
784 :
785 0 : gst_tensors_info_free (&out_info);
786 :
787 0 : return status;
788 : }
789 :
790 : /**
791 : * @brief Set the info for input/output tensors
792 : */
793 : static int
794 0 : ml_single_set_inout_tensors_info (GObject * object,
795 : const gboolean is_input, ml_tensors_info_s * tensors_info)
796 : {
797 0 : int status = ML_ERROR_NONE;
798 : GstTensorsInfo info;
799 : gchar *str_dim, *str_type, *str_name;
800 : const gchar *str_type_name, *str_name_name;
801 : const gchar *prefix;
802 :
803 0 : if (is_input) {
804 0 : prefix = INPUT_STR;
805 0 : str_type_name = CONCAT_MACRO_STR (INPUT_STR, TYPE_STR);
806 0 : str_name_name = CONCAT_MACRO_STR (INPUT_STR, NAME_STR);
807 : } else {
808 0 : prefix = OUTPUT_STR;
809 0 : str_type_name = CONCAT_MACRO_STR (OUTPUT_STR, TYPE_STR);
810 0 : str_name_name = CONCAT_MACRO_STR (OUTPUT_STR, NAME_STR);
811 : }
812 :
813 0 : _ml_error_report_return_continue_iferr
814 : (_ml_tensors_info_copy_from_ml (&info, tensors_info),
815 : "Cannot fetch tensor-info from the given information. Error code: %d",
816 : _ERRNO);
817 :
818 : /* Set in/out tensor-info option strings */
819 0 : str_dim = gst_tensors_info_get_dimensions_string (&info);
820 0 : str_type = gst_tensors_info_get_types_string (&info);
821 0 : str_name = gst_tensors_info_get_names_string (&info);
822 :
823 0 : if (!str_dim || !str_type || !str_name) {
824 0 : if (!str_dim)
825 0 : _ml_error_report
826 : ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor dimension information.");
827 0 : if (!str_type)
828 0 : _ml_error_report
829 : ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor type information.");
830 0 : if (!str_name)
831 0 : _ml_error_report
832 : ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor name information. Even if tensor names are not defined, this should be able to fetch a list of empty strings.");
833 :
834 0 : status = ML_ERROR_INVALID_PARAMETER;
835 : } else {
836 0 : g_object_set (object, prefix, str_dim, str_type_name, str_type,
837 : str_name_name, str_name, NULL);
838 : }
839 :
840 0 : g_free (str_dim);
841 0 : g_free (str_type);
842 0 : g_free (str_name);
843 :
844 0 : gst_tensors_info_free (&info);
845 :
846 0 : return status;
847 : }
848 :
849 : /**
850 : * @brief Internal static function to set tensors info in the handle.
851 : */
852 : static gboolean
853 0 : ml_single_set_info_in_handle (ml_single_h single, gboolean is_input,
854 : ml_tensors_info_s * tensors_info)
855 : {
856 : int status;
857 : ml_single *single_h;
858 : GstTensorsInfo *dest;
859 0 : gboolean configured = FALSE;
860 0 : gboolean is_valid = FALSE;
861 : GObject *filter_obj;
862 :
863 0 : single_h = (ml_single *) single;
864 0 : filter_obj = G_OBJECT (single_h->filter);
865 :
866 0 : if (is_input) {
867 0 : dest = &single_h->in_info;
868 0 : configured = single_h->klass->input_configured (single_h->filter);
869 : } else {
870 0 : dest = &single_h->out_info;
871 0 : configured = single_h->klass->output_configured (single_h->filter);
872 : }
873 :
874 0 : if (configured) {
875 : /* get configured info and compare with input info */
876 : GstTensorsInfo gst_info;
877 0 : ml_tensors_info_h info = NULL;
878 :
879 0 : ml_single_get_gst_info (single_h, is_input, &gst_info);
880 0 : _ml_tensors_info_create_from_gst (&info, &gst_info);
881 :
882 0 : gst_tensors_info_free (&gst_info);
883 :
884 0 : if (tensors_info && !ml_tensors_info_is_equal (tensors_info, info)) {
885 : /* given input info is not matched with configured */
886 0 : ml_tensors_info_destroy (info);
887 0 : if (is_input) {
888 : /* try to update tensors info */
889 0 : status = ml_single_update_info (single, tensors_info, &info);
890 0 : if (status != ML_ERROR_NONE)
891 0 : goto done;
892 : } else {
893 0 : goto done;
894 : }
895 : }
896 :
897 0 : gst_tensors_info_free (dest);
898 0 : _ml_tensors_info_copy_from_ml (dest, info);
899 0 : ml_tensors_info_destroy (info);
900 0 : } else if (tensors_info) {
901 : status =
902 0 : ml_single_set_inout_tensors_info (filter_obj, is_input, tensors_info);
903 0 : if (status != ML_ERROR_NONE)
904 0 : goto done;
905 :
906 0 : gst_tensors_info_free (dest);
907 0 : _ml_tensors_info_copy_from_ml (dest, tensors_info);
908 : }
909 :
910 0 : is_valid = gst_tensors_info_validate (dest);
911 :
912 0 : done:
913 0 : return is_valid;
914 : }
915 :
916 : /**
917 : * @brief Internal function to create and initialize the single handle.
918 : */
919 : static ml_single *
920 0 : ml_single_create_handle (ml_nnfw_type_e nnfw)
921 : {
922 : ml_single *single_h;
923 : GError *error;
924 0 : gboolean created = FALSE;
925 :
926 0 : single_h = g_new0 (ml_single, 1);
927 0 : if (single_h == NULL)
928 0 : _ml_error_report_return (NULL,
929 : "Failed to allocate memory for the single_h handle. Out of memory?");
930 :
931 0 : single_h->filter = g_object_new (G_TYPE_TENSOR_FILTER_SINGLE, NULL);
932 0 : if (single_h->filter == NULL) {
933 0 : _ml_error_report
934 : ("Failed to create a new instance for filter. Out of memory?");
935 0 : g_free (single_h);
936 0 : return NULL;
937 : }
938 :
939 0 : single_h->magic = ML_SINGLE_MAGIC;
940 0 : single_h->timeout = SINGLE_DEFAULT_TIMEOUT;
941 0 : single_h->nnfw = nnfw;
942 0 : single_h->state = IDLE;
943 0 : single_h->thread = NULL;
944 0 : single_h->input = NULL;
945 0 : single_h->output = NULL;
946 0 : single_h->destroy_data_list = NULL;
947 0 : single_h->invoking = FALSE;
948 :
949 0 : gst_tensors_info_init (&single_h->in_info);
950 0 : gst_tensors_info_init (&single_h->out_info);
951 0 : g_mutex_init (&single_h->mutex);
952 0 : g_cond_init (&single_h->cond);
953 :
954 0 : single_h->klass = g_type_class_ref (G_TYPE_TENSOR_FILTER_SINGLE);
955 0 : if (single_h->klass == NULL) {
956 0 : _ml_error_report
957 : ("Failed to get class of the tensor-filter of single API. This binary is not compiled properly or required libraries are not loaded.");
958 0 : goto done;
959 : }
960 :
961 0 : single_h->thread =
962 0 : g_thread_try_new (NULL, invoke_thread, (gpointer) single_h, &error);
963 0 : if (single_h->thread == NULL) {
964 0 : _ml_error_report
965 : ("Failed to create the invoke thread of single API, g_thread_try_new has reported an error: %s.",
966 : error->message);
967 0 : g_clear_error (&error);
968 0 : goto done;
969 : }
970 :
971 0 : created = TRUE;
972 :
973 0 : done:
974 0 : if (!created) {
975 0 : ml_single_close (single_h);
976 0 : single_h = NULL;
977 : }
978 :
979 0 : return single_h;
980 : }
981 :
982 : /**
983 : * @brief Validate arguments for open
984 : */
985 : static int
986 0 : _ml_single_open_custom_validate_arguments (ml_single_h * single,
987 : ml_single_preset * info)
988 : {
989 0 : if (!single)
990 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
991 : "The parameter, 'single' (ml_single_h *), is NULL. It should be a valid pointer to an instance of ml_single_h.");
992 0 : if (!info)
993 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
994 : "The parameter, 'info' (ml_single_preset *), is NULL. It should be a valid pointer to a valid instance of ml_single_preset.");
995 :
996 : /* Validate input tensor info. */
997 0 : if (info->input_info && !ml_tensors_info_is_valid (info->input_info))
998 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
999 : "The parameter, 'info' (ml_single_preset *), is not valid. It has 'input_info' entry that cannot be validated. ml_tensors_info_is_valid(info->input_info) has failed while info->input_info exists.");
1000 :
1001 : /* Validate output tensor info. */
1002 0 : if (info->output_info && !ml_tensors_info_is_valid (info->output_info))
1003 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1004 : "The parameter, 'info' (ml_single_preset *), is not valid. It has 'output_info' entry that cannot be validated. ml_tensors_info_is_valid(info->output_info) has failed while info->output_info exists.");
1005 :
1006 0 : if (!info->models)
1007 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1008 : "The parameter, 'info' (ml_single_preset *), is not valid. Its models entry if NULL (info->models is NULL).");
1009 :
1010 0 : if (info->invoke_async && !info->invoke_async_cb)
1011 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1012 : "The parameter, 'info' (ml_single_preset *), is not valid. It has 'invoke_async' entry but its callback 'invoke_async_cb' is NULL");
1013 :
1014 0 : return ML_ERROR_NONE;
1015 : }
1016 :
1017 : /**
1018 : * @brief Internal function to convert accelerator as tensor_filter property format.
1019 : * @note returned value must be freed by the caller
1020 : * @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c.
1021 : */
1022 : char *
1023 0 : _ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw)
1024 : {
1025 : const gchar *hw_name;
1026 0 : const gchar *use_accl = "true:";
1027 0 : gchar *str_prop = NULL;
1028 :
1029 0 : hw_name = get_accl_hw_str (_ml_nnfw_to_accl_hw (hw));
1030 0 : str_prop = g_strdup_printf ("%s%s", use_accl, hw_name);
1031 :
1032 0 : return str_prop;
1033 : }
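/* Illustrative example (assuming get_accl_hw_str () yields "cpu" here):
 *   _ml_nnfw_to_str_prop (ML_NNFW_HW_CPU) returns "true:cpu",
 * which matches the tensor_filter "accelerator" property format.
 */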
1034 :
1035 : /**
1036 : * @brief Opens an ML model with the custom options and returns the instance as a handle.
1037 : */
1038 : int
1039 0 : ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
1040 : {
1041 : ml_single *single_h;
1042 : GObject *filter_obj;
1043 0 : int status = ML_ERROR_NONE;
1044 : ml_tensors_info_s *in_tensors_info, *out_tensors_info;
1045 : ml_nnfw_type_e nnfw;
1046 : ml_nnfw_hw_e hw;
1047 : const gchar *fw_name;
1048 0 : g_autofree gchar *converted_models = NULL;
1049 : gchar **list_models;
1050 : guint i, num_models;
1051 : char *hw_name;
1052 :
1053 0 : check_feature_state (ML_FEATURE_INFERENCE);
1054 :
1055 : /* Validate the params */
1056 0 : _ml_error_report_return_continue_iferr
1057 : (_ml_single_open_custom_validate_arguments (single, info),
1058 : "The parameter, 'info' (ml_single_preset *), cannot be validated. Please provide valid information for this object.");
1059 :
1060 : /* init null */
1061 0 : *single = NULL;
1062 :
1063 0 : in_tensors_info = (ml_tensors_info_s *) info->input_info;
1064 0 : out_tensors_info = (ml_tensors_info_s *) info->output_info;
1065 0 : nnfw = info->nnfw;
1066 0 : hw = info->hw;
1067 0 : fw_name = _ml_get_nnfw_subplugin_name (nnfw);
1068 0 : converted_models = _ml_convert_predefined_entity (info->models);
1069 :
1070 : /**
1071 : * 1. Determine nnfw and validate model file
1072 : */
1073 0 : list_models = g_strsplit (converted_models, ",", -1);
1074 0 : num_models = g_strv_length (list_models);
1075 0 : for (i = 0; i < num_models; i++)
1076 0 : g_strstrip (list_models[i]);
1077 :
1078 0 : status = _ml_validate_model_file ((const char **) list_models, num_models,
1079 : &nnfw);
1080 0 : if (status != ML_ERROR_NONE) {
1081 0 : _ml_error_report_continue
1082 : ("Cannot validate the model (1st model: %s. # models: %d). Error code: %d",
1083 : list_models[0], num_models, status);
1084 0 : g_strfreev (list_models);
1085 0 : return status;
1086 : }
1087 :
1088 0 : g_strfreev (list_models);
1089 :
1090 : /**
1091 : * 2. Determine hw
1092 : * (Currently CPU is assumed; support others later.)
1093 : */
1094 0 : if (!_ml_nnfw_is_available (nnfw, hw)) {
1095 0 : _ml_error_report_return (ML_ERROR_NOT_SUPPORTED,
1096 : "The given nnfw, '%s', is not supported. There is no corresponding tensor-filter subplugin available or the given hardware requirement is not supported for the given nnfw.",
1097 : fw_name);
1098 : }
1099 :
1100 : /* Create ml_single object */
1101 0 : if ((single_h = ml_single_create_handle (nnfw)) == NULL) {
1102 0 : _ml_error_report_return_continue (ML_ERROR_OUT_OF_MEMORY,
1103 : "Cannot create handle for the given nnfw, %s", fw_name);
1104 : }
1105 :
1106 0 : single_h->invoke_dynamic = info->invoke_dynamic;
1107 0 : single_h->invoke_async = info->invoke_async;
1108 0 : single_h->invoke_async_cb = info->invoke_async_cb;
1109 0 : single_h->invoke_async_pdata = info->invoke_async_pdata;
1110 :
1111 0 : filter_obj = G_OBJECT (single_h->filter);
1112 :
1113 : /**
1114 : * 3. Construct a direct connection with the nnfw.
1115 : * Note that we do not construct a pipeline since 2019.12.
1116 : */
1117 0 : if (nnfw == ML_NNFW_TYPE_TENSORFLOW || nnfw == ML_NNFW_TYPE_SNAP ||
1118 0 : nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE ||
1119 0 : nnfw == ML_NNFW_TYPE_NCNN) {
1120 : /* set input and output tensors information */
1121 0 : if (in_tensors_info && out_tensors_info) {
1122 : status =
1123 0 : ml_single_set_inout_tensors_info (filter_obj, TRUE, in_tensors_info);
1124 0 : if (status != ML_ERROR_NONE) {
1125 0 : _ml_error_report_continue
1126 : ("Input tensors info is given; however, failed to set input tensors info. Error code: %d",
1127 : status);
1128 0 : goto error;
1129 : }
1130 :
1131 : status =
1132 0 : ml_single_set_inout_tensors_info (filter_obj, FALSE,
1133 : out_tensors_info);
1134 0 : if (status != ML_ERROR_NONE) {
1135 0 : _ml_error_report_continue
1136 : ("Output tensors info is given; however, failed to set output tensors info. Error code: %d",
1137 : status);
1138 0 : goto error;
1139 : }
1140 : } else {
1141 0 : _ml_error_report
1142 : ("To run the given nnfw, '%s', with a neural network model, both input and output information should be provided.",
1143 : fw_name);
1144 0 : status = ML_ERROR_INVALID_PARAMETER;
1145 0 : goto error;
1146 : }
1147 0 : } else if (nnfw == ML_NNFW_TYPE_ARMNN) {
1148 : /* set input and output tensors information, if available */
1149 0 : if (in_tensors_info) {
1150 : status =
1151 0 : ml_single_set_inout_tensors_info (filter_obj, TRUE, in_tensors_info);
1152 0 : if (status != ML_ERROR_NONE) {
1153 0 : _ml_error_report_continue
1154 : ("With nnfw '%s', input tensors info is optional. However, the user has provided an invalid input tensors info. Error code: %d",
1155 : fw_name, status);
1156 0 : goto error;
1157 : }
1158 : }
1159 0 : if (out_tensors_info) {
1160 : status =
1161 0 : ml_single_set_inout_tensors_info (filter_obj, FALSE,
1162 : out_tensors_info);
1163 0 : if (status != ML_ERROR_NONE) {
1164 0 : _ml_error_report_continue
1165 : ("With nnfw '%s', output tensors info is optional. However, the user has provided an invalid output tensors info. Error code: %d",
1166 : fw_name, status);
1167 0 : goto error;
1168 : }
1169 : }
1170 : }
1171 :
1172 : /* set accelerator, framework, model files and custom option */
1173 0 : if (info->fw_name) {
1174 0 : fw_name = (const char *) info->fw_name;
1175 : } else {
1176 0 : fw_name = _ml_get_nnfw_subplugin_name (nnfw); /* retry for "auto" */
1177 : }
1178 0 : hw_name = _ml_nnfw_to_str_prop (hw);
1179 :
1180 0 : g_object_set (filter_obj, "framework", fw_name, "accelerator", hw_name,
1181 : "model", converted_models, "invoke-dynamic", single_h->invoke_dynamic,
1182 : "invoke-async", single_h->invoke_async, "latency", info->latency_mode, NULL);
1183 0 : g_free (hw_name);
1184 :
1185 0 : if (info->custom_option) {
1186 0 : g_object_set (filter_obj, "custom", info->custom_option, NULL);
1187 : }
1188 :
1189 : /* Set async callback. */
1190 0 : if (single_h->invoke_async) {
1191 0 : single_h->klass->set_invoke_async_callback (single_h->filter,
1192 : ml_single_async_cb, single_h);
1193 : }
1194 :
1195 : /* 4. Start the nnfw to get inout configurations if needed */
1196 0 : if (!single_h->klass->start (single_h->filter)) {
1197 0 : _ml_error_report
1198 : ("Failed to start NNFW, '%s', to get inout configurations. Subplugin class method has failed to start.",
1199 : fw_name);
1200 0 : status = ML_ERROR_STREAMS_PIPE;
1201 0 : goto error;
1202 : }
1203 :
1204 0 : if (nnfw == ML_NNFW_TYPE_NNTR_INF) {
1205 0 : if (!in_tensors_info || !out_tensors_info) {
1206 0 : if (!in_tensors_info) {
1207 : GstTensorsInfo in_info;
1208 :
1209 0 : gst_tensors_info_init (&in_info);
1210 :
1211 : /* ml_single_set_input_info() can't be done as it checks num_tensors */
1212 0 : status = ml_single_set_gst_info (single_h, &in_info);
1213 0 : if (status != ML_ERROR_NONE) {
1214 0 : _ml_error_report_continue
1215 : ("NNTrainer-inference-single cannot configure single_h handle instance with the given in_info. This might be an ML-API / NNTrainer internal error. Error Code: %d",
1216 : status);
1217 0 : goto error;
1218 : }
1219 : } else {
1220 0 : status = ml_single_set_input_info (single_h, in_tensors_info);
1221 0 : if (status != ML_ERROR_NONE) {
1222 0 : _ml_error_report_continue
1223 : ("NNTrainer-inference-single cannot configure single_h handle instance with the given in_info from the user. Error code: %d",
1224 : status);
1225 0 : goto error;
1226 : }
1227 : }
1228 : }
1229 : }
1230 :
1231 : /* 5. Set in/out configs and metadata */
1232 0 : if (!ml_single_set_info_in_handle (single_h, TRUE, in_tensors_info)) {
1233 0 : _ml_error_report
1234 : ("The input tensors info is invalid. Cannot configure single_h handle with the given input tensors info.");
1235 0 : status = ML_ERROR_INVALID_PARAMETER;
1236 0 : goto error;
1237 : }
1238 :
1239 0 : if (!ml_single_set_info_in_handle (single_h, FALSE, out_tensors_info)) {
1240 0 : _ml_error_report
1241 : ("The output tensors info is invalid. Cannot configure single_h handle with the given output tensors info.");
1242 0 : status = ML_ERROR_INVALID_PARAMETER;
1243 0 : goto error;
1244 : }
1245 :
1246 : /* Setup input and output memory buffers for invoke */
1247 0 : __setup_in_out_tensors (single_h);
1248 :
1249 0 : *single = single_h;
1250 0 : return ML_ERROR_NONE;
1251 :
1252 0 : error:
1253 0 : ml_single_close (single_h);
1254 0 : return status;
1255 : }
1256 :
1257 : /**
1258 : * @brief Opens an ML model and returns the instance as a handle.
1259 : */
1260 : int
1261 0 : ml_single_open (ml_single_h * single, const char *model,
1262 : const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
1263 : ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw)
1264 : {
1265 0 : return ml_single_open_full (single, model, input_info, output_info, nnfw, hw,
1266 : NULL);
1267 : }
1268 :
1269 : /**
1270 : * @brief Opens an ML model and returns the instance as a handle.
1271 : */
1272 : int
1273 0 : ml_single_open_full (ml_single_h * single, const char *model,
1274 : const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
1275 : ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, const char *custom_option)
1276 : {
1277 0 : ml_single_preset info = { 0, };
1278 :
1279 0 : info.input_info = input_info;
1280 0 : info.output_info = output_info;
1281 0 : info.nnfw = nnfw;
1282 0 : info.hw = hw;
1283 0 : info.models = (char *) model;
1284 0 : info.custom_option = (char *) custom_option;
1285 :
1286 0 : return ml_single_open_custom (single, &info);
1287 : }
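/**
 * Usage sketch (illustrative; the model path is hypothetical): open a
 * TF-Lite model and let the framework derive tensor information from it.
 * @code
 * ml_single_h single = NULL;
 * int status = ml_single_open (&single, "/path/to/model.tflite", NULL, NULL,
 *     ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
 * if (status == ML_ERROR_NONE) {
 *   // ... invoke the model ...
 *   ml_single_close (single);
 * }
 * @endcode
 */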
1288 :
1289 : /**
1290 : * @brief Open new single handle with given option.
1291 : */
1292 : int
1293 0 : ml_single_open_with_option (ml_single_h * single, const ml_option_h option)
1294 : {
1295 : void *value;
1296 0 : ml_single_preset info = { 0, };
1297 :
1298 0 : check_feature_state (ML_FEATURE_INFERENCE);
1299 :
1300 0 : if (!option) {
1301 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1302 : "The parameter, 'option' is NULL. It should be a valid ml_option_h, which should be created by ml_option_create().");
1303 : }
1304 :
1305 0 : if (!single)
1306 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1307 : "The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
1308 :
1309 0 : if (ML_ERROR_NONE == ml_option_get (option, "input_info", &value))
1310 0 : info.input_info = value;
1311 0 : if (ML_ERROR_NONE == ml_option_get (option, "output_info", &value))
1312 0 : info.output_info = value;
1313 0 : if (ML_ERROR_NONE == ml_option_get (option, "nnfw", &value))
1314 0 : info.nnfw = *((ml_nnfw_type_e *) value);
1315 0 : if (ML_ERROR_NONE == ml_option_get (option, "hw", &value))
1316 0 : info.hw = *((ml_nnfw_hw_e *) value);
1317 0 : if (ML_ERROR_NONE == ml_option_get (option, "models", &value))
1318 0 : info.models = (gchar *) value;
1319 0 : if (ML_ERROR_NONE == ml_option_get (option, "custom", &value))
1320 0 : info.custom_option = (gchar *) value;
1321 0 : if (ML_ERROR_NONE == ml_option_get (option, "framework_name", &value) ||
1322 0 : ML_ERROR_NONE == ml_option_get (option, "framework", &value))
1323 0 : info.fw_name = (gchar *) value;
1324 0 : if (ML_ERROR_NONE == ml_option_get (option, "invoke_dynamic", &value)) {
1325 0 : if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
1326 0 : info.invoke_dynamic = TRUE;
1327 : }
1328 0 : if (ML_ERROR_NONE == ml_option_get (option, "invoke_async", &value)) {
1329 0 : if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
1330 0 : info.invoke_async = TRUE;
1331 : }
1332 0 : if (ML_ERROR_NONE == ml_option_get (option, "async_callback", &value)) {
1333 0 : info.invoke_async_cb = (ml_tensors_data_cb) value;
1334 : }
1335 0 : if (ML_ERROR_NONE == ml_option_get (option, "async_data", &value)) {
1336 0 : info.invoke_async_pdata = value;
1337 : }
1338 0 : if (ML_ERROR_NONE == ml_option_get (option, "profile", &value)) {
1339 0 : if (g_ascii_strcasecmp ((gchar *) value, "true") == 0) {
1340 0 : info.latency_mode = 1;
1341 0 : } else if (g_ascii_strtoll ((gchar *) value, NULL, 10) > 0) {
1342 0 : info.latency_mode = 1;
1343 : } else {
1344 0 : info.latency_mode = 0;
1345 : }
1346 : }
1347 :
1348 0 : return ml_single_open_custom (single, &info);
1349 : }
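/**
 * Usage sketch (illustrative; the keys follow the ml_option_get() calls above,
 * and ml_option_set()'s signature is assumed from the public ML API):
 * @code
 * ml_option_h option = NULL;
 * ml_option_create (&option);
 * ml_option_set (option, "models", g_strdup ("/path/to/model.tflite"), g_free);
 * ml_option_set (option, "invoke_dynamic", g_strdup ("true"), g_free);
 * status = ml_single_open_with_option (&single, option);
 * ml_option_destroy (option);
 * @endcode
 */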
1350 :
1351 : /**
1352 : * @brief Closes the opened model handle.
1353 : *
1354 : * @details State changes performed by this function:
1355 : * ANY STATE -> JOIN REQUESTED - on receiving a request to close
1356 : *
1357 : * Once requested to close, invoke_thread() will exit after processing
1358 : * the current input (if any).
1359 : */
1360 : int
1361 0 : ml_single_close (ml_single_h single)
1362 : {
1363 : ml_single *single_h;
1364 :
1365 0 : check_feature_state (ML_FEATURE_INFERENCE);
1366 :
1367 0 : if (!single)
1368 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1369 : "The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
1370 :
1371 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 1);
1372 :
1373 : /* First, clear all callbacks. */
1374 0 : single_h->invoke_async_cb = NULL;
1375 :
1376 0 : single_h->state = JOIN_REQUESTED;
1377 0 : g_cond_broadcast (&single_h->cond);
1378 : /** Wait until invoke process is finished */
1379 0 : while (single_h->invoking) {
1380 0 : _ml_logd ("Wait until invoke is finished and close the handle.");
1381 0 : g_cond_wait (&single_h->cond, &single_h->mutex);
1382 : }
1383 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1384 :
1385 0 : if (single_h->thread != NULL)
1386 0 : g_thread_join (single_h->thread);
1387 :
1388 : /** locking ensures correctness with parallel calls on close */
1389 0 : if (single_h->filter) {
1390 0 : g_list_foreach (single_h->destroy_data_list, __destroy_notify, single_h);
1391 0 : g_list_free (single_h->destroy_data_list);
1392 :
1393 0 : if (single_h->klass)
1394 0 : single_h->klass->stop (single_h->filter);
1395 :
1396 0 : g_object_unref (single_h->filter);
1397 0 : single_h->filter = NULL;
1398 : }
1399 :
1400 0 : if (single_h->klass) {
1401 0 : g_type_class_unref (single_h->klass);
1402 0 : single_h->klass = NULL;
1403 : }
1404 :
1405 0 : gst_tensors_info_free (&single_h->in_info);
1406 0 : gst_tensors_info_free (&single_h->out_info);
1407 :
1408 0 : ml_tensors_data_destroy (single_h->in_tensors);
1409 0 : ml_tensors_data_destroy (single_h->out_tensors);
1410 :
1411 0 : g_cond_clear (&single_h->cond);
1412 0 : g_mutex_clear (&single_h->mutex);
1413 :
1414 0 : g_free (single_h);
1415 0 : return ML_ERROR_NONE;
1416 : }
1417 :
1418 : /**
1419 : * @brief Internal function to validate input/output data.
1420 : */
1421 : static int
1422 0 : _ml_single_invoke_validate_data (ml_single_h single,
1423 : const ml_tensors_data_h data, const gboolean is_input)
1424 : {
1425 : ml_single *single_h;
1426 : ml_tensors_data_s *_data;
1427 : ml_tensors_data_s *_model;
1428 : guint i;
1429 : size_t raw_size;
1430 :
1431 0 : single_h = (ml_single *) single;
1432 0 : _data = (ml_tensors_data_s *) data;
1433 :
1434 0 : if (G_UNLIKELY (!_data))
1435 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1436 : "(internal function) The parameter, 'data' (const ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h.");
1437 :
1438 0 : if (is_input)
1439 0 : _model = (ml_tensors_data_s *) single_h->in_tensors;
1440 : else
1441 0 : _model = (ml_tensors_data_s *) single_h->out_tensors;
1442 :
1443 0 : if (G_UNLIKELY (_data->num_tensors != _model->num_tensors))
1444 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1445 : "(internal function) The number of %s tensors is not compatible with model. Given: %u, Expected: %u.",
1446 : (is_input) ? "input" : "output", _data->num_tensors,
1447 : _model->num_tensors);
1448 :
1449 0 : for (i = 0; i < _data->num_tensors; i++) {
1450 0 : if (G_UNLIKELY (!_data->tensors[i].data))
1451 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1452 : "The %d-th input tensor is not valid. There is no valid dimension metadata for this tensor.",
1453 : i);
1454 :
1455 0 : if (single_h->invoke_dynamic) {
1456 : /* If tensor is not static, we cannot check tensor data size. */
1457 0 : continue;
1458 : }
1459 :
1460 0 : raw_size = _model->tensors[i].size;
1461 0 : if (G_UNLIKELY (_data->tensors[i].size != raw_size))
1462 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1463 : "The size of %d-th %s tensor is not compatible with model. Given: %zu, Expected: %zu.",
1464 : i, (is_input) ? "input" : "output", _data->tensors[i].size, raw_size);
1465 : }
1466 :
1467 0 : return ML_ERROR_NONE;
1468 : }
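/**
 * Usage sketch (illustrative): a minimal synchronous invoke, letting the
 * API allocate the output buffers.
 * @code
 * ml_tensors_info_h in_info = NULL;
 * ml_tensors_data_h in_data = NULL, out_data = NULL;
 *
 * ml_single_get_input_info (single, &in_info);
 * ml_tensors_data_create (in_info, &in_data);
 * // ... fill the buffers, e.g., with ml_tensors_data_set_tensor_data () ...
 * status = ml_single_invoke (single, in_data, &out_data);
 * if (status == ML_ERROR_NONE)
 *   ml_tensors_data_destroy (out_data);
 * ml_tensors_data_destroy (in_data);
 * ml_tensors_info_destroy (in_info);
 * @endcode
 */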
1469 :
1470 : /**
1471 : * @brief Internal function to invoke the model.
1472 : *
1473 : * @details State changes performed by this function:
1474 : * IDLE -> RUNNING - on receiving a valid request
1475 : *
1476 : * Invoke returns error if the current state is not IDLE.
1477 : * If IDLE, then invoke is requested to the thread.
1478 : * Invoke waits for the processing to be complete, and returns back
1479 : * the result once notified by the processing thread.
1480 : *
1481 : * @note IDLE is the valid thread state before and after this function call.
1482 : */
1483 : static int
1484 0 : _ml_single_invoke_internal (ml_single_h single,
1485 : const ml_tensors_data_h input, ml_tensors_data_h * output,
1486 : const gboolean need_alloc)
1487 : {
1488 : ml_single *single_h;
1489 : ml_tensors_data_h _in, _out;
1490 : gint64 end_time;
1491 0 : int status = ML_ERROR_NONE;
1492 :
1493 0 : check_feature_state (ML_FEATURE_INFERENCE);
1494 :
1495 0 : if (G_UNLIKELY (!single))
1496 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1497 : "(internal function) The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, usually created by ml_single_open().");
1498 :
1499 0 : if (G_UNLIKELY (!input))
1500 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1501 : "(internal function) The parameter, input (ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h.");
1502 :
1503 0 : if (G_UNLIKELY (!output))
1504 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1505 : "(internal function) The parameter, output (ml_tensors_data_h *), is NULL. It should be a valid pointer to an instance of ml_tensors_data_h to store the inference results.");
1506 :
1507 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1508 :
1509 0 : if (G_UNLIKELY (!single_h->filter)) {
1510 0 : _ml_error_report
1511 : ("The tensor_filter element of this single handle (single_h) is not valid. It appears that the handle (ml_single_h single) is not appropriately created by ml_single_open(), user thread has touched its internal data, or the handle is already closed or freed by user.");
1512 0 : status = ML_ERROR_INVALID_PARAMETER;
1513 0 : goto exit;
1514 : }
1515 :
1516 : /* Validate input/output data */
1517 0 : status = _ml_single_invoke_validate_data (single, input, TRUE);
1518 0 : if (status != ML_ERROR_NONE) {
1519 0 : _ml_error_report_continue
1520 : ("The input data for the inference is not valid: error code %d. Please check the dimensions, type, number-of-tensors, and size information of the input data.",
1521 : status);
1522 0 : goto exit;
1523 : }
1524 :
1525 0 : if (!need_alloc) {
1526 0 : status = _ml_single_invoke_validate_data (single, *output, FALSE);
1527 0 : if (status != ML_ERROR_NONE) {
1528 0 : _ml_error_report_continue
1529 : ("The output data buffer provided by the user is not valid for the given neural network mode: error code %d. Please check the dimensions, type, number-of-tensors, and size information of the output data buffer.",
1530 : status);
1531 0 : goto exit;
1532 : }
1533 : }
1534 :
1535 0 : if (single_h->state != IDLE) {
1536 0 : if (G_UNLIKELY (single_h->state == JOIN_REQUESTED)) {
1537 0 : _ml_error_report
1538 : ("The handle (single_h single) is closed or being closed awaiting for the last ongoing invocation. Invoking with such a handle is not allowed. Please open another single_h handle to invoke.");
1539 0 : status = ML_ERROR_STREAMS_PIPE;
1540 0 : goto exit;
1541 : }
1542 0 : _ml_error_report
1543 : ("The handle (single_h single) is busy. There is another thread waiting for inference results with this handle. Please retry invoking again later when the handle becomes idle after completing the current inference task.");
1544 0 : status = ML_ERROR_TRY_AGAIN;
1545 0 : goto exit;
1546 : }
1547 :
1548 : /* prepare output data */
1549 0 : if (need_alloc) {
1550 0 : *output = NULL;
1551 :
1552 0 : status = _ml_tensors_data_clone_no_alloc (single_h->out_tensors, &_out);
1553 0 : if (status != ML_ERROR_NONE)
1554 0 : goto exit;
1555 : } else {
1556 0 : _out = *output;
1557 : }
1558 :
1559 : /**
1560 : * Clone the input data here to prevent a use-after-free:
1561 : * single_h->input must be released only after __invoke() has returned.
1562 : */
1563 0 : status = ml_tensors_data_clone (input, &_in);
1564 0 : if (status != ML_ERROR_NONE)
1565 0 : goto exit;
1566 :
1567 0 : single_h->state = RUNNING;
1568 0 : single_h->free_output = need_alloc;
1569 0 : single_h->input = _in;
1570 0 : single_h->output = _out;
1571 :
1572 0 : if (single_h->timeout > 0) {
1573 : /* Wake up "invoke_thread" */
1574 0 : g_cond_broadcast (&single_h->cond);
1575 :
1576 : /* set timeout */
1577 0 : end_time = g_get_monotonic_time () +
1578 0 : single_h->timeout * G_TIME_SPAN_MILLISECOND;
1579 :
1580 0 : if (g_cond_wait_until (&single_h->cond, &single_h->mutex, end_time)) {
1581 0 : status = single_h->status;
1582 : } else {
1583 0 : _ml_logw ("Wait for invoke has timed out");
1584 0 : status = ML_ERROR_TIMED_OUT;
1585 : /** Notify invoke_thread not to process this request since it has timed out */
1586 0 : if (need_alloc)
1587 0 : set_destroy_notify (single_h, _out, TRUE);
1588 : }
1589 : } else {
1590 : /**
1591 : * We hold single_h->mutex here, so no other thread running
1592 : * ml_single_invoke() on the same handle can be inside this
1593 : * if-then-else block. That also means no other thread has an
1594 : * active invoke thread (calling __invoke()) with this handle,
1595 : * so __invoke() can be called directly without introducing
1596 : * yet another mutex for it.
1597 : */
1598 0 : single_h->invoking = TRUE;
1599 0 : status = __invoke (single_h, _in, _out, need_alloc);
1600 0 : ml_tensors_data_destroy (_in);
1601 0 : single_h->invoking = FALSE;
1602 0 : single_h->state = IDLE;
1603 :
1604 0 : if (status != ML_ERROR_NONE) {
1605 0 : if (need_alloc)
1606 0 : ml_tensors_data_destroy (_out);
1607 0 : goto exit;
1608 : }
1609 :
1610 0 : if (need_alloc)
1611 0 : __process_output (single_h, _out);
1612 : }
1613 :
1614 0 : exit:
1615 0 : if (status == ML_ERROR_NONE) {
1616 0 : if (need_alloc)
1617 0 : *output = _out;
1618 : }
1619 :
1620 0 : single_h->input = single_h->output = NULL;
1621 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1622 0 : return status;
1623 : }
1624 :
1625 : /**
1626 : * @brief Invokes the model with the given input data.
1627 : */
1628 : int
1629 0 : ml_single_invoke (ml_single_h single,
1630 : const ml_tensors_data_h input, ml_tensors_data_h * output)
1631 : {
1632 0 : return _ml_single_invoke_internal (single, input, output, TRUE);
1633 : }
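/**
 * Example: a minimal end-to-end call of ml_single_invoke(). This is an
 * illustrative sketch, not part of this file's build; the model path
 * "sample.tflite" and the framework choice are hypothetical and depend
 * on the actual environment.
 *
 * @code
 * ml_single_h single = NULL;
 * ml_tensors_info_h in_info = NULL;
 * ml_tensors_data_h input = NULL, output = NULL;
 *
 * if (ml_single_open (&single, "sample.tflite", NULL, NULL,
 *         ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY) != ML_ERROR_NONE)
 *   return; // opening the model failed
 *
 * ml_single_get_input_info (single, &in_info);
 * ml_tensors_data_create (in_info, &input); // allocates buffers per in_info
 * // ... fill the input buffers, e.g., with ml_tensors_data_set_tensor_data() ...
 *
 * // 'output' is allocated by the API (need_alloc path); destroy it when done.
 * if (ml_single_invoke (single, input, &output) == ML_ERROR_NONE)
 *   ml_tensors_data_destroy (output);
 *
 * ml_tensors_data_destroy (input);
 * ml_tensors_info_destroy (in_info);
 * ml_single_close (single);
 * @endcode
 */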
1634 :
1635 : /**
1636 : * @brief Invokes the model with the given input data and fills the output data handle.
1637 : */
1638 : int
1639 0 : ml_single_invoke_fast (ml_single_h single,
1640 : const ml_tensors_data_h input, ml_tensors_data_h output)
1641 : {
1642 0 : return _ml_single_invoke_internal (single, input, &output, FALSE);
1643 : }
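/**
 * Example: reusing a caller-owned output buffer with ml_single_invoke_fast()
 * to avoid a per-invoke allocation. Illustrative sketch assuming 'single'
 * and 'input' were prepared as in the previous example; note that this
 * entry point is exposed via the internal (Tizen) header, not the public API.
 *
 * @code
 * ml_tensors_info_h out_info = NULL;
 * ml_tensors_data_h output = NULL;
 * int frame;
 *
 * ml_single_get_output_info (single, &out_info);
 * ml_tensors_data_create (out_info, &output); // allocated once, reused below
 *
 * for (frame = 0; frame < 100; frame++) {
 *   // ... refresh the input buffers for this frame ...
 *   if (ml_single_invoke_fast (single, input, output) != ML_ERROR_NONE)
 *     break; // 'output' is filled in place on success
 *   // ... consume the output buffers ...
 * }
 *
 * ml_tensors_data_destroy (output);
 * ml_tensors_info_destroy (out_info);
 * @endcode
 */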
1644 :
1645 : /**
1646 : * @brief Gets the tensors info for the given handle.
1647 : * @param[out] info A pointer to a NULL (unallocated) instance.
1648 : */
1649 : static int
1650 0 : ml_single_get_tensors_info (ml_single_h single, gboolean is_input,
1651 : ml_tensors_info_h * info)
1652 : {
1653 : ml_single *single_h;
1654 0 : int status = ML_ERROR_NONE;
1655 :
1656 0 : check_feature_state (ML_FEATURE_INFERENCE);
1657 :
1658 0 : if (!single)
1659 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1660 : "(internal function) The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
1661 0 : if (!info)
1662 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1663 : "(internal function) The parameter, 'info' (ml_tensors_info_h *) is NULL. It should be a valid pointer to an empty (NULL) instance of ml_tensor_info_h, which is supposed to be filled with the fetched info by this function.");
1664 :
1665 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1666 :
1667 0 : if (is_input)
1668 0 : status = _ml_tensors_info_create_from_gst (info, &single_h->in_info);
1669 : else
1670 0 : status = _ml_tensors_info_create_from_gst (info, &single_h->out_info);
1671 :
1672 0 : if (status != ML_ERROR_NONE) {
1673 0 : _ml_error_report_continue
1674 : ("(internal function) Failed to create an entry for the ml_tensors_info_h instance. Error code: %d",
1675 : status);
1676 : }
1677 :
1678 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1679 0 : return status;
1680 : }
1681 :
1682 : /**
1683 : * @brief Gets the information of required input data for the given handle.
1684 : * @note information = (tensor dimension, type, name and so on)
1685 : */
1686 : int
1687 0 : ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
1688 : {
1689 0 : return ml_single_get_tensors_info (single, TRUE, info);
1690 : }
1691 :
1692 : /**
1693 : * @brief Gets the information of output data for the given handle.
1694 : * @note information = (tensor dimension, type, name and so on)
1695 : */
1696 : int
1697 0 : ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
1698 : {
1699 0 : return ml_single_get_tensors_info (single, FALSE, info);
1700 : }
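/**
 * Example: inspecting the input metadata of an opened handle. Illustrative
 * sketch; assumes 'single' is a valid handle from ml_single_open().
 *
 * @code
 * ml_tensors_info_h info = NULL;
 * unsigned int count = 0;
 * ml_tensor_dimension dim;
 *
 * if (ml_single_get_input_info (single, &info) == ML_ERROR_NONE) {
 *   ml_tensors_info_get_count (info, &count);
 *   ml_tensors_info_get_tensor_dimension (info, 0, dim);
 *   // dim[0] is the innermost (fastest-varying) dimension.
 *   ml_tensors_info_destroy (info); // caller owns the fetched info
 * }
 * @endcode
 */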
1701 :
1702 : /**
1703 : * @brief Sets the maximum amount of time to wait for an output, in milliseconds.
1704 : */
1705 : int
1706 0 : ml_single_set_timeout (ml_single_h single, unsigned int timeout)
1707 : {
1708 : ml_single *single_h;
1709 :
1710 0 : check_feature_state (ML_FEATURE_INFERENCE);
1711 :
1712 0 : if (!single)
1713 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1714 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1715 :
1716 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1717 :
1718 0 : single_h->timeout = (guint) timeout;
1719 :
1720 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1721 0 : return ML_ERROR_NONE;
1722 : }
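/**
 * Example: bounding an invocation with a timeout. Illustrative sketch; the
 * 500 ms budget is arbitrary. With a non-zero timeout, the request is served
 * by the internal invoke thread and ML_ERROR_TIMED_OUT is returned if no
 * result arrives in time; with the default timeout of 0, the call blocks
 * until the inference finishes.
 *
 * @code
 * int status;
 *
 * ml_single_set_timeout (single, 500); // in milliseconds
 * status = ml_single_invoke (single, input, &output);
 * if (status == ML_ERROR_TIMED_OUT) {
 *   // The inference did not finish within 500 ms; 'output' is not set.
 * }
 * @endcode
 */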
1723 :
1724 : /**
1725 : * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model.
1726 : */
1727 : int
1728 0 : ml_single_set_input_info (ml_single_h single, const ml_tensors_info_h info)
1729 : {
1730 : ml_single *single_h;
1731 : GstTensorsInfo gst_info;
1732 0 : int status = ML_ERROR_NONE;
1733 :
1734 0 : check_feature_state (ML_FEATURE_INFERENCE);
1735 :
1736 0 : if (!single)
1737 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1738 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1739 0 : if (!info)
1740 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1741 : "The parameter, info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensors_info_h, which is usually created by ml_tensors_info_create() or other APIs.");
1742 :
1743 0 : if (!ml_tensors_info_is_valid (info))
1744 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1745 : "The parameter, info (const ml_tensors_info_h), is not valid. Although it is not NULL, the content of 'info' is invalid. If it is created by ml_tensors_info_create(), which creates an empty instance, it should be filled by users afterwards. Please check if 'info' has all elements filled with valid values.");
1746 :
1747 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1748 0 : _ml_tensors_info_copy_from_ml (&gst_info, info);
1749 0 : status = ml_single_set_gst_info (single_h, &gst_info);
1750 0 : gst_tensors_info_free (&gst_info);
1751 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1752 :
1753 0 : if (status != ML_ERROR_NONE)
1754 0 : _ml_error_report_continue
1755 : ("ml_single_set_gst_info() has failed to configure the single_h handle with the given info. Error code: %d",
1756 : status);
1757 :
1758 0 : return status;
1759 : }
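/**
 * Example: reconfiguring the input layout before invoking, e.g., for a model
 * that accepts a variable shape. Illustrative sketch; whether the new layout
 * is accepted depends on the tensor-filter subplugin and the model.
 *
 * @code
 * ml_tensors_info_h in_info = NULL;
 * ml_tensor_dimension dim = { 3, 224, 224, 2 }; // hypothetical new shape
 *
 * ml_single_get_input_info (single, &in_info);
 * ml_tensors_info_set_tensor_dimension (in_info, 0, dim);
 * if (ml_single_set_input_info (single, in_info) != ML_ERROR_NONE) {
 *   // The framework rejected the new layout; the previous one remains active.
 * }
 * ml_tensors_info_destroy (in_info);
 * @endcode
 */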
1760 :
1761 : /**
1762 : * @brief Invokes the model with the given input data with the given info.
1763 : */
1764 : int
1765 0 : ml_single_invoke_dynamic (ml_single_h single,
1766 : const ml_tensors_data_h input, const ml_tensors_info_h in_info,
1767 : ml_tensors_data_h * output, ml_tensors_info_h * out_info)
1768 : {
1769 : int status;
1770 0 : ml_tensors_info_h cur_in_info = NULL;
1771 :
1772 0 : if (!single)
1773 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1774 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1775 0 : if (!input)
1776 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1777 : "The parameter, input (const ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h with input data frame for inference.");
1778 0 : if (!in_info)
1779 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1780 : "The parameter, in_info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensor_info_h that describes metadata of the given input for inference (input).");
1781 0 : if (!output)
1782 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1783 : "The parameter, output (ml_tensors_data_h *), is NULL. It should be a pointer to an empty (NULL or do-not-care) instance of ml_tensors_data_h, which is filled by this API with the result of inference.");
1784 0 : if (!out_info)
1785 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1786 : "The parameter, out_info (ml_tensors_info_h *), is NULL. It should be a pointer to an empty (NULL or do-not-care) instance of ml_tensors_info_h, which is filled by this API with the neural network model info.");
1787 :
1788 : /* init null */
1789 0 : *output = NULL;
1790 0 : *out_info = NULL;
1791 :
1792 0 : status = ml_single_get_input_info (single, &cur_in_info);
1793 0 : if (status != ML_ERROR_NONE) {
1794 0 : _ml_error_report_continue
1795 : ("Failed to get input metadata configured by the opened single_h handle instance. Error code: %d.",
1796 : status);
1797 0 : goto exit;
1798 : }
1799 0 : status = ml_single_update_info (single, in_info, out_info);
1800 0 : if (status != ML_ERROR_NONE) {
1801 0 : _ml_error_report_continue
1802 : ("Failed to reconfigure the opened single_h handle instance with the updated input/output metadata. Error code: %d.",
1803 : status);
1804 0 : goto exit;
1805 : }
1806 :
1807 0 : status = ml_single_invoke (single, input, output);
1808 0 : if (status != ML_ERROR_NONE) {
1809 0 : ml_single_set_input_info (single, cur_in_info);
1810 0 : if (status != ML_ERROR_TRY_AGAIN) {
1811 : /* If it's TRY_AGAIN, ml_single_invoke() has already given enough info. */
1812 0 : _ml_error_report_continue
1813 : ("Invoking the given neural network has failed. Error code: %d.",
1814 : status);
1815 : }
1816 : }
1817 :
1818 0 : exit:
1819 0 : if (cur_in_info)
1820 0 : ml_tensors_info_destroy (cur_in_info);
1821 :
1822 0 : if (status != ML_ERROR_NONE) {
1823 0 : if (*out_info) {
1824 0 : ml_tensors_info_destroy (*out_info);
1825 0 : *out_info = NULL;
1826 : }
1827 : }
1828 :
1829 0 : return status;
1830 : }
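/**
 * Example: invoking with per-call input metadata. Illustrative sketch;
 * assumes 'single', 'input', and a caller-built 'in_info' describing this
 * particular input frame, and a framework that supports reconfiguration.
 *
 * @code
 * ml_tensors_data_h output = NULL;
 * ml_tensors_info_h out_info = NULL;
 *
 * if (ml_single_invoke_dynamic (single, input, in_info,
 *         &output, &out_info) == ML_ERROR_NONE) {
 *   // out_info describes the (possibly resized) result stored in 'output'.
 *   ml_tensors_data_destroy (output);
 *   ml_tensors_info_destroy (out_info);
 * }
 * @endcode
 */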
1831 :
1832 : /**
1833 : * @brief Sets the property value for the given model.
1834 : */
1835 : int
1836 0 : ml_single_set_property (ml_single_h single, const char *name, const char *value)
1837 : {
1838 : ml_single *single_h;
1839 0 : int status = ML_ERROR_NONE;
1840 0 : char *old_value = NULL;
1841 :
1842 0 : check_feature_state (ML_FEATURE_INFERENCE);
1843 :
1844 0 : if (!single)
1845 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1846 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1847 0 : if (!name)
1848 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1849 : "The parameter, name (const char *), is NULL. It should be a valid string representing a property key.");
1850 :
1851 : /* get the old value; this also checks that the property key is supported. */
1852 0 : _ml_error_report_return_continue_iferr
1853 : (ml_single_get_property (single, name, &old_value),
1854 : "Cannot fetch the previous value for the given property name, '%s'. It appears that the property key, '%s', is invalid (not supported).",
1855 : name, name);
1856 :
1857 : /* if the same value is set again, do nothing. */
1858 0 : if (old_value && value && g_ascii_strcasecmp (old_value, value) == 0) {
1859 0 : g_free (old_value);
1860 0 : return ML_ERROR_NONE;
1861 : }
1862 :
1863 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1864 :
1865 : /* update property */
1866 0 : if (g_str_equal (name, "is-updatable")) {
1867 0 : if (!value)
1868 0 : goto error;
1869 :
1870 : /* boolean */
1871 0 : if (g_ascii_strcasecmp (value, "true") == 0) {
1872 0 : if (g_ascii_strcasecmp (old_value, "true") != 0)
1873 0 : g_object_set (G_OBJECT (single_h->filter), name, (gboolean) TRUE, NULL);
1874 0 : } else if (g_ascii_strcasecmp (value, "false") == 0) {
1875 0 : if (g_ascii_strcasecmp (old_value, "false") != 0)
1876 0 : g_object_set (G_OBJECT (single_h->filter), name, (gboolean) FALSE,
1877 : NULL);
1878 : } else {
1879 0 : _ml_error_report
1880 : ("The property value, '%s', is not appropriate for a boolean property 'is-updatable'. It should be either 'true' or 'false'.",
1881 : value);
1882 0 : status = ML_ERROR_INVALID_PARAMETER;
1883 : }
1884 0 : } else if (g_str_equal (name, "input") || g_str_equal (name, "inputtype")
1885 0 : || g_str_equal (name, "inputname") || g_str_equal (name, "output")
1886 0 : || g_str_equal (name, "outputtype") || g_str_equal (name, "outputname")) {
1887 : GstTensorsInfo gst_info;
1888 0 : gboolean is_input = g_str_has_prefix (name, "input");
1889 : guint num;
1890 :
1891 0 : if (!value)
1892 0 : goto error;
1893 :
1894 0 : ml_single_get_gst_info (single_h, is_input, &gst_info);
1895 :
1896 0 : if (g_str_has_suffix (name, "type"))
1897 0 : num = gst_tensors_info_parse_types_string (&gst_info, value);
1898 0 : else if (g_str_has_suffix (name, "name"))
1899 0 : num = gst_tensors_info_parse_names_string (&gst_info, value);
1900 : else
1901 0 : num = gst_tensors_info_parse_dimensions_string (&gst_info, value);
1902 :
1903 0 : if (num == gst_info.num_tensors) {
1904 : /* change configuration */
1905 0 : status = ml_single_set_gst_info (single_h, &gst_info);
1906 : } else {
1907 0 : _ml_error_report
1908 : ("The property value, '%s', is not appropriate for the given property key, '%s'. The API has failed to parse the given property value.",
1909 : value, name);
1910 0 : status = ML_ERROR_INVALID_PARAMETER;
1911 : }
1912 :
1913 0 : gst_tensors_info_free (&gst_info);
1914 : } else {
1915 0 : g_object_set (G_OBJECT (single_h->filter), name, value, NULL);
1916 : }
1917 0 : goto done;
1918 0 : error:
1919 0 : _ml_error_report
1920 : ("The parameter, value (const char *), is NULL. It should be a valid string representing the value to be set for the given property key, '%s'",
1921 : name);
1922 0 : status = ML_ERROR_INVALID_PARAMETER;
1923 0 : done:
1924 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1925 :
1926 0 : g_free (old_value);
1927 0 : return status;
1928 : }
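/**
 * Example: updating properties through the string interface. Illustrative
 * sketch; the "3:224:224:1" dimension string is hypothetical and must match
 * what the loaded model actually accepts.
 *
 * @code
 * // Reconfigure the input dimension; parsed and applied via
 * // ml_single_set_gst_info() internally.
 * ml_single_set_property (single, "input", "3:224:224:1");
 *
 * // Boolean property; only "true" or "false" are accepted.
 * ml_single_set_property (single, "is-updatable", "true");
 * @endcode
 */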
1929 :
1930 : /**
1931 : * @brief Gets the property value for the given model.
1932 : */
1933 : int
1934 0 : ml_single_get_property (ml_single_h single, const char *name, char **value)
1935 : {
1936 : ml_single *single_h;
1937 0 : int status = ML_ERROR_NONE;
1938 :
1939 0 : check_feature_state (ML_FEATURE_INFERENCE);
1940 :
1941 0 : if (!single)
1942 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1943 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1944 0 : if (!name)
1945 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1946 : "The parameter, name (const char *), is NULL. It should be a valid string representing a property key.");
1947 0 : if (!value)
1948 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1949 : "The parameter, value (const char *), is NULL. It should be a valid string representing the value to be set for the given property key, '%s'",
1950 : name);
1951 :
1952 : /* init null */
1953 0 : *value = NULL;
1954 :
1955 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1956 :
1957 0 : if (g_str_equal (name, "input") || g_str_equal (name, "output") ||
1958 0 : g_str_equal (name, "inputtype") || g_str_equal (name, "inputname") ||
1959 0 : g_str_equal (name, "inputlayout") || g_str_equal (name, "outputtype") ||
1960 0 : g_str_equal (name, "outputname") || g_str_equal (name, "outputlayout") ||
1961 0 : g_str_equal (name, "accelerator") || g_str_equal (name, "custom")) {
1962 : /* string */
1963 0 : g_object_get (G_OBJECT (single_h->filter), name, value, NULL);
1964 0 : } else if (g_str_equal (name, "is-updatable")) {
1965 0 : gboolean bool_value = FALSE;
1966 :
1967 : /* boolean */
1968 0 : g_object_get (G_OBJECT (single_h->filter), name, &bool_value, NULL);
1969 0 : *value = (bool_value) ? g_strdup ("true") : g_strdup ("false");
1970 : } else {
1971 0 : _ml_error_report
1972 : ("The property key, '%s', is not available for get_property and not recognized by the API. It should be one of {input, inputtype, inputname, inputlayout, output, outputtype, outputname, outputlayout, accelerator, custom, is-updatable}.",
1973 : name);
1974 0 : status = ML_ERROR_NOT_SUPPORTED;
1975 : }
1976 :
1977 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1978 0 : return status;
1979 : }
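/**
 * Example: reading a property back. Illustrative sketch; the returned string
 * is owned by the caller and must be freed with g_free().
 *
 * @code
 * char *value = NULL;
 *
 * if (ml_single_get_property (single, "input", &value) == ML_ERROR_NONE) {
 *   // e.g., value may hold "3:224:224:1"
 *   g_free (value);
 * }
 * @endcode
 */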
1980 :
1981 : /**
1982 : * @brief Internal helper function to validate model files.
1983 : */
1984 : static int
1985 0 : __ml_validate_model_file (const char *const *model,
1986 : const unsigned int num_models, gboolean * is_dir)
1987 : {
1988 : guint i;
1989 :
1990 0 : if (!model)
1991 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1992 : "The parameter, model, is NULL. It should be a valid array of strings, where each string is a valid file path for a neural network model file.");
1993 0 : if (num_models < 1)
1994 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1995 : "The parameter, num_models, is 0. It should be the number of files for the given neural network model.");
1996 :
1997 0 : if (g_file_test (model[0], G_FILE_TEST_IS_DIR)) {
1998 0 : *is_dir = TRUE;
1999 0 : return ML_ERROR_NONE;
2000 : }
2001 :
2002 0 : for (i = 0; i < num_models; i++) {
2003 0 : if (!model[i] ||
2004 0 : !g_file_test (model[i], G_FILE_TEST_EXISTS | G_FILE_TEST_IS_REGULAR)) {
2005 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2006 : "The given param, model path [%d] = \"%s\" is invalid or the file is not found or accessible.",
2007 : i, _STR_NULL (model[i]));
2008 : }
2009 : }
2010 :
2011 0 : *is_dir = FALSE;
2012 :
2013 0 : return ML_ERROR_NONE;
2014 : }
2015 :
2016 : /**
2017 : * @brief Internal helper to check if the file has one of the valid extensions.
2018 : * @return TRUE if valid, FALSE otherwise.
2019 : */
2020 : static gboolean
2021 0 : _is_valid_extension (const char *filename, const char *const *valid_exts)
2022 : {
2023 : const char *dot;
2024 :
2025 0 : if (!filename || !valid_exts)
2026 0 : return FALSE;
2027 :
2028 0 : dot = strrchr (filename, '.');
2029 0 : if (!dot)
2030 0 : return FALSE;
2031 :
2032 0 : for (; *valid_exts; valid_exts++) {
2033 0 : if (g_ascii_strcasecmp (dot, *valid_exts) == 0)
2034 0 : return TRUE;
2035 : }
2036 :
2037 0 : return FALSE;
2038 : }
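/**
 * Example: how the helper above behaves. Illustrative sketch with a
 * hypothetical file name; the comparison is case-insensitive since it uses
 * g_ascii_strcasecmp().
 *
 * @code
 * const char *exts[] = { ".tflite", ".pb", NULL };
 * gboolean ok = _is_valid_extension ("model.TFLITE", exts); // TRUE
 * @endcode
 */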
2039 :
2040 : /**
2041 : * @brief Validates the nnfw model file.
2042 : * @since_tizen 5.5
2043 : * @param[in] model The path of model file.
2044 : * @param[in/out] nnfw The type of NNFW.
2045 : * @return @c 0 on success. Otherwise a negative error value.
2046 : * @retval #ML_ERROR_NONE Successful
2047 : * @retval #ML_ERROR_NOT_SUPPORTED Not supported, or framework to support this model file is unavailable in the environment.
2048 : * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
2049 : */
2050 : int
2051 0 : _ml_validate_model_file (const char *const *model,
2052 : const unsigned int num_models, ml_nnfw_type_e * nnfw)
2053 : {
2054 0 : int status = ML_ERROR_NONE;
2055 0 : ml_nnfw_type_e detected = ML_NNFW_TYPE_ANY;
2056 0 : gboolean is_dir = FALSE;
2057 : gchar *fw_name;
2058 :
2059 0 : if (!nnfw)
2060 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
2061 : "The parameter, nnfw, is NULL. It should be a valid pointer of ml_nnfw_type_e.");
2062 :
2063 0 : _ml_error_report_return_continue_iferr (__ml_validate_model_file (model,
2064 : num_models, &is_dir),
2065 : "The parameters, model and num_models, are not valid.");
2066 :
2067 : /**
2068 : * @note detect-fw checks the file extension and returns the proper framework name for the given models.
2069 : * If the detected framework and the given nnfw are the same, we don't need to check the file extension.
2070 : * If any condition for auto detection is added later, the code below should be updated as well.
2071 : */
2072 0 : fw_name = gst_tensor_filter_detect_framework (model, num_models, TRUE);
2073 0 : detected = _ml_get_nnfw_type_by_subplugin_name (fw_name);
2074 0 : g_free (fw_name);
2075 :
2076 0 : if (*nnfw == ML_NNFW_TYPE_ANY) {
2077 0 : if (detected == ML_NNFW_TYPE_ANY) {
2078 0 : _ml_error_report
2079 : ("The given neural network model (1st path is \"%s\", and there are %d paths declared) has unknown or unsupported extension. Please check its corresponding neural network framework and try to specify it instead of \"ML_NNFW_TYPE_ANY\".",
2080 : model[0], num_models);
2081 0 : status = ML_ERROR_INVALID_PARAMETER;
2082 : } else {
2083 0 : _ml_logi ("The given model is supposed a %s model.",
2084 : _ml_get_nnfw_subplugin_name (detected));
2085 0 : *nnfw = detected;
2086 : }
2087 :
2088 0 : goto done;
2089 0 : } else if (is_dir && *nnfw != ML_NNFW_TYPE_NNFW) {
2090 : /* supposed it is ONE if given model is directory */
2091 0 : _ml_error_report
2092 : ("The given model (1st path is \"%s\", and there are %d paths declared) is directory, which is allowed by \"NNFW (One Runtime)\" only, Please check the model and framework.",
2093 : model[0], num_models);
2094 0 : status = ML_ERROR_INVALID_PARAMETER;
2095 0 : goto done;
2096 0 : } else if (detected == *nnfw) {
2097 : /* Expected framework, nothing to do. */
2098 0 : goto done;
2099 : }
2100 :
2101 : /** @todo Make sure num_models is correct for each nnfw type */
2102 0 : switch ((int) *nnfw) {
2103 0 : case ML_NNFW_TYPE_NNFW:
2104 : case ML_NNFW_TYPE_TVM:
2105 : case ML_NNFW_TYPE_ONNX_RUNTIME:
2106 : case ML_NNFW_TYPE_NCNN:
2107 : case ML_NNFW_TYPE_TENSORRT:
2108 : case ML_NNFW_TYPE_QNN:
2109 : case ML_NNFW_TYPE_LLAMACPP:
2110 : case ML_NNFW_TYPE_TIZEN_HAL:
2111 : /**
2112 : * We cannot check the file extension for these frameworks;
2113 : * each framework validates its own metadata and model files.
2114 : */
2115 0 : break;
2116 0 : case ML_NNFW_TYPE_MVNC:
2117 : case ML_NNFW_TYPE_OPENVINO:
2118 : case ML_NNFW_TYPE_EDGE_TPU:
2119 : /**
2120 : * @todo Need to check method to validate model
2121 : * Although nnstreamer supports these frameworks,
2122 : * ML-API implementation is not ready.
2123 : */
2124 0 : _ml_error_report
2125 : ("Given NNFW is not supported by ML-API Inference.Single, yet, although it is supported by NNStreamer. If you have such NNFW integrated into your machine and want to access via ML-API, please update the corresponding implementation or report and discuss at github.com/nnstreamer/nnstreamer/issues.");
2126 0 : status = ML_ERROR_NOT_SUPPORTED;
2127 0 : break;
2128 0 : case ML_NNFW_TYPE_VD_AIFW:
2129 : {
2130 0 : const char *exts[] = { ".nb", ".ncp", ".tvn", ".bin", NULL };
2131 0 : if (!_is_valid_extension (model[0], exts))
2132 0 : status = ML_ERROR_INVALID_PARAMETER;
2133 : }
2134 0 : break;
2135 0 : case ML_NNFW_TYPE_SNAP:
2136 : #if !defined (__ANDROID__)
2137 0 : _ml_error_report ("SNAP is supported by Android/arm64-v8a devices only.");
2138 0 : status = ML_ERROR_NOT_SUPPORTED;
2139 : #endif
2140 : /* SNAP requires multiple files; consider it supported if the model file exists. */
2141 0 : break;
2142 0 : case ML_NNFW_TYPE_ARMNN:
2143 : {
2144 0 : const char *exts[] =
2145 : { ".caffemodel", ".tflite", ".pb", ".prototxt", NULL };
2146 0 : if (!_is_valid_extension (model[0], exts)) {
2147 0 : _ml_error_report ("Invalid extension for ARMNN: %s", model[0]);
2148 0 : status = ML_ERROR_INVALID_PARAMETER;
2149 : }
2150 : }
2151 0 : break;
2152 0 : case ML_NNFW_TYPE_MXNET:
2153 : {
2154 0 : const char *exts[] = { ".params", ".json", NULL };
2155 0 : if (!_is_valid_extension (model[0], exts))
2156 0 : status = ML_ERROR_INVALID_PARAMETER;
2157 : }
2158 0 : break;
2159 0 : default:
2160 0 : _ml_error_report
2161 : ("You have designated an incorrect neural network framework (out of bound).");
2162 0 : status = ML_ERROR_INVALID_PARAMETER;
2163 0 : break;
2164 : }
2165 :
2166 0 : done:
2167 0 : if (status == ML_ERROR_NONE) {
2168 0 : if (!_ml_nnfw_is_available (*nnfw, ML_NNFW_HW_ANY)) {
2169 0 : status = ML_ERROR_NOT_SUPPORTED;
2170 0 : _ml_error_report
2171 : ("The subplugin for tensor-filter \"%s\" is not available. Please install the corresponding tensor-filter subplugin file (usually, \"libnnstreamer_filter_${NAME}.so\") at the correct path. Please use \"nnstreamer-check\" utility to check related configurations. If you do not have the utility ready, build and install \"confchk\", which is located at ${nnstreamer_source}/tools/development/confchk/ .",
2172 : _ml_get_nnfw_subplugin_name (*nnfw));
2173 : }
2174 : } else {
2175 0 : _ml_error_report
2176 : ("The given model file, \"%s\" (1st of %d files), is invalid.",
2177 : model[0], num_models);
2178 : }
2179 :
2180 0 : return status;
2181 : }
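/**
 * Example: how a caller (e.g., ml_single_open()) may use the validator above
 * with automatic framework detection. Illustrative sketch with a hypothetical
 * model path; the detected type depends on the installed subplugins.
 *
 * @code
 * const char *models[] = { "/path/model.tflite" };
 * ml_nnfw_type_e nnfw = ML_NNFW_TYPE_ANY; // let the extension decide
 *
 * if (_ml_validate_model_file (models, 1, &nnfw) == ML_ERROR_NONE) {
 *   // nnfw is now expected to be ML_NNFW_TYPE_TENSORFLOW_LITE (from ".tflite").
 * }
 * @endcode
 */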
|