Passing a variable to the DBMS_PARALLEL_EXECUTE SQL statement

DECLARE
/*FOO Process Variables*/
l_chunk_sql VARCHAR2(1000);
l_sql_stmt varchar2(1000);
v_date DATE := trunc(sysdate) + 5;
v_order_time VARCHAR2(200) := 'AFTERNOON';
v_flag VARCHAR2(1) := 'N';
BEGIN
/*Create chunks by store*/
l_chunk_sql := 'SELECT distinct loc_nbr, loc_nbr FROM location where close_date is null ';
dbms_parallel_execute.create_task('PROCESS PREPAREDATA');
dbms_parallel_execute.create_chunks_by_sql('PROCESS PREPAREDATA', l_chunk_sql, false);
dbms_parallel_execute.run_task(task_name => 'PROCESS PREPAREDATA',
sql_stmt => 'begin preparedata( :start_id, :end_id, v_date ,v_order_time,v_flag); end;',
language_flag => dbms_sql.native, parallel_level => 10);
dbms_parallel_execute.drop_task('PROCESS PREPAREDATA');
END;
Variables v_date, v_order_time and v_flag are not getting recognized. Any help, please?

Those variables are declared out of scope of that statement, so - how about concatenating them? Note that the string and date values must be embedded as properly quoted literals, or the generated block won't compile:
sql_stmt => 'begin preparedata( :start_id, :end_id, DATE ''' || to_char(v_date, 'YYYY-MM-DD') || ''', ''' || v_order_time || ''', ''' || v_flag || '''); end;',
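With the sample values above, the concatenation expands at run time to a block like the following (the rendered date is illustrative), with only :start_id and :end_id left as binds for each chunk:
begin preparedata( :start_id, :end_id, DATE '2024-06-15', 'AFTERNOON', 'N'); end;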

Related

Capturing the output of "execute immediate" to a variable

I need to capture the output of an "execute immediate" statement and use that output variable in subsequent steps of the procedure.
Like the code is :
var_01 := 'SELECT COUNT(1) AS cnt FROM ALL_RESOURCE_MONITOR_MASTER WHERE' || ' '
|| 'account_name = ' ||''''|| lv_acct_name_new ||''''|| ' ' || 'AND' || ' '
|| 'rm_type= ' ||''''|| type || ''''||' ' || 'AND' || ' '
|| 'env= ' ||''''|| env || ''''||' ';
cnt := (execute immediate :var_01);
Once cnt is populated, I need to run the if/else statement below:
if (cnt > 0) then
return 'execute the statement';
else
return 'Resource monitor already present hence insert skipped';
end if;
But the problem is that when I try to execute this block, I always get an error like:
SQL compilation error: Invalid expression value ('SqlExecuteImmediateDynamic') for assignment.
Please do provide your inputs on resolving this. Thanks in advance!!
Unfortunately, there is no syntax like EXECUTE <sql> INTO <output_variable_list> USING <input_variable_list> known from other dialects.
EXECUTE IMMEDIATE:
Executes a string that contains a SQL statement or a Snowflake Scripting statement.
EXECUTE IMMEDIATE '<string_literal>'
[ USING (bind_variable_1 [, bind_variable_2 ...] ) ] ;
In this scenario there is no need to use dynamic SQL for checking the metadata table, as the block can be rewritten as a parameterized IF statement:
CREATE OR REPLACE TABLE ALL_RESOURCE_MONITOR_MASTER
AS
SELECT 'acc1' AS account_name, 'TEST' AS env, 'WAREHOUSE' AS rm_type;
Code:
DECLARE
lv_acct_name_new TEXT := 'acc1';
type TEXT := 'WAREHOUSE';
env TEXT := 'TEST';
BEGIN
IF (EXISTS (SELECT 1
FROM ALL_RESOURCE_MONITOR_MASTER
WHERE account_name = :lv_acct_name_new
AND rm_type = :type
AND env = :env
)) THEN
RETURN 'Executing some code';
ELSE
RETURN 'Resource monitor already present hence insert skipped';
END IF;
END;
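If the SQL really must stay dynamic, the output of EXECUTE IMMEDIATE can still be captured by assigning it to a RESULTSET and fetching through a cursor. A minimal sketch, assuming var_01 holds the single-row COUNT query built in the question:
DECLARE
  var_01 TEXT := 'SELECT COUNT(1) AS cnt FROM ALL_RESOURCE_MONITOR_MASTER';
  res RESULTSET;
  cnt INTEGER;
BEGIN
  res := (EXECUTE IMMEDIATE :var_01);  -- run the dynamic statement
  LET c1 CURSOR FOR res;               -- bind a cursor to the result set
  OPEN c1;
  FETCH c1 INTO cnt;                   -- read the single COUNT value
  CLOSE c1;
  RETURN cnt;
END;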

How to import Oracle DMP file without knowing the original tablespace?

Use case: customers will send a backup of their DB as a dmp file. That's all they will send.
I'm building a script that will import that dmp file into Oracle using DBMS_DATAPUMP. However, as I do not know the original tablespace name, I'm getting the following error:
ORA-39083: Object type USER:"XXXXX" failed to create with error:
ORA-00959: tablespace 'XXXXXXX' does not exist
This is my PL/SQL procedure:
PROCEDURE IMPORTING
(
DMPFILES IN VARCHAR2,
FROMSCHEMA IN VARCHAR2,
TOSCHEMA IN VARCHAR2
) AS
ind NUMBER; -- Loop index
h1 NUMBER; -- Data Pump job handle
percent_done NUMBER; -- Percentage of job complete
job_state VARCHAR2(30); -- To keep track of job state
le ku$_LogEntry; -- For WIP and error messages
js ku$_JobStatus; -- The job status from get_status
jd ku$_JobDesc; -- The job description from get_status
sts ku$_Status; -- The status object returned by get_status
array apex_application_global.vc_arr2;
BEGIN
h1 := DBMS_DATAPUMP.open('IMPORT','FULL',NULL,'EXAMPLE1');
dbms_datapump.add_file(handle => h1, filename => 'IMPORT.LOG', directory => 'DATA_PUMP_DIR', filetype => 3);
-- using this function to split the files passed as a string into an array
array := apex_util.string_to_table(DMPFILES, ',');
for i in 1 .. array.count loop
DBMS_DATAPUMP.ADD_FILE(h1,array(i),'DATA_PUMP_DIR');
end loop;
DBMS_DATAPUMP.METADATA_REMAP(h1,'REMAP_SCHEMA',FROMSCHEMA,TOSCHEMA);
dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 1);
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'AUTOMATIC');
dbms_datapump.set_parameter(handle => h1, name => 'REUSE_DATAFILES', value => 0);
dbms_datapump.set_parameter(handle => h1, name => 'SKIP_UNUSABLE_INDEXES', value => 0);
DBMS_DATAPUMP.START_JOB(h1);
-- The import job should now be running. In the following loop, the job is
-- monitored until it completes. In the meantime, progress information is
-- displayed. Note: this is identical to the export example.
percent_done := 0;
job_state := 'UNDEFINED';
while (job_state != 'COMPLETED') and (job_state != 'STOPPED') loop
dbms_datapump.get_status(h1,
dbms_datapump.ku$_status_job_error +
dbms_datapump.ku$_status_job_status +
dbms_datapump.ku$_status_wip,-1,job_state,sts);
js := sts.job_status;
-- If the percentage done changed, display the new value.
if js.percent_done != percent_done
then
dbms_output.put_line('*** Job percent done = ' ||
to_char(js.percent_done));
percent_done := js.percent_done;
end if;
-- If any work-in-progress (WIP) or Error messages were received for the job,
-- display them.
if (bitand(sts.mask,dbms_datapump.ku$_status_wip) != 0)
then
le := sts.wip;
else
if (bitand(sts.mask,dbms_datapump.ku$_status_job_error) != 0)
then
le := sts.error;
else
le := null;
end if;
end if;
if le is not null
then
ind := le.FIRST;
while ind is not null loop
dbms_output.put_line(le(ind).LogText);
ind := le.NEXT(ind);
end loop;
end if;
end loop;
-- Indicate that the job finished and gracefully detach from it.
dbms_output.put_line('Job has completed');
dbms_output.put_line('Final job state = ' || job_state);
dbms_datapump.detach(h1);
END IMPORTING;
So basically my question is:
Is there a way to import a dmp file into Oracle without knowing the original tablespace...?
Thanks in advance for the help.
UPDATE:
I found that, when using this script, I also need to know the schema name. That leads to the following changes to the script:
PROCEDURE IMPORTING
(
DMPFILES IN VARCHAR2,
FROMSCHEMA IN VARCHAR2,
TOSCHEMA IN VARCHAR2,
FROMTABLESPACE IN VARCHAR2,
TOTABLESPACE IN VARCHAR2
) AS
ind NUMBER; -- Loop index
h1 NUMBER; -- Data Pump job handle
percent_done NUMBER; -- Percentage of job complete
job_state VARCHAR2(30); -- To keep track of job state
le ku$_LogEntry; -- For WIP and error messages
js ku$_JobStatus; -- The job status from get_status
jd ku$_JobDesc; -- The job description from get_status
sts ku$_Status; -- The status object returned by get_status
array apex_application_global.vc_arr2;
BEGIN
h1 := DBMS_DATAPUMP.open('IMPORT','FULL',NULL,'EXAMPLE1');
dbms_datapump.add_file(handle => h1, filename => 'IMPORT.LOG', directory => 'DATA_PUMP_DIR', filetype => 3);
-- using this function to split the files passed as a string into an array
array := apex_util.string_to_table(DMPFILES, ',');
for i in 1 .. array.count loop
DBMS_DATAPUMP.ADD_FILE(h1,array(i),'DATA_PUMP_DIR');
end loop;
DBMS_DATAPUMP.METADATA_REMAP(h1,'REMAP_TABLESPACE',FROMTABLESPACE,TOTABLESPACE);
DBMS_DATAPUMP.METADATA_REMAP(h1,'REMAP_SCHEMA',FROMSCHEMA,TOSCHEMA);
dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 1);
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'AUTOMATIC');
dbms_datapump.set_parameter(handle => h1, name => 'REUSE_DATAFILES', value => 0);
dbms_datapump.set_parameter(handle => h1, name => 'SKIP_UNUSABLE_INDEXES', value => 0);
DBMS_DATAPUMP.START_JOB(h1);
-- The import job should now be running. In the following loop, the job is
-- monitored until it completes. In the meantime, progress information is
-- displayed. Note: this is identical to the export example.
percent_done := 0;
job_state := 'UNDEFINED';
while (job_state != 'COMPLETED') and (job_state != 'STOPPED') loop
dbms_datapump.get_status(h1,
dbms_datapump.ku$_status_job_error +
dbms_datapump.ku$_status_job_status +
dbms_datapump.ku$_status_wip,-1,job_state,sts);
js := sts.job_status;
-- If the percentage done changed, display the new value.
if js.percent_done != percent_done
then
dbms_output.put_line('*** Job percent done = ' ||
to_char(js.percent_done));
percent_done := js.percent_done;
end if;
-- If any work-in-progress (WIP) or Error messages were received for the job,
-- display them.
if (bitand(sts.mask,dbms_datapump.ku$_status_wip) != 0)
then
le := sts.wip;
else
if (bitand(sts.mask,dbms_datapump.ku$_status_job_error) != 0)
then
le := sts.error;
else
le := null;
end if;
end if;
if le is not null
then
ind := le.FIRST;
while ind is not null loop
dbms_output.put_line(le(ind).LogText);
ind := le.NEXT(ind);
end loop;
end if;
end loop;
-- Indicate that the job finished and gracefully detach from it.
dbms_output.put_line('Job has completed');
dbms_output.put_line('Final job state = ' || job_state);
dbms_datapump.detach(h1);
END IMPORTING;
I tested that with a dmp that I exported, so I knew the tablespace and the schema name.
Summary: is there any way, then, to learn the tablespace name and the schema name from a dmp file?
Thanks.
Short answer: no.
Long answer: yes, if you generate the DDL file; then you can actually grep the file and search for the tablespaces and users. See below for the script to generate the DDL file:
create or replace
PROCEDURE GENERATE_SQL
(
DMPFILES IN VARCHAR2
, SQLFILENAME IN VARCHAR2
, VERSIONPARAM IN VARCHAR2
, JOBNAME IN VARCHAR2
) AS
h1 number;
array apex_application_global.vc_arr2;
ind NUMBER; -- Loop index
percent_done NUMBER; -- Percentage of job complete
job_state VARCHAR2(30); -- To keep track of job state
le ku$_LogEntry; -- For WIP and error messages
js ku$_JobStatus; -- The job status from get_status
jd ku$_JobDesc; -- The job description from get_status
sts ku$_Status; -- The status object returned by get_status
BEGIN
h1 := dbms_datapump.open(
operation => 'SQL_FILE',
job_mode => 'SCHEMA',
remote_link => null,
job_name => JOBNAME,
version => VERSIONPARAM
);
array := apex_util.string_to_table(DMPFILES, ',');
for i in 1 .. array.count loop
dbms_output.put_line('array(i): ' || array(i));
DBMS_DATAPUMP.ADD_FILE(h1,array(i),'DBRAT_DMP',dbms_datapump.ku$_file_type_dump_file);
end loop;
dbms_output.put_line('datapump_job: ' || h1);
dbms_datapump.add_file(
handle => h1,
filename => SQLFILENAME,
directory => 'DBRAT_DMP',
filetype => dbms_datapump.ku$_file_type_sql_file);
dbms_datapump.start_job(
handle => h1,
skip_current => 0,
abort_step => 0);
percent_done := 0;
job_state := 'UNDEFINED';
while (job_state != 'COMPLETED') and (job_state != 'STOPPED') loop
dbms_datapump.get_status(h1,
dbms_datapump.ku$_status_job_error +
dbms_datapump.ku$_status_job_status +
dbms_datapump.ku$_status_wip,-1,job_state,sts);
js := sts.job_status;
-- If the percentage done changed, display the new value.
if js.percent_done != percent_done
then
dbms_output.put_line('*** Job percent done = ' ||
to_char(js.percent_done));
percent_done := js.percent_done;
end if;
-- If any work-in-progress (WIP) or Error messages were received for the job,
-- display them.
if (bitand(sts.mask,dbms_datapump.ku$_status_wip) != 0)
then
le := sts.wip;
else
if (bitand(sts.mask,dbms_datapump.ku$_status_job_error) != 0)
then
le := sts.error;
else
le := null;
end if;
end if;
if le is not null
then
ind := le.FIRST;
while ind is not null loop
dbms_output.put_line(le(ind).LogText);
ind := le.NEXT(ind);
end loop;
end if;
end loop;
-- Indicate that the job finished and gracefully detach from it.
dbms_output.put_line('Job has completed');
dbms_output.put_line('Final job state = ' || job_state);
dbms_datapump.detach(h1);
END GENERATE_SQL;
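Once the SQL file is written, a quick scan pulls the tablespace and user names out of it. A sketch, assuming the job wrote IMPORT.SQL into the OS directory behind DBRAT_DMP:
grep -Eo 'TABLESPACE "[^"]+"' IMPORT.SQL | sort -u
grep -Eo 'CREATE USER "[^"]+"' IMPORT.SQL | sort -u
As an alternative worth knowing: if the goal is only to make the import succeed without the original tablespace existing, DBMS_DATAPUMP.METADATA_TRANSFORM can strip the segment attributes (including the TABLESPACE clauses), so objects are created in the target user's default tablespace. Add this to the import job before START_JOB:
DBMS_DATAPUMP.METADATA_TRANSFORM(h1, 'SEGMENT_ATTRIBUTES', 0);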

Eof not triggering

I have a function where I get data from a DB. My test data set returns 6500 rows (I extracted the formatted SQL statement from the SQLText variable and ran it as a test), but when I run the following code, Eof never triggers, and I have seen over 100k rows imported.
ADOQuery := TADOQuery.Create(nil);
ADOQuery.ConnectionString := CONNECT_STRING;
// Build SQL Query
SQLText := Format( 'Select Temp.Serial, Temp.QCSample , Temp.Scrap , Temp.StationID , Temp.Defect , Temp.AddData , Temp2.Serial as Parent_Serial ' +
'from TAB_ELEMENT as Temp ' +
'left join TAB_ELEMENT as Temp2 on Temp.Parent_Id = Temp2.Element_Id ' +
'where Temp.Batch_ID = %d and Temp.StationID = 0 ',[iSearchID]);
ADOQuery.SQL.Clear; // Clear query of garbage values
ADOQuery.SQL.Text := SQLText; // Add query text to query module
ADOQuery.Open;
// Handle Results
iIndexPos := 0;
tDataImport.BeginUpdate;
while not ADOQuery.Eof do
begin
tDataImport.Items[iIndexPos].Serial := ADOQuery.FieldByName('Serial').AsString;
tDataImport.Items[iIndexPos].QCStatus := ADOQuery.FieldByName('QCSample').AsBoolean;
tDataImport.Items[iIndexPos].Scrap := ADOQuery.FieldByName('Scrap').AsInteger;
tDataImport.Items[iIndexPos].StationID := ADOQuery.FieldByName('StationID').AsInteger;
tDataImport.Items[iIndexPos].Defect := ADOQuery.FieldByName('Defect').AsBoolean;
tDataImport.Items[iIndexPos].AddData := ADOQuery.FieldByName('AddData').AsString;
tDataImport.Items[iIndexPos].ParentSerial := ADOQuery.FieldByName('Parent_Serial').AsString;
inc(iIndexPos);
end;
So in summary: running this query with these parameters, I expect 6500 rows, but it never ends, even after 100k+ rows are processed.
Open() places the cursor on the first record and sets Eof accordingly. You are not calling Next() to advance the cursor to the next record and update Eof, so you are processing the same record over and over:
ADOQuery.Open;
while not ADOQuery.Eof do
begin
//...
ADOQuery.Next; // <-- add this!
end;
On a side note, you should be using a parameterized query instead of a formatted SQL query string. It is safer, faster, and more efficient on the DB:
ADOQuery := TADOQuery.Create(nil);
ADOQuery.ConnectionString := CONNECT_STRING;
ADOQuery.SQL.Text := 'Select Temp.Serial, Temp.QCSample , Temp.Scrap , Temp.StationID , Temp.Defect , Temp.AddData , Temp2.Serial as Parent_Serial ' +
'from TAB_ELEMENT as Temp ' +
'left join TAB_ELEMENT as Temp2 on Temp.Parent_Id = Temp2.Element_Id ' +
'where Temp.Batch_ID = :iSearchID and Temp.StationID = 0 ';
with ADOQuery.Parameters.ParamByName('iSearchID') do
begin
DataType := ftInteger;
Value := iSearchID;
end;
ADOQuery.Open;
try
iIndexPos := 0;
tDataImport.BeginUpdate;
try
while not ADOQuery.Eof do
begin
tDataImport.Items[iIndexPos].Serial := ADOQuery.FieldByName('Serial').AsString;
tDataImport.Items[iIndexPos].QCStatus := ADOQuery.FieldByName('QCSample').AsBoolean;
tDataImport.Items[iIndexPos].Scrap := ADOQuery.FieldByName('Scrap').AsInteger;
tDataImport.Items[iIndexPos].StationID := ADOQuery.FieldByName('StationID').AsInteger;
tDataImport.Items[iIndexPos].Defect := ADOQuery.FieldByName('Defect').AsBoolean;
tDataImport.Items[iIndexPos].AddData := ADOQuery.FieldByName('AddData').AsString;
tDataImport.Items[iIndexPos].ParentSerial := ADOQuery.FieldByName('Parent_Serial').AsString;
inc(iIndexPos);
ADOQuery.Next;
end;
finally
tDataImport.EndUpdate;
end;
finally
ADOQuery.Close;
end;
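One more side note: in both snippets the query is created with TADOQuery.Create(nil) but never destroyed, so the component leaks. Assuming nothing else owns it, wrap its whole lifetime in try/finally as well:
ADOQuery := TADOQuery.Create(nil);
try
  // ... configure, open and read the query as shown above ...
finally
  ADOQuery.Free;
end;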

PostgreSQL - Dynamic Tables, insert values from Text array in a function

A client wants to store anything in separate tables. (Long story; it's necessary.)
To do this, I've built a Postgres function to create new tables on the fly in their own namespace.
These tables can have 2, 4, or 100 columns, just what the user wants.
No problem, this works. The datatypes used in these dynamic tables are native ones, e.g. text, boolean, integer, etcetera.
Now comes the problem: I've got to insert data into these tables. The point is, the users cannot access the tables directly; they do this through a function.
For a couple of datatypes this is not a problem, but for the text datatypes it is.
Here is the function till now:
-- Function: add(integer, text[])
-- DROP FUNCTION add(integer, text[]);
CREATE OR REPLACE FUNCTION add(target_ito_id integer, fields text[])
RETURNS integer AS
$BODY$
DECLARE
l_line_ending text := ')';
l_fieldtype integer;
l_ito_table_name text;
l_ito_fieldnames text;
l_field ito_fields%rowtype;
l_first_loop boolean := true;
l_values_to_insert text := 'VALUES (';
l_loop_counter integer := 0;
l_query text;
BEGIN
select into l_ito_table_name ito_table_name from ito where id = target_ito_id;
l_ito_fieldnames := 'insert into ' || l_ito_table_name || '(';
FOR l_field IN SELECT * FROM ito_fields
WHERE ito_fields.ito_id = target_ito_id
order by ito_fields.id asc
LOOP
l_loop_counter := l_loop_counter +1;
l_fieldtype := l_field.fieldtype;
if not l_first_loop THEN
l_values_to_insert := (l_values_to_insert || ', ');
end if;
if l_field.fieldtype = 1 THEN
l_values_to_insert := (l_values_to_insert || '''' || (fields[l_loop_counter]) || '''' );
elsif l_field.fieldtype = 2 THEN
l_values_to_insert := quote_literal(l_values_to_insert || private.cast_to_integer(fields[l_loop_counter]));
elsif l_field.fieldtype = 3 THEN
l_values_to_insert := quote_literal(l_values_to_insert || private.cast_to_boolean(fields[l_loop_counter]));
elsif l_field.fieldtype = 4 THEN
l_values_to_insert := quote_literal(l_values_to_insert || private.cast_to_float(fields[l_loop_counter]));
else
return 103;
end if;
if l_first_loop then
l_ito_fieldnames := l_ito_fieldnames || l_field.column_name;
l_first_loop := false;
else
l_ito_fieldnames := l_ito_fieldnames || ', ' || l_field.column_name;
end if;
END LOOP;
l_ito_fieldnames := l_ito_fieldnames || l_line_ending;
l_values_to_insert := ((l_values_to_insert) || (l_line_ending));
l_query := (l_ito_fieldnames || l_values_to_insert);
EXECUTE l_query;
return 0;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION add(integer, text[])
OWNER TO postgres;
The table ito_fields stores all the field metadata, so the datatypes, versions and descriptions are stored there.
The ito table stores all the dynamic table data.
The pain point in this function is the quotes. The insert statement is created dynamically, therefore I've got to add quotes around the text fields in the insert statement.
Postgres throws errors as soon as I do that. Even with the quote_literal function it's still a problem, because of the string concatenation (I know, security risks, but that's no problem for now).
I've tried quote_literal, quote_ident, even replacing the quotes (') with a placeholder until the EXECUTE call (replace(query, l_quote_rep, ''''))... I really don't have a clue now how to fix this.
Thanks in advance.
Arrays, aggregates, quote_ident and quote_nullable are your friends.
This should work, and the code is shorter:
CREATE OR REPLACE FUNCTION add(target_ito_id integer, fields text[])
RETURNS integer AS
$BODY$
DECLARE
l_ito_table_name text;
l_query text;
l_fields text;
r_values text;
BEGIN
--get table name
SELECT INTO l_ito_table_name quote_ident(ito_table_name) FROM ito WHERE id = target_ito_id;
-- get column names
SELECT INTO l_fields
array_to_string( array_agg(quote_ident(column_name) ORDER BY ito_fields.id), ',' )
FROM ito_fields
WHERE ito_fields.ito_id = target_ito_id;
-- prepare values
SELECT INTO r_values
array_to_string( array_agg(quote_nullable(u.name)), ',' )
FROM unnest(fields) u(name);
l_query := 'insert into ' || l_ito_table_name || '(' || l_fields || ') values (' || r_values || ')';
EXECUTE l_query;
return 0; -- why 0?
END;
$BODY$
LANGUAGE plpgsql VOLATILE;
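To see why quote_nullable solves the quoting problem, here is a minimal standalone sketch (the table t(a text, b integer) is hypothetical): it safely embeds a value containing a single quote and a NULL.
DO $$
BEGIN
CREATE TEMP TABLE t(a text, b integer);
EXECUTE 'INSERT INTO t(a,b) VALUES ('
        || quote_nullable('it''s') || ','
        || quote_nullable(NULL::text) || ')';
-- the assembled statement reads: INSERT INTO t(a,b) VALUES ('it''s',NULL)
END $$;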

PLPGSQL how to use an array in a execute statement?

I have a PL/pgSQL function that executes a query, but when I put an array into it, it won't parse it; at least it gives me this error:
[FAIL] syntax error at or near "{"
LINE 21: AND c.category_uuid != ANY({"c0857e20-111e-11e0-ac64-0800...
CONTEXT: PL/pgSQL function "get_membercategories" line 22 at FOR over EXECUTE statement
This is the complete line:
AND c.category_uuid != ANY({"c0857e20-111e-11e0-ac64-0800200c9a66"})
This is what my function looks like:
CREATE OR REPLACE FUNCTION get_membercategories(in_company_uuid uuid, in_parent_uuid uuid, in_start integer, in_limit integer, in_sortby character varying, in_order character varying, in_querystring character varying, IN in_excludestring UUID[], OUT out_status integer, OUT out_status_description character varying, OUT out_value character varying[]) RETURNS record
LANGUAGE plpgsql
AS $$
DECLARE
temp_record RECORD;
altered_parent_uuid UUID;
temp_out_value VARCHAR[];
temp_iterator INTEGER := 0;
temp_parent_uuidstring VARCHAR;
temp_excludestring VARCHAR := '';
BEGIN
IF in_parent_uuid IS NOT NULL THEN
temp_parent_uuidstring := 'AND c.parent_uuid = ''' || in_parent_uuid || '''';
ELSE
temp_parent_uuidstring := 'AND c.parent_uuid IS NULL';
END IF;
IF in_excludestring IS NOT NULL THEN
temp_excludestring := 'AND c.category_uuid != ANY(' || in_excludestring || ')';
END IF;
FOR temp_record IN EXECUTE '
SELECT
c.name,
c.category_uuid,
mc.password,
(
SELECT COUNT(*)
FROM
targetgroupusers tgu
WHERE
tgu.targetgroup_uuid = mc.targetgroup_uuid
) AS receivers
FROM
membercategories AS mc
LEFT JOIN
categories AS c
ON
mc.category_uuid = c.category_uuid
WHERE
c.isdeleted IS NULL
' || temp_excludestring || '
AND
mc.company_uuid = ''' || in_company_uuid || '''
' || in_querystring || '
' || temp_parent_uuidstring || '
ORDER BY
' || in_sortby || ' ' || in_order || '
OFFSET
' || in_start || '
LIMIT
' || in_limit
LOOP
temp_out_value[temp_iterator] = ARRAY[temp_record.category_uuid::VARCHAR(36), temp_record.name::CHARACTER VARYING, temp_record.receivers::CHARACTER VARYING, CASE WHEN temp_record.password IS NOT NULL THEN '1' ELSE '0' END];
temp_iterator = temp_iterator+1;
END LOOP;
out_status := 0;
out_status_description := 'Member categories successfully retrieved';
out_value := temp_out_value;
RETURN;
END$$;
I am clueless at this point, any help is very much appreciated!
Your problem is that the array gets expanded to an invalid string (as you can see in the error message). The string representation of an array needs to be enclosed in single quotes again and then cast to the appropriate array type.
The following should work:
IF in_excludestring IS NOT NULL THEN
temp_excludestring := 'AND c.category_uuid != ANY(''' || in_excludestring::varchar||'''::UUID[])';
END IF;
Note that in_excludestring is explicitly cast to varchar to force the string representation of the array, enclosed in single quotes to satisfy the array-literal syntax, and then cast back to a UUID array.
The resulting string will look something like this:
category_uuid != ANY ( '{c0857e20-111e-11e0-ac64-0800200c9a66,7fffda0c-11c9-11e0-967c-a300aec7eb54}'::UUID[] )
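A side note on two points. First, != ANY(array) is true whenever the value differs from at least one element, so with two or more distinct UUIDs it excludes nothing; if the intent is "not in the list", the usual predicate is <> ALL(array). Second, the quoting gymnastics can be avoided entirely by passing the array as a bind parameter with EXECUTE ... USING. A minimal sketch against the same tables:
FOR temp_record IN EXECUTE '
  SELECT c.name, c.category_uuid
  FROM categories AS c
  WHERE c.isdeleted IS NULL
    AND c.category_uuid <> ALL($1)'
USING in_excludestring
LOOP
  -- process temp_record as before
END LOOP;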
