Updating JSON in postgres based on dynamic input of type json array - arrays

I have a column in postgres table which is of JSON type and looks something like
{
"Name": "Some Name",
"Stages": [
{
"Title": "Early Flight",
"Tags": [....],
"Date": "2021-11-05T00:00:00",
"CloseDate": ""
},
{
"Title": "Midway Flight",
"Tags": [....],
"Date": "2021-11-05T00:00:00",
"CloseDate": ""
},
{
"Title": "Pro Flight",
"Tags": [....],
"Date": "2021-11-05T00:00:00",
"CloseDate": ""
},
{
"Title": "Expert Start",
"Tags": [....],
"Date": "2021-11-05T00:00:00",
"CloseDate": ""
}
]
}
I want to update the Date for each of the items that are provided in newInputItem,
meaning the Date for Midway Flight and Expert Flight needs to change.
I tried using a CTE as below, but the query updates only the first element of the input array — in this case, only Midway Flight gets updated.
-- NOTE(review): this is the original, non-working attempt, quoted as-is.
-- Two defects are visible in the text as quoted:
--   1. The oldItem FROM clause is missing a comma (or CROSS JOIN LATERAL)
--      between department.Process_Instance and jsonb_array_elements(...),
--      which is a syntax error as written.
--   2. newInputItem reads ->> 'NewDate', but the literal JSON array only
--      has "Title" and "Date" keys, so NewDate is always NULL.
-- Beyond that, jsonb_set is applied once per joined row against the SAME
-- original process_instance_data value, so only one row's change can
-- survive the UPDATE -- which is the behaviour the question describes.
WITH newInputItem as
(
select
arr.newInputItem ::json ->> 'Title' as State,
(arr.newInputItem ::json ->> 'NewDate')::timestamp as NewDate  -- NOTE(review): always NULL; the input key is "Date"
from
json_array_elements('[
{"Title" : "Midway Flight", "Date" : "01 / 01 / 1777"},
{"Title" : "Expert Flight", "Date" : "01 / 01 / 1999"}
]') WITH ORDINALITY arr(newInputItem, index)
),
oldItem AS
(
SELECT
('{Stages,' || index - 1 || ',"Date"}')::TEXT[] AS path,  -- jsonb path: Stages -> (0-based array index) -> Date
user_id,
arr.oldItem ::json ->> 'Title' AS title
FROM
department.Process_Instance  -- NOTE(review): missing comma / CROSS JOIN LATERAL before the next line
jsonb_array_elements(process_instance_data -> 'Stages') WITH ORDINALITY arr(oldItem, index)
WHERE
department.Process_Instance."user_id" = 17
)
UPDATE
department.Process_Instance pi
SET
-- Each matched row re-applies jsonb_set to the unmodified column value,
-- so multiple matches do not accumulate.
process_instance_data = jsonb_set(process_instance_data, oldItem.path, to_json(newInputItem.NewDate)::JSONB)
FROM
oldItem,
newInputItem
WHERE
pi.user_id = oldItem.user_id
AND oldItem.title = newInputItem.State;

In order to make several updates into the same jsonb data within the same query, you need to create an aggregate function based on the standard jsonb_set function :
-- State-transition function for the aggregate below. The first argument x
-- is the aggregate state (NULL on the first row); y is the incoming jsonb
-- document. COALESCE(x, y) seeds the state with the document on the first
-- call, then threads the accumulated result through the standard
-- 4-argument jsonb_set on every subsequent call.
-- (5 arguments, so it does not clash with the built-in 4-argument jsonb_set.)
CREATE OR REPLACE FUNCTION jsonb_set (x jsonb, y jsonb, p text[], z jsonb, b boolean)
RETURNS jsonb LANGUAGE sql IMMUTABLE AS
$$ SELECT jsonb_set (COALESCE(x, y), p, z, b) ; $$ ;
-- Aggregate wrapper: applies one jsonb_set per input row, so several paths
-- in the same jsonb document can be updated within a single GROUP BY group.
CREATE AGGREGATE jsonb_set_agg(jsonb, text[], jsonb, boolean)
( sfunc = jsonb_set, stype = jsonb) ;
Then, as you can't call an aggregate function directly in the SET clause of an UPDATE statement, you have to insert an additional cte before your UPDATE statement :
-- Multi-path update in a single statement, using the jsonb_set_agg
-- aggregate defined above to fold every per-stage change into one jsonb
-- value per user before the UPDATE.
-- Fixes relative to the quoted attempt:
--   * newInputItem reads the "Date" key (the input JSON has no "NewDate");
--   * oldItem carries process_instance_data along, because the final CTE
--     aggregates over it and it was otherwise out of scope there;
--   * the lateral call to jsonb_array_elements is joined with an explicit
--     CROSS JOIN LATERAL (the original was missing the comma).
WITH newInputItem AS
(
    SELECT
        arr.newInputItem::json ->> 'Title' AS State,
        (arr.newInputItem::json ->> 'Date')::timestamp AS NewDate
    FROM
        json_array_elements('[
            {"Title" : "Midway Flight", "Date" : "01 / 01 / 1777"},
            {"Title" : "Expert Flight", "Date" : "01 / 01 / 1999"}
        ]') WITH ORDINALITY arr(newInputItem, index)
), oldItem AS
(
    -- One row per stage of user 17, with the jsonb path of that stage's
    -- "Date" field (array indexes in jsonb paths are 0-based, ordinality
    -- is 1-based, hence index - 1).
    SELECT
        ('{Stages,' || index - 1 || ',"Date"}')::TEXT[] AS path,
        user_id,
        process_instance_data,  -- needed by the aggregation in "final"
        arr.oldItem::json ->> 'Title' AS title
    FROM
        department.Process_Instance
        CROSS JOIN LATERAL jsonb_array_elements(process_instance_data -> 'Stages')
            WITH ORDINALITY arr(oldItem, index)
    WHERE
        department.Process_Instance."user_id" = 17
), final AS
(
    -- Fold all matching per-stage updates into a single jsonb document per
    -- user; an aggregate cannot be called in UPDATE ... SET directly.
    SELECT
        oldItem.user_id,
        jsonb_set_agg(oldItem.process_instance_data, oldItem.path,
                      to_json(newInputItem.NewDate)::JSONB, True) AS data_final
    FROM oldItem
    INNER JOIN newInputItem
        ON oldItem.title = newInputItem.State
    GROUP BY oldItem.user_id
)
UPDATE
    department.Process_Instance pi
SET
    process_instance_data = final.data_final
FROM
    final
WHERE
    pi.user_id = final.user_id ;

Related

I need to get the duplicate data in column parenthesis

-- NOTE(review): original non-working attempt, quoted as-is. As written this
-- is not valid Oracle syntax:
--   * "SET s.DATA" -- the target column of SET must not be alias-qualified;
--   * "REPLACE '$...' = (SELECT ...)" -- json_transform operations take a
--     path and a value expression; the quoted keyword placement with a
--     multi-table scalar subquery does not parse;
--   * JSON_TABLE(d.tab, ...) passes the table name rather than the JSON
--     column as the document argument.
UPDATE tab s
SET s.DATA = json_transform(DATA, REPLACE '$.dataFilterDefn.columns[1]' = (SELECT JSON_ARRAYAGG(JSON FORMAT JSON RETURNING CLOB)
FROM
(SELECT JSON
FROM
(SELECT j.json || ',' || JSON AS json
FROM tab d
CROSS APPLY JSON_TABLE(d.tab, '$.dataFilterDefn.columns[1]' COLUMNS(json CLOB FORMAT JSON PATH '$')) j
WHERE d.UID = 252))));
The data looks like this:
"columns":[
{
"label":"Subcategory",
"aggFn":"NONE",
"datasetId":"ADVENTURE_WORKS",
"fieldName":"SUB_CAT",
"id":"FILTER-1"
}
]
My expectation:
"columns":[
{
"label":"Subcategory",
"aggFn":"NONE",
"datasetId":"ADVENTURE_WORKS",
"fieldName":"SUB_CAT",
"id":"FILTER-1"
},
{
"label":"Subcategory",
"aggFn":"NONE",
"datasetId":"ADVENTURE_WORKS",
"fieldName":"SUB_CAT",
"id":"FILTER-1"
}
]
I want the columns data to be duplicated. The column values will be different; they will not be the same. I need to update the column values dynamically — they should not be hardcoded. How can I achieve this in JSON using Oracle version 19c?
Using json_transform you can append items to an array. Using json_query you can extract the first element of the array.
So you can do something like this:
-- Build a one-row sample document, then append a copy of the first array
-- element to $.columns: json_query extracts columns[0] as a JSON fragment,
-- and json_transform's APPEND operation adds it to the end of the array.
with rws as (
select '{ "columns":[
{
"label":"Subcategory",
"aggFn":"NONE",
"datasetId":"ADVENTURE_WORKS",
"fieldName":"SUB_CAT",
"id":"ART-DATA-FILTER-1"
}
] }' jdata
from dual
)
-- NOTE: json_transform requires a recent 19c release update (see note below).
select json_transform (
jdata,
append '$.columns' = json_query ( jdata, '$.columns[0]' )
returning varchar2 pretty
)
from rws r;
{
"columns" :
[
{
"label" : "Subcategory",
"aggFn" : "NONE",
"datasetId" : "ADVENTURE_WORKS",
"fieldName" : "SUB_CAT",
"id" : "ART-DATA-FILTER-1"
},
{
"label" : "Subcategory",
"aggFn" : "NONE",
"datasetId" : "ADVENTURE_WORKS",
"fieldName" : "SUB_CAT",
"id" : "ART-DATA-FILTER-1"
}
]
}
Note that json_transform was only added in a recent release update, so ensure your 19c version is fully patched and up-to-date.

Hasura query that works similar to a SQL Left join

Assume the following SQL Server table structure
dbo.users
(
id int not null,
favoriteBook int null --fk
)
dbo.books
(
id int not null,
title nvarchar(max)
)
Assuming a user exists that has not entered in their favorite book I'd like to use Hasura's GraphQL engine to search for all users and return the title of their favorite book if one exists, or null if it doesn't.
I'd expect something like this to work:
query Users{
users {
id
FavoriteBook {
title
}
}
}
However when using this query the engine returns the following error:
{
"errors": [
{
"extensions": {
"internal": {
"tag": "unsuccessful_return_code",
"contents": [
"odbc_SQLExecDirectW",
-1,
"[Microsoft][ODBC Driver 17 for SQL Server][SQL Server]JSON text is not properly formatted. Unexpected character '.' is found at position 4.[Microsoft][ODBC Driver 17 for SQL Server][SQL Server]JSON text is not properly formatted. Unexpected character '.' is found at position 4."
]
},
"path": "$",
"code": "unexpected"
},
"message": "sql server exception"
}
]
}
The SQL generated by Hasura looks something like this:
-- Hasura-generated T-SQL as quoted in the error report.
-- NOTE(review): as quoted, the OUTER APPLY alias is [or_Booksr1] while the
-- SELECT list reads [or_books1].[json] -- those names do not match, so this
-- exact text would not run; this looks like a transcription slip.
-- NOTE(review): the runtime failure ("JSON text is not properly formatted")
-- presumably arises when json_query() is fed the fallback produced by the
-- inner isnull() for users with no favourite book -- confirm against the
-- actual generated SQL.
select
isnull (
(
select
[t_Users1].[id] as [id]
,json_query ( [or_books1].[json] ) as [FavoriteBook]
from [dbo].[users] as [t_Users1]
outer apply
(
-- Per-user subquery: the favourite book serialised as a JSON object,
-- or the string 'null' when there is no match.
select
isnull ((
select
[t_Books1].[id] as [id]
from [dbo].[books] as [t_Books1]
where
(([t_Books1].[id]) = ([t_Users1].[favoriteBook]))
for json path, include_null_values, without_array_wrapper
)
,'null'
)
) as [or_Booksr1]([json])  -- NOTE(review): never matches [or_books1] above
order by
(
select null
) /* ORDER BY is required for OFFSET */ offset 0 rows fetch next 1000 rows only
for json path, include_null_values
)
,'[]'
)
;
Ideally the output would look something like this:
{
"data": {
"users": [
{
"id": 254,
"favoriteBook": {
"id": 1,
"title": "book about foo"
}
},
{
"id": 892,
"favoriteBook": null
}
]
}
}

Snowflake: JSON Data in Array

JSON data as below
{name : Mike, job : [{name: abc, value: 123},{name: def,value: 456}]}
How to retrieve the value of name = abc and def?
EDIT:(SOLUTION) Got the solution myself thanks
-- Parse the JSON once, then LATERAL FLATTEN over the "job" array: one
-- output row per array element; outer => true keeps a row even when the
-- "job" array is missing or empty.
WITH x AS (
SELECT parse_json('{"name" : "Mike", "job" : [{"name": "abc", "value": "123"},{"name": "def","value": "456"}]}' ) as payload_json)
select x.payload_json:name,          -- top-level "name" ("Mike"), repeated on every row
job.value:name::varchar as name,     -- per-element "name": abc, def
job.value:value::varchar as value    -- per-element "value": 123, 456
from x,
lateral flatten( input => x.payload_json:job, outer => true) as job;
I got the answer myself as below
-- NOTE(review): byte-for-byte the same query as in the EDIT above; the
-- post repeats its own solution. See the comments there: FLATTEN expands
-- the "job" array to one row per element, outer => true preserves rows
-- with no elements.
WITH x AS (
SELECT parse_json('{"name" : "Mike", "job" : [{"name": "abc", "value": "123"},{"name": "def","value": "456"}]}' ) as payload_json)
select x.payload_json:name,
job.value:name::varchar as name,
job.value:value::varchar as value
from x,
lateral flatten( input => x.payload_json:job, outer => true) as job;

Is there any way to build below using UDF in snowflake instead of flattening?

i have below tables
table1:
Payload(column)
{
"list": "212=1.00,214"
}
table 2 looks like below
i want result like below using UDF instead of using flatten
{
"test13": {
"code": "212",
"desc": "success",
"value": "1.00"
},
"test15": {
"code": "214",
"desc": "Impression",
"value": ""
}
}
You ought to be able to do JavaScript UDTFs (User-Defined Table Functions) https://docs.snowflake.com/en/sql-reference/udf-js-table-functions.html that can take the single row payload and return multiple rows.
So the SQL to do this, I understand you don't want:
-- Stand-ins for the real tables: table1 holds the payload containing the
-- comma-separated "list"; table2 maps each code's payload to an output key
-- and a description.
with table1 AS (
select parse_json('{"list": "212=1.00,214"}') as payload
), table2 AS (
select parse_json(column1) as payload
,column2 as key
,column3 as value
from values ('{"id":"212"}', 'test13', 'success' ),
('{"id":"214"}', 'test15', 'impression' )
-- Split "212=1.00,214" on commas, then each piece on '=' into id/val;
-- a piece without '=' (e.g. "214") yields a NULL val.
), table1_demunged AS (
select split(f.value,'=')[0] as id
,split(f.value,'=')[1] as val
from table1 t, lateral flatten(input=>split(t.payload:list,',')) f
-- Join the parsed codes back to their key/description rows.
), tables_joined as (
select t2.key as obj_key
,t1.id as code
,t2.value as desc
,t1.val as value
from table2 t2
join table1_demunged t1 on t2.payload:id = t1.id
-- Build one {code, desc, value} object per row; COALESCE turns the
-- missing value into an empty string, matching the expected output.
), as_objects AS (
select obj_key, object_construct('code', code, 'desc', desc, 'value', coalesce(value,'')) as res
from tables_joined t
)
-- Collapse all rows into a single object keyed by test13/test15.
select object_agg(obj_key, res) object
from as_objects
group by true;
gives the result you do want:
OBJECT
{ "test13": { "code": "212", "desc": "success", "value": "1.00" }, "test15": { "code": "214", "desc": "impression", "value": "" } }
But I do not understand whether you really want a UDF to do all of that — given it's a FLATTEN, then a JOIN, then some OBJECT_ functions — or whether you just want to avoid the FLATTEN because it's tricky SQL that you would rather hide behind a UDF, or perhaps you are using some system that cannot parse the `=>` and thus need the FLATTEN hidden behind a UDF; but in that case the UDF cannot do all the joins for you.
It just feels like there is more to the question than has been asked.

Query elements in a nested array of a json object in postgresql 9.4 or 9.5

{
"studentID": 1,
"StudentName": "jhon",
"Data":{
"schoolname":"school1",
"enrolmentInfo":
[{
"year":"2015",
"info":
[
{
"courseID":"csc213",
"school":"IT",
"enrollmentdate":"2015-01-01",
"finshdate":"2015-07-01",
"grade": 80 },
{
"courseID":"csc113",
"school":"IT1",
"enrollmentdate":"2015-09-02",
"finshdate":null,
"grade": 90 } ]
},
{
"year":"2014",
"info":
[{
"courseID":"info233",
"school":"IT",
"enrollmentdate":"2014-03-11",
"finshdate":"2014-09-01",
"grade": 81 },
{
"courseID":"csc783",
"school":"IT",
"enrollmentdate":"2014-01-02",
"finshdate":"2014-08-01",
"grade": 87 } ]
} ]
}
}
I have stored in postgresql database json objects of the above format. Each object consists of informations about a certain student with enrollment information. I have complex objects with nested array inside arrays. I am trying to select all the element inside the "info" array. I tried to use the following query:
-- NOTE(review): original non-working attempt, quoted as-is. Several
-- problems are visible:
--   * JSON key lookup is case-sensitive: ->'enrolmentinfo' does not match
--     the document's "enrolmentInfo" key;
--   * the CTE is declared as x (info) with ONE column, but the recursive
--     branch selects five expressions -- a column-count mismatch;
--   * json_each(json_array_elements(info)) nests two set-returning
--     functions, and once "info" holds scalar values this would raise the
--     reported "cannot call json_array_elements on a scalar" error
--     (presumably -- confirm against the actual data);
--   * recursion is unnecessary here: a plain lateral expansion of the two
--     array levels suffices, as the answer below shows.
with recursive x (info) as (select value->'info' from jsontesting r, json_array_elements(r.data->'Data'->'enrolmentinfo')
UNION ALL
SELECT (e).value->'courseID', (e).value->'school', (e).value->'grade',(e).value->'enrollmentdate', (e).value->'finshdate'
from (select json_each(json_array_elements (info)) e from x) p)
select * from x;
This query is not working and it is giving the following error:"cannot call json_array_elements on a scalar". Is there any other query that I can use to extract the elements of the nested array "info"??
-- assumes jsontesting.data holds the JSON document shown in the question
-- Step 1: unnest Data -> enrolmentInfo (one row per year entry), keeping
-- each entry's "info" array. Step 2: unnest that array and project the
-- per-course fields as text.
WITH year_entries AS (
    SELECT entry -> 'info' AS info
    FROM jsontesting t,
         json_array_elements(t.data -> 'Data' -> 'enrolmentInfo') AS entry
)
SELECT course ->> 'courseID',
       course ->> 'school',
       course ->> 'enrollmentdate',
       course ->> 'finshdate',
       course ->> 'grade'
FROM year_entries ye,
     json_array_elements(ye.info) AS course;

Resources