This is the JSON definition that will be provided (just a short example), along with the code I have implemented to get the expected result:
declare @json nvarchar(max)
set @json = '{
"testJson":{
"testID":"Test1",
"Value":[
{
"Value1":"",
"Value2":"",
"Value3":"",
"Type": "1A"
},
{
"Value1":"123",
"Value2":"456",
"Value3":"Automatic",
"Type": "2A"
},
{
"Value1":"789",
"Value2":"159",
"Value3":"Manual",
"Value4":"Success" ,
"Type": "3A"
}
]
}
}'
select
'ValueFields' as groupDef,
-- b.[key],
-- c.[key],
STRING_AGG( c.value , ' | ') as val
from
openjson(@json, '$.testJson.Value') as b
cross apply
openjson(b.value) as c
where
b.[key] not in (select b.[key]
from openjson(@json, '$.testJson.Value') as b
where b.value like ('%1A%'))
As you can see, each element in the array can have a different number of attributes (Value1, ..., Value4, ...), and I only need to consider the elements where the Type attribute is not equal to "1A". The query gives me the result requested; however, I am wondering how I can improve the performance of the code, given that I'm using the LIKE operator in the sub-select and the original JSON document could contain a considerable number of elements in the array.
…
select b.Value --,c.value
from
openjson(@json, '$.testJson.Value')
with
(
Value nvarchar(max) '$' as json,
Type varchar(100) '$.Type'
) as b
--cross apply openjson(b.Value) as c
where b.Type <> '1A'
SELECT
'ValueFields' as groupDef,
J.value as val
FROM
OPENJSON(@json,'$.testJson.Value') J
WHERE
JSON_VALUE([value],'$.Type') <> '1A'
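Either filter can then be combined with the CROSS APPLY and STRING_AGG from the original query to reproduce the requested aggregated output without the LIKE sub-select. A minimal sketch, assuming SQL Server 2017+ for STRING_AGG:
-- Sketch: aggregates every property value (including Type) of the elements
-- whose Type is not '1A', just as the original query does
select
'ValueFields' as groupDef,
STRING_AGG(c.value, ' | ') as val
from
openjson(@json, '$.testJson.Value')
with
(
Value nvarchar(max) '$' as json,
Type varchar(100) '$.Type'
) as b
cross apply openjson(b.Value) as c
where b.Type <> '1A'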
I have used the following query to parse and store JSON elements into table 'pl'.
The 'test' table is used to store the raw JSON.
select
each_attribute ->> 'id' id,
each_attribute ->> 'sd' sd,
each_attribute ->> 'v' v
from test
cross join json_array_elements(json_array) each_section
cross join json_array_elements(each_section -> 'firstt') each_attribute
I am able to view the following JSON values using the above query, but I am not able to insert them into another table using json_populate_recordset.
The table definition I need to insert the nested JSON into:
id integer, character varying(6666), character varying(99999)
Table1 (for the above definition) should store the values for key firstt
Table2 (for the above definition) should store the values for key secondt
Json format:
{
"firstt": [
{
"id": 1,
"sd": "test3",
"v": "2223"
},
{
"id": 2,
"sd": "test2",
"v": "2222"
}],
"secondt": [
{
"id": 1,
"sd": "test3",
"v": "2223"
},
{
"id": 2,
"sd": "test2",
"v": "2222"
}]
}
Please assist. I have tried every possible thing from Stack Overflow solutions, but nothing covers inserting a nested array like this.
Adding the code for the dynamic query. It does not work; the error is 'too few arguments for format'.
do $$
DECLARE
my record;
tb_n varchar(50);
BEGIN
FOR my IN
SELECT json_object_keys(json_array) as t FROM test
LOOP
tb_n := my.t;
EXECUTE format($$ WITH tbl_record_arrays as(
SELECT
entries.*
FROM
test
JOIN LATERAL json_each(json_array) as entries(tbl_name,tbl_data_arr) ON TRUE
)
INSERT INTO %I
SELECT
records.*
FROM
tbl_record_arrays
JOIN LATERAL json_populate_recordset(null::%I,tbl_data_arr) records ON TRUE
WHERE
tbl_name = %I$$,tb_n);
END LOOP;
END;
$$;
To create a plpgsql function that dynamically inserts a json array for a specified key into a specified table, you can do:
CREATE OR REPLACE FUNCTION dynamic_json_insert(key_name text,tbl text) RETURNS VOID AS $$
BEGIN
-- the $<tag>$ syntax allows for generating a multiline string
EXECUTE format($sql$
INSERT INTO %1$I
SELECT
entries.*
FROM test
JOIN LATERAL json_populate_recordset(null::%1$I,json_data -> $1) as entries ON TRUE;
$sql$::text,tbl) USING dynamic_json_insert.key_name;
END;
$$ LANGUAGE plpgsql
VOLATILE --modifies data
STRICT -- Returns NULL if any arguments are NULL
SECURITY INVOKER; --Execute this function with the Role of the caller, rather than the Role that defined the function;
and call it like:
SELECT dynamic_json_insert('firstt','table_1');
If you want to insert into multiple tables using multiple key/table pairs, you can write a plpgsql function that takes a variadic array of (key, table) pairs and then generates a single Common Table Expression (CTE) containing all of the INSERTs in one atomic statement.
First create a custom type:
CREATE TYPE table_key as (
tbl_key text,
relation regclass -- special type that refers to a Postgresql relation
);
Then define the function:
CREATE OR REPLACE FUNCTION dynamic_json_insert(variadic table_keys table_key[]) RETURNS VOID AS $$
DECLARE
tbl_key_len integer = array_length(dynamic_json_insert.table_keys,1);
BEGIN
IF tbl_key_len > 0 THEN
EXECUTE (
--generates a single atomic insert CTE when there are multiple table_keys OR a single insert statement otherwise
--the SELECT is enclosed in parenthesis because it generates a single text value which EXECUTE receives.
SELECT
--append WITH if we have more than 1 table_key (for the CTE)
CASE WHEN tbl_key_len > 1 THEN 'WITH ' ELSE '' END
|| string_agg(
CASE
WHEN
--name the auxiliary statement and put it in parenthesis.
is_aux THEN format('%1$I as (%2$s)','ins_' || tk.tbl_key,stmt) || end_char
ELSE stmt
END,E'\n') || ';'
FROM
--unnest the table_keys argument and get its index (rn)
unnest(dynamic_json_insert.table_keys) WITH ORDINALITY AS tk(tbl_key,relation,rn)
-- the JOIN LATERAL here means "for each unnested table_key, generate the rows of the following subquery"
JOIN LATERAL (
SELECT
rn < tbl_key_len is_aux,
--we need a comma between auxiliary statements
CASE WHEN rn = tbl_key_len - 1 THEN '' ELSE ',' END end_char,
--dynamically generate INSERT statement
format($sql$
INSERT INTO %1$I
SELECT
entries.*
FROM test
JOIN LATERAL json_populate_recordset(null::%1$I,json_data -> %2$L) as entries ON TRUE
$sql$::text,tk.relation,tk.tbl_key) stmt
) stmts ON TRUE
);
END IF;
END;
$$ LANGUAGE plpgsql
VOLATILE --modifies data
STRICT -- Returns NULL if any arguments are NULL
SECURITY INVOKER; --Execute this function with the Role of the caller, rather than the Role that defined the function;
Then call the function like:
SELECT dynamic_json_insert(
('firstt','table_1'),
('secondt','table_2')
);
Because of the use of the variadic keyword, you can pass in each element of the array as an individual argument and Postgres will cast to the appropriate types automatically.
The generated/executed SQL for the above function call will be:
WITH ins_firstt as (
INSERT INTO table_1
SELECT
entries.*
FROM test
JOIN LATERAL json_populate_recordset(null::table_1,json_data -> 'firstt') as entries ON TRUE
)
INSERT INTO table_2
SELECT
entries.*
FROM test
JOIN LATERAL json_populate_recordset(null::table_2,json_data -> 'secondt') as entries ON TRUE
;
In our SQL Server table we have a JSON object stored with an array of strings. I want to programmatically split that string into several columns. However, I cannot seem to get it to work, or even tell whether it's possible.
Is it possible to create multiple columns within the WITH clause, or is it smarter to do it within the SELECT statement?
I trimmed down some of the code to give a simplistic idea of what's given.
The example JSON is similar to { "arr": ["str1 - str2"] }
SELECT b.* FROM [table] a
OUTER APPLY
OPENJSON(a.value, '$.arr')
WITH
(
strSplit1 VARCHAR(100) SPLIT('$.arr', '-',1),
strSplit2 VARCHAR(100) SPLIT('$.arr', '-',2)
) b
Due to the tag [tsql] and the usage of OPENJSON, I assume this is SQL Server, but I might be wrong... Please always specify your RDBMS (with version).
Your JSON is rather weird... I think you've overdone it while trying to simplify this for brevity...
Try this:
DECLARE @tbl TABLE(ID INT IDENTITY,YourJSON NVARCHAR(MAX));
INSERT INTO @tbl VALUES(N'{ "arr": ["str1 - str2"] }') --weird example...
,(N'{ "arr": ["a","b","c"] }'); --array with three elements
SELECT t.ID
,B.[value] AS arr
FROM @tbl t
CROSS APPLY OPENJSON(YourJSON)
WITH(arr NVARCHAR(MAX) AS JSON) A
CROSS APPLY OPENJSON(A.arr) B;
A rather short approach (but fitting to this simple example only) is this:
SELECT t.ID
,A.*
FROM @tbl t
OUTER APPLY OPENJSON(JSON_QUERY(YourJSON,'$.arr')) A
Hint
JSON support was introduced with SQL-Server 2016
UPDATE: If the JSON's content is a weird CSV-string...
There's a trick to transform a CSV into a JSON-array. Try this
DECLARE @tbl TABLE(ID INT IDENTITY,YourJSON NVARCHAR(MAX));
INSERT INTO @tbl VALUES(N'{ "arr": ["str1 - str2"] }') --weird example...
,(N'{ "arr": ["a","b","c"] }') --array with three elements
,(N'{ "arr": ["x-y-z"] }'); --array with three elements in a weird CSV format
SELECT t.ID
,B.[value] AS arr
,C.[value]
FROM @tbl t
CROSS APPLY OPENJSON(YourJSON)
WITH(arr NVARCHAR(MAX) AS JSON) A
CROSS APPLY OPENJSON(A.arr) B
CROSS APPLY OPENJSON('["' + REPLACE(B.[value],'-','","') + '"]') C;
Some simple replacements in OPENJSON('["' + REPLACE(B.[value],'-','","') + '"]') will create a JSON array out of your CSV-string, which can be opened in OPENJSON.
I'm not aware of any way to split a string within JSON. I wonder if the issue is down to your JSON containing a single string rather than multiple values?
The example below shows how to extract each string from the array and, if you wish to go further and split those strings on the hyphen, how to do that using SQL's normal SUBSTRING and CHARINDEX functions.
create table [table]
(
value nvarchar(max)
)
insert [table](value)
values ('{ "arr": ["str1 - str2"] }'), ('{ "arr": ["1234 - 5678","abc - def"] }')
SELECT b.value
, rtrim(substring(b.value,1,charindex('-',b.value)-1))
, ltrim(substring(b.value,charindex('-',b.value)+1,len(b.value)))
FROM [table] a
OUTER APPLY OPENJSON(a.value, '$.arr') b
If you want all values in a single column, you can use the string_split function: https://learn.microsoft.com/en-us/sql/t-sql/functions/string-split-transact-sql?view=sql-server-2017
SELECT ltrim(rtrim(c.value))
FROM [table] a
OUTER APPLY OPENJSON(a.value, '$.arr') b
OUTER APPLY STRING_SPLIT(b.value, '-') c
Let's say that I have multiple rows in a table with an nvarchar column that contains JSON data. Each row would have some simple JSON object, like { "key": "value" }. What is the best way to compose all of these objects into a single JSON object as an array, for a group of rows, such as:
{
"data": [
{ "key": "value" },
{ "key": "value" },
{ "key": "value" }
]
}
There could be any number of groups, and any number of rows per group. Each object could be different.
Currently, my approach would be to use FOR XML PATH to concatenate them into a single string, but this is prone to odd text (e.g. XML-encoded characters) getting in there, which makes it a less than resilient approach. It seems possible that I could use JSON_MODIFY, but I'm not sure how I would use it in a way that accommodates an unknown number of rows per group.
Creating arrays of JSON data in SQL is not exactly straightforward. Take the following table as an example:
CREATE TABLE #test (id INT, jsonCol NVARCHAR(MAX))
INSERT INTO #test
(
id
,jsonCol
)
VALUES
(
1
,N'{ "make": "ford" }'
),
(
1
,N'{ "make": "mazda" }'
)
,
(
2
,N'{ "color": "black" }'
)
You can use the following query to create a single JSON object with the array as you posted in your question:
SELECT DISTINCT [data] =
JSON_QUERY( '[' + STUFF(
(SELECT ',' + jsonCol FROM #test FOR XML PATH (''))
, 1, 1, '')
+ ']')
FROM #test
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER
The output would look like this:
{
"data":[
{ "make": "ford" },
{ "make": "mazda" },
{ "color": "black" }
]
}
You can also correlate the id columns in the main and subqueries to get the json grouped by id.
Here's an example:
SELECT DISTINCT id
, [json] = (SELECT DISTINCT [data] =
JSON_QUERY( '[' + STUFF(
(SELECT ',' + jsonCol FROM #test t3 WHERE t3.id = t2.id FOR XML PATH (''))
, 1, 1, '')
+ ']')
FROM #test t2
WHERE t2.id = t1.id
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)
FROM #test t1
Output would be:
id json
1 {"data":[{ "make": "ford" },{ "make": "mazda" }]}
2 {"data":[{ "color": "black" }]}
The SQL Server OPENJSON() function can take a JSON document and convert it into a SQL table with key/value pairs, e.g.:
DECLARE @json NVARCHAR(MAX);
SET @json = '{
"key1": "val1",
"key2": "val2",
"key3": "val3"
}';
SELECT * FROM OPENJSON(@json, '$')
Result:
key value type
--------------------
key1 val1 1
key2 val2 1
key3 val3 1
What is the best general-purpose method for converting this key/value table back into a json array?
Why? If we can do this with a single function, it opens up a range of json modifications which are otherwise not possible on sql server, e.g.:
Re-order elements
Rename properties (key names)
Split json array into smaller arrays / combine json arrays
Compare json arrays (which key/value elements exists in both jsons? What are the differences?)
Clean json (remove syntactical whitespace/newlines to compress it)
Now, I could start with a simple CONCAT('"',[key],'":"',[value]) and then do a comma-list aggregation. But if I want code that is both easy to apply across my codebase and works for all data types, this is not a simple task. Looking at the JSON format definition, the conversion should take into account a) the 6 different data types, b) escape characters, c) SQL NULL/JSON null handling, and d) whatever I may have overlooked. At a minimum, the example below should be supported:
DECLARE @test_json NVARCHAR(MAX);
SET @test_json = '{
"myNull": null,
"myString": "start_\\_\"_\/_\b_\f_\n_\r_\t_\u2600_stop",
"myNumber": 3.14,
"myBool": true,
"myArray": ["1", 2],
"myObject": {"key":"val"}
}'
SELECT * FROM OPENJSON(@test_json, '$')
Result:
key value type
------------------------------------------------
myNull NULL 0
myString start_\_"_/___ _ _ _☀_stop 1
myNumber 3.14 2
myBool true 3
myArray ["1", 2] 4
myObject {"key":"val"} 5
For the string-aggregation part, we have long suffered the 'FOR XML PATH'-pain. Luckily we have STRING_AGG() on SQL2017/AzureDB, and I will accept a solution depending on STRING_AGG().
You can do this with the following command, using FOR JSON AUTO:
select * from [table] for json auto
My result:
[{"LogId":1,"DtLog":"2017-09-30T21:04:45.6700000","FileId":1},
{"LogId":2,"DtLog":"2017-09-30T21:08:35.8633333","FileId":3},{"LogId":3,"DtLog":"2017-09-30T21:08:36.4433333","FileId":2},{"LogId":4,"DtLog":"2017-09-30T21:08:36.9866667","FileId":12},{"LogId":5,"DtLog":"2017-09-30T21:15:22.5366667","FileId":13},{"LogId":6,"DtLog":"2017-09-30T21:38:43.7866667","FileId":17}]
I use string_agg
declare @json table ( Name varchar(80), Value varchar(max) )
insert into @json
select [Key], Value from openjson(@attributes)
insert into @json values ( 'name', @name )
insert into @json values ( 'title', @title )
insert into @json values ( 'description', @description )
set @attributes = '{' + (select STRING_AGG( '"' + Name + '":"' +
REPLACE (value, '"', '\"' ) +'"', ',') from @json) + '}'
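Note that the single REPLACE above only escapes embedded double quotes. Since SQL Server 2016 there is STRING_ESCAPE(), which covers the full set of JSON escape characters; a sketch of the same aggregation using it (this still treats every value as a JSON string, so it does not address the data-type requirements from the question):
-- Sketch (assumes SQL Server 2016+ for STRING_ESCAPE): escapes quotes,
-- backslashes and control characters that the plain REPLACE misses
set @attributes = '{' + (select STRING_AGG(
'"' + STRING_ESCAPE(Name, 'json') + '":"' + STRING_ESCAPE(Value, 'json') + '"', ',')
from @json) + '}'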
I have the following scenario:
class question {
idQuestion: string;
question: string;
type: string;
}
class options {
idOption: string;
option: string;
}
My SQL returns:
idquestion question type idoption option
i.e:
question1 foo? textbox null null
question2 bar? select option1 aaa
question2 bar? select option2 bbb
question3 foobar? radio option1 aaa
question3 foobar? radio option2 bbb
question3 foobar? radio option3 ccc
I want to map the SQL response to the following interface:
questionOptions{
question: Question;
options: Option[];
}
How can I make this possible? In the end, I want a list of questions, each one containing its options.
P.S.: Would it be a better option to make the association between questions and options in the SQL database?
EDIT:
From the sample data I want to obtain the following json:
[
{
idQuestion: "question1",
question: "foo?",
options: []
},
{
idQuestion: "question2",
question: "bar?"
options: [
{
idOption: "option1",
option: "aaa"
},
{
idOption: "option2",
option: "bbb"
}
]
}
]
I have a helper function which will transform virtually any row/data set into a JSON String/Array.
Assuming 2012+
Declare @YourData table (idQuestion varchar(50),question varchar(50), type varchar(50),idOption varchar(50),[option] varchar(50))
Insert Into @YourData values
('question1','foo?','textbox', null, null),
('question2','bar?','select','option1','aaa'),
('question2','bar?','select','option2','bbb'),
('question3','foobar?','radio','option1','aaa'),
('question3','foobar?','radio','option2','bbb'),
('question3','foobar?','radio','option3','ccc')
Declare @JSON varchar(max) = ''
Select @JSON=@JSON+','+String
From (
Select String=Replace(B.JSON,'}',',"options":'+IsNull(C.JSON,'[]')+'}')
From (Select Distinct idquestion,question From @YourData) A
Cross Apply (Select JSON=[dbo].[udf-Str-JSON](0,0,(Select A.idQuestion,A.question for XML RAW))) B
Cross Apply (Select JSON=[dbo].[udf-Str-JSON](0,0,(Select idOption,[option] from @YourData Where idquestion=A.idquestion for XML RAW))) C
) A
Select '['+Stuff(@JSON,1,1,'')+']'
Returns the requested JSON array of questions, each with its nested options.
The UDF
CREATE FUNCTION [dbo].[udf-Str-JSON] (@IncludeHead int,@ToLowerCase int,@XML xml)
Returns varchar(max)
AS
Begin
Declare @Head varchar(max) = '',@JSON varchar(max) = ''
; with cteEAV as (Select RowNr =Row_Number() over (Order By (Select NULL))
,Entity = xRow.value('@*[1]','varchar(100)')
,Attribute = xAtt.value('local-name(.)','varchar(100)')
,Value = xAtt.value('.','varchar(max)')
From @XML.nodes('/row') As R(xRow)
Cross Apply R.xRow.nodes('./@*') As A(xAtt) )
,cteSum as (Select Records=count(Distinct Entity)
,Head = IIF(@IncludeHead=0,IIF(count(Distinct Entity)<=1,'[getResults]','[[getResults]]'),Concat('{"status":{"successful":"true","timestamp":"',Format(GetUTCDate(),'yyyy-MM-dd hh:mm:ss '),'GMT','","rows":"',count(Distinct Entity),'"},"results":[[getResults]]}') )
From cteEAV)
,cteBld as (Select *
,NewRow=IIF(Lag(Entity,1) over (Partition By Entity Order By (Select NULL))=Entity,'',',{')
,EndRow=IIF(Lead(Entity,1) over (Partition By Entity Order By (Select NULL))=Entity,',','}')
,JSON=Concat('"',IIF(@ToLowerCase=1,Lower(Attribute),Attribute),'":','"',Value,'"')
From cteEAV )
Select @JSON = @JSON+NewRow+JSON+EndRow,@Head = Head From cteBld, cteSum
Return Replace(@Head,'[getResults]',Stuff(@JSON,1,1,''))
End
-- Parameter 1: @IncludeHead 1/0
-- Parameter 2: @ToLowerCase 1/0 (converts field names to lowercase)
-- Parameter 3: (Select * From ... for XML RAW)
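If you are on SQL Server 2016 or later, the same nesting can also be produced without the helper UDF by using native FOR JSON with a correlated subquery. A minimal sketch against the same @YourData table variable (a hypothetical alternative, not the UDF approach above):
-- Sketch (assumes SQL Server 2016+ for FOR JSON): JSON_QUERY keeps the
-- correlated subquery's array as JSON instead of an escaped string, and
-- IsNull supplies an empty array for questions without options
Select idQuestion = A.idQuestion
,question = A.question
,[options] = JSON_QUERY(IsNull((Select idOption,[option]
From @YourData
Where idquestion=A.idquestion and idOption is not null
For JSON PATH),'[]'))
From (Select Distinct idQuestion,question From @YourData) A
For JSON PATH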