SQL Server: I want to access a JSON array of objects

DECLARE @json varchar(max),
@errors varchar(max),
@policy_number varchar(10)
SET @json =
'{
"returnMessage": "",
"policy_number": "12345",
"documents": {
"policy_document": "",
"tax_invoice_document": ""
},
"errors": [
{
"error_code": "999",
"error_message1": "Error"
}
]
}'
I want to get error_code and error_message1 from the errors array. Here is my attempt:
SELECT
@policy_number = policy_number,
@errors = errors
FROM
OPENJSON(@json) WITH
(
policy_number VARCHAR(10) '$.policy_number',
errors VARCHAR(max) '$.errors'
)

If you only want data from the errors property, you can go straight to it with a single OPENJSON call:
DECLARE @json varchar(max)
SET @json =
'{
"returnMessage": "",
"policy_number": "12345",
"documents": {
"policy_document": "",
"tax_invoice_document": ""
},
"errors": [
{
"error_code": "999",
"error_message1": "Error"
}
]
}'
SELECT
policy_number = JSON_VALUE(@json, '$.policy_number'),
error_code,
error_message1
FROM
OPENJSON(@json, '$.errors')
WITH (
error_code VARCHAR(100),
error_message1 VARCHAR(100)
);
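For the sample JSON above, this should return a single row along the lines of:

policy_number | error_code | error_message1
--------------+------------+----------------
12345         | 999        | Error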

You're close. You need AS JSON in your OPENJSON and then you can use CROSS APPLY to pull your desired values out of errors.
declare @json varchar(max), @errors varchar(max), @policy_number varchar(10)
set @json =
'{
"returnMessage": "",
"policy_number": "12345",
"documents": {
"policy_document": "",
"tax_invoice_document": ""
},
"errors": [
{
"error_code": "999",
"error_message1": "Error"
}
]
}';
SELECT error_code, error_message1
FROM OPENJSON(@json)
WITH (errors NVARCHAR(MAX) AS JSON) l1
CROSS APPLY OPENJSON(l1.errors)
WITH (
error_code VARCHAR(100),
error_message1 VARCHAR(100)
);

Related

TSQL - Pivot not returning all rows

I have the following JSON array data set that needs to be parsed into 2 table rows:
[
{ "eid": "ABCDGD",
"name": "Carol E",
"email": "carole#gmail.com",
"role": "Recruiter"
},
{ "eid": "HDHDK",
"name": "Mark H",
"email": "markh#gmail.com",
"role": "Manager"
}
]
I need the code below to return both sets of employee information but it only returns one. How do I achieve this?
select p.* from
(SELECT j2.[key] as _keys, j2.Value as _vals
FROM OPENJSON(@c) j1
CROSS APPLY OPENJSON(j1.Value) j2
) as pds
PIVOT
(
max(pds._vals)
FOR pds._keys IN([eid], [name], [email], [role])
) AS p
SQLfiddle - http://sqlfiddle.com/#!18/9eecb/54970
No need to pivot; just specify your JSON columns in a WITH clause and OPENJSON will give you the desired results.
SELECT *
FROM OPENJSON(@c) WITH (
eid varchar(200) '$.eid',
name varchar(200) '$.name',
email varchar(200) '$.email',
role varchar(200) '$.role'
) j1
JSON already maintains a table-like structure internally, so it can be converted directly into a table with OPENJSON.
The syntax of OPENJSON, per the documentation, is:
OPENJSON( jsonExpression [ , path ] ) [ <with_clause> ]
<with_clause> ::= WITH ( { colName type [ column_path ] [ AS JSON ] } [ ,...n ] )
You only need to pass the column names as they appear in the JSON, and OPENJSON converts the JSON into a SQL table. You can find more details in the OPENJSON documentation.
For your query above, you may try this:
DECLARE @json NVARCHAR(MAX)
SET @json =
N'[
{ "eid": "ABCDGD",
"name": "Carol E",
"email": "carole#gmail.com",
"role": "Recruiter"
},
{ "eid": "HDHDK",
"name": "Mark H",
"email": "markh#gmail.com",
"role": "Manager"
}
]'
SELECT *
FROM OPENJSON(@json)
WITH (
eid nvarchar(50) '$.eid',
name nvarchar(50) '$.name',
email nvarchar(50) '$.email',
role nvarchar(50) '$.role'
)
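For the sample array above, this should return both rows, e.g.:

eid    | name    | email            | role
-------+---------+------------------+-----------
ABCDGD | Carol E | carole@gmail.com | Recruiter
HDHDK  | Mark H  | markh@gmail.com  | Manager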

How to join JSON to update multiple rows by primary key

I am trying to update a log with JSON in SQL Server 2017. I can update a data point with json_value, which covers a few cases, but would ultimately like to join in incoming JSON.
Sample table:
key | col_1                         | col_2         | col_3
----+-------------------------------+---------------+------------------
1   | json.lines[0].data.meta.data  | json.lines[0] | json.header.note
2   | json.lines[1].data.meta.data  | json.lines[1] | json.header.note
3   | json.lines[2].data.meta.data  | json.lines[2] | json.header.note
I'd like to update col_1 with a single property and update col_2 with an object as a string.
Sample JSON:
declare @json nvarchar(max) = '[{
header: {
note: 'some note'
}, lines: [{
data {
id: {
key: 0,
name: 'item_1'
},
meta: {
data: 'item_1_data'
}
}, {...}, {...}
}]
}]'
Query:
update logTable set
col_1 = json_value(@json,'$.lines[__index__].data.meta.data'), -- what would the syntax for __index__ be?
col_2 = j.lines[key], -- pseudo code
col_3 = json_value(@json, '$.header.note')
inner join openjson(@json) j
on json_value(@json,'$.line[?].id.key') = logTable.[key] -- ? denotes indices that I'd like to iterate = join over
Expected Output:
key | col_1 | col_2 | col_3
----+---------------+----------------------------+--------------------------
1 | 'item_1_data' | 'data: { id: { key: 0...}' | '{header: { note: ...} }'
2 | 'item_2_data' | 'data: { id: { key: 1...}' | '{header: { note: ...} }'
3 | 'item_3_data' | 'data: { id: { key: 2...}' | '{header: { note: ...} }'
I'm not sure how to handle iterating over the $.line indices, but think a join would solve this if properly implemented.
How can I join to arrays of objects to update SQL rows by primary key?
Original answer:
You may try to parse your JSON using OPENJSON with an explicit schema (note that your JSON is not valid):
Table and JSON:
CREATE TABLE #Data (
[key] int,
col_1 nvarchar(100),
col_2 nvarchar(max)
)
INSERT INTO #Data
([key], [col_1], [col_2])
VALUES
(1, N'', N''),
(2, N'', N''),
(3, N'', N'')
DECLARE @json nvarchar(max) = N'[{
"lines": [
{
"data": {
"id": {
"key": 1,
"name": "item_1"
},
"meta": {
"data": "item_1_data"
}
}
},
{
"data": {
"id": {
"key": 2,
"name": "item_2"
},
"meta": {
"data": "item_2_data"
}
}
},
{
"data": {
"id": {
"key": 3,
"name": "item_3"
},
"meta": {
"data": "item_3_data"
}
}
}
]
}]'
Statement:
UPDATE #Data
SET
col_1 = j.metadata,
col_2 = j.data
FROM #Data
INNER JOIN (
SELECT *
FROM OPENJSON(@json, '$[0].lines') WITH (
[key] int '$.data.id.key',
metadata nvarchar(100) '$.data.meta.data',
data nvarchar(max) '$' AS JSON
)
) j ON #Data.[key] = j.[key]
Update:
Header is common for all rows, so use JSON_QUERY() to update the table:
Table and JSON:
CREATE TABLE #Data (
[key] int,
col_1 nvarchar(100),
col_2 nvarchar(max),
col_3 nvarchar(max)
)
INSERT INTO #Data
([key], col_1, col_2, col_3)
VALUES
(1, N'', N'', N''),
(2, N'', N'', N''),
(3, N'', N'', N'')
DECLARE @json nvarchar(max) = N'[{
"header": {
"note": "some note"
},
"lines": [
{
"data": {
"id": {
"key": 1,
"name": "item_1"
},
"meta": {
"data": "item_1_data"
}
}
},
{
"data": {
"id": {
"key": 2,
"name": "item_2"
},
"meta": {
"data": "item_2_data"
}
}
},
{
"data": {
"id": {
"key": 3,
"name": "item_3"
},
"meta": {
"data": "item_3_data"
}
}
}
]
}]'
Statement:
UPDATE #Data
SET
col_1 = j.metadata,
col_2 = j.data,
col_3 = JSON_QUERY(@json, '$[0].header')
FROM #Data
INNER JOIN (
SELECT *
FROM OPENJSON(@json, '$[0].lines') WITH (
[key] int '$.data.id.key',
metadata nvarchar(100) '$.data.meta.data',
data nvarchar(max) '$' AS JSON
)
) j ON #Data.[key] = j.[key]
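As a quick sanity check (a sketch, assuming the statements above ran in the same session against #Data), the updated rows should look roughly like this:
SELECT [key], col_1, col_2, col_3 FROM #Data;
-- key | col_1       | col_2                                      | col_3
-- 1   | item_1_data | {"data":{"id":{"key":1,...},"meta":{...}}} | {"note": "some note"}
-- 2   | item_2_data | {"data":{"id":{"key":2,...},"meta":{...}}} | {"note": "some note"}
-- 3   | item_3_data | {"data":{"id":{"key":3,...},"meta":{...}}} | {"note": "some note"}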

How can I do a join with a GeoJSON file and a table that already exists in the database?

I have a file with GeoJSON data like so:
{
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
[
[-73.759739, 42.61379],
[-73.759565, 42.614179],
[-73.752172, 42.614284999999995],
[-73.744867, 42.617281],
[-73.743042, 42.628958999999995],
[-73.74260799999999, 42.631581999999995],
[-73.734443, 42.631879999999995],
[-73.733936, 42.632020999999995],
[-73.73479499999999, 42.636396],
[-73.73097299999999, 42.643890999999996],
[-73.759739, 42.61379]
],
[[-73.72579, 42.650059], [-73.725143, 42.649788], [-73.725071, 42.649817], [-73.724823, 42.650282999999995], [-73.72552499999999, 42.650486], [-73.72579, 42.650059]]
]
},
"properties": { "STATEFP": "36", "UNSDLEA": "09630", "AFFGEOID": "9700000US3609630", "GEOID": "3609630", "NAME": "East Greenbush Central School District", "LSAD": "00", "ALAND": 195779723, "AWATER": 2721773 }
},
{
"type": "Feature",
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[[[-73.64970199999999, 42.351976], [-73.647074, 42.352069], [-73.647874, 42.353819], [-73.644886, 42.353538], [-73.640999, 42.354502], [-73.640935, 42.350439], [-73.64918399999999, 42.350263], [-73.64970199999999, 42.351976]]],
[
[
[-73.65831, 42.392008],
[-73.656461, 42.394642],
[-73.656983, 42.398876],
[-73.653469, 42.398039999999995],
[-73.65123799999999, 42.396592],
[-73.647519, 42.395765],
[-73.64356599999999, 42.392081],
[-73.649436, 42.392233],
[-73.652639, 42.393062],
[-73.65522, 42.39261],
[-73.655879, 42.390594],
[-73.658508, 42.391143],
[-73.65831, 42.392008]
]
],
[
[
[-73.77776399999999, 42.424766999999996],
[-73.775817, 42.429938],
[-73.774451, 42.435269],
[-73.77367, 42.44404],
[-73.773833, 42.449467999999996],
[-73.77420099999999, 42.451465999999996],
[-73.77553499999999, 42.451522999999995],
[-73.776663, 42.452602999999996],
[-73.77599, 42.454141],
[-73.777172, 42.455293999999995],
[-73.77776399999999, 42.424766999999996]
]
]
]
},
"properties": { "STATEFP": "36", "UNSDLEA": "15210", "AFFGEOID": "9700000US3615210", "GEOID": "3615210", "NAME": "Kinderhook Central School District (Ichabod Crane)", "LSAD": "00", "ALAND": 202445671, "AWATER": 9611722 }
}
]
}
How can I do a join where I select the entire record? E.g.:
{
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
[
[-73.759739, 42.61379],
[-73.759565, 42.614179],
[-73.752172, 42.614284999999995],
[-73.744867, 42.617281],
[-73.743042, 42.628958999999995],
[-73.74260799999999, 42.631581999999995],
[-73.734443, 42.631879999999995],
[-73.733936, 42.632020999999995],
[-73.73479499999999, 42.636396],
[-73.73097299999999, 42.643890999999996],
[-73.759739, 42.61379]
],
[[-73.72579, 42.650059], [-73.725143, 42.649788], [-73.725071, 42.649817], [-73.724823, 42.650282999999995], [-73.72552499999999, 42.650486], [-73.72579, 42.650059]]
]
},
"properties": { "STATEFP": "36", "UNSDLEA": "09630", "AFFGEOID": "9700000US3609630", "GEOID": "3609630", "NAME": "East Greenbush Central School District", "LSAD": "00", "ALAND": 195779723, "AWATER": 2721773 }
}
where the NAME property from the file matches the name column in my table?
My table is like so:
CREATE TABLE SCHOOL_INFO (
[id] [int] IDENTITY(1,1) NOT NULL,
[name] [varchar](128) NOT NULL,
[address] [varchar](128) NOT NULL,
[city] [varchar](128) NOT NULL,
[state] [varchar](128) NOT NULL,
[zip] [varchar](16) NOT NULL,
[jsondata] [varchar](MAX) NOT NULL
)
All I want to do is insert the matching JSON data into the jsondata column as a string. I do not need to use any of SQL Server's spatial capabilities.
I figured out the following SQL:
Declare @JSON varchar(max)
SELECT @JSON = BulkColumn
FROM OPENROWSET (BULK 'C:\temp\nyusd.json', SINGLE_CLOB) as j
SELECT * FROM OPENJSON (@JSON, '$.features')
I tried to query for one record using the following SQL but I think I have the syntax wrong:
Declare @JSON varchar(max)
SELECT @JSON = BulkColumn
FROM OPENROWSET (BULK 'C:\temp\nyusd.json', SINGLE_CLOB) as j
SELECT * FROM OPENJSON (@JSON, '$.features') where JSON_VALUE(@JSON, '$.features.properties.Name') = 'East Greenbush Central School District';
Can someone point me in the right direction?
Thanks!
Please try:
Declare @JSON nvarchar(max)
SELECT @JSON = BulkColumn
FROM OPENROWSET (BULK 'C:\temp\nyusd.json', SINGLE_CLOB) as j
select *
From
(
SELECT * FROM OPENJSON (@JSON, '$.features')
)x
WHERE JSON_VALUE(x.[value], '$.properties.NAME') = 'East Greenbush Central School District'
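If the end goal is to store each matching feature in the SCHOOL_INFO.jsondata column, a minimal sketch could look like the following (an assumption on my part: the table is already populated and its name values match the NAME property exactly):
Declare @JSON nvarchar(max)
SELECT @JSON = BulkColumn
FROM OPENROWSET (BULK 'C:\temp\nyusd.json', SINGLE_CLOB) as j

UPDATE s
SET s.jsondata = f.[value]   -- the whole feature object as a string
FROM SCHOOL_INFO s
INNER JOIN OPENJSON(@JSON, '$.features') f
    ON JSON_VALUE(f.[value], '$.properties.NAME') = s.[name]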

Nested JSON array merge

I have a column that stores JSON data in my table:
declare @json nvarchar(max)
set @json = N'
{
"Companies": [
{
"CompanyId": "A",
"Employee": null
},
{
"CompanyId": "B",
"Employee": [
{
"EmployeePictureId": null,
"Name": "Employee1"
},
{
"EmployeePictureId": "PictureId2",
"Name": "Employee2"
}
]
},
{
"CompanyId": "C",
"Employee": [
{
"EmployeePictureId": null,
"Name": "Employee3"
},
{
"EmployeePictureId": null,
"Name": "Employee4"
}
]
}
]
}
'
Is it possible to get a result like:
{
"EmployeePictureIds": ["PictureId2"]
}
using JSON_QUERY, JSON_VALUE, OPENJSON...?
I only want to get EmployeePictureId values and skip empty (null) data.
By the way, the number of elements in the array is not fixed.
In SQL Server 2017 you can use the following query:
select json_query(QUOTENAME(STRING_AGG('"' + STRING_ESCAPE( A.EmployeePictureId , 'json')
+ '"', char(44)))) as [EmployeePictureIds]
FROM OPENJSON(@json, '$.Companies')
WITH
(
CompanyId NVARCHAR(MAX),
Employee NVARCHAR(MAX) as json
) as B
cross apply openjson (B.Employee)
with
(
EmployeePictureId VARCHAR(50),
[Name] VARCHAR(50)
) as A
where A.EmployeePictureId is not null
for json path , WITHOUT_ARRAY_WRAPPER
Results for the JSON you provided:
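Based on the desired output format from the question, this should be:
{"EmployeePictureIds":["PictureId2"]}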
Results after adding another non-null EmployeePictureId would include both values in the array.

How to reference array items of a JSON object in T-SQL

I am using the JSON functionality in SQL Server 2016. How do I reference an item in the $.people[] array as shown below, using a variable? Instead of hardcoding the "1" in the path parameter of the JSON_QUERY function, I want to use a variable and loop through each item in the people array.
declare @json nvarchar(max) = '{
"people": [{
"name": "John",
"surname": "Doe"
}, {
"name": "Jane",
"surname": null,
"active": true
}]
}';
select JSON_QUERY(@json,'$.people[1]'); -- this works
declare @test nvarchar(max) = '$.people[1]';
select JSON_QUERY(@json,@test); -- ERROR: The argument 2 of the "JSON_VALUE or JSON_QUERY" must be a string literal.
You can use dynamic SQL through the sp_executesql procedure.
Both the original select statement and the sp_executesql call produce the same output, listed at the bottom.
declare @json nvarchar(max) = '{
"people": [{
"name": "John",
"surname": "Doe"
}, {
"name": "Jane",
"surname": null,
"active": true
}]
}';
declare @i int = 1
declare @dyn_sql nvarchar(1000) =
'select json_query(@json_dyn, ''$.people[' + cast(@i as varchar(10)) + ']'');'
select JSON_QUERY(@json,'$.people[1]'); -- this works
exec sp_executesql @dyn_sql --set the SQL to run
, N'@json_dyn nvarchar(max)' --"declare" the variable in the SQL to run
, @json_dyn = @json --"set" the variable in the SQL to run
/*
Output : { "name": "Jane", "surname": null, "active": true }
*/
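As an alternative sketch that avoids dynamic SQL entirely: OPENJSON on the array path returns one row per element, so you can enumerate every item in $.people directly (here [key] is the zero-based array index and [value] is the element's JSON text):
select [key] as array_index, [value] as person
from openjson(@json, '$.people');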
