I'm using SQLite3 on NodeJS and have a database in memory with a relation between table1 and table2. fk field in table2 is id in table1.
table1 :
id
value1
value2
1
v1_t1
v2_t1
table2 :
id
value1
fk
1
v1_t2
1
2
v2_t2
1
When I run this query:
-- Flat inner join: one output row per matching table2 row, with table1's columns repeated.
SELECT * from table1 t1 INNER JOIN table2 t2 ON t2.fk=t1.id WHERE t1.id=1;
Result is :
[
{
id: 1,
value1: v1_t2,
fk:1
},
{
id: 2,
value1: v2_t2,
fk:1
}
]
But I want :
[
{
fk: 1,
value1: "v1_t1",
value2: "v2_t1",
result: [
{
id: 1,
value1: "v1_t2",
fk: 1
},
{
id: 2,
value1: "v2_t2",
fk: 1
}
]
}
]
Is this possible or should I use a non-relational database?
You can use SQLite's JSON1 Extension functions:
-- Build one nested JSON object per parent row using SQLite's JSON1 functions
-- (json_object for the wrapper, json_group_array to aggregate the child rows).
SELECT json_object(
  'fk', t1.id,  -- parent key; was t2.fk, a bare aggregated-side column next to an aggregate
  'value1', t1.value1,
  'value2', t1.value2,
  'result',
  json_group_array(json_object('id', t2.id, 'value1', t2.value1, 'fk', t2.fk))
) col
FROM table1 t1 INNER JOIN table2 t2
ON t2.fk = t1.id
WHERE t1.id = 1
GROUP BY t1.id, t1.value1, t1.value2;  -- one object per parent row, even if the WHERE filter is widened
See the demo.
Related
I'm required to supply a json object like this:
[
{
id: '59E59BC82852A1A5881C082D5FFCAC10',
user: {
...users[1],
last_message: "16-06-2022",
topic: "Shipment"
},
unread: 2,
},
{
id: '521A754B2BD028B13950CB08CDA49075',
user: {
...users[2],
last_message: "15-06-2022",
topic: "Settings"
},
unread: 0,
}
]
it is not difficult for me to build a json like this:
(with this fiddle https://dbfiddle.uk/?rdbms=sqlserver_2019&fiddle=bf62626de20d3ca7191aa9c1ef0cd39b)
[
{
"id": "59E59BC82852A1A5881C082D5FFCAC10",
"user": {
"id": 1,
"last_message": "16-06-2022",
"topic": "Shipment"
},
"unread": 2
},
{
"id": "521A754B2BD028B13950CB08CDA49075",
"user": {
"id": 2,
"last_message": "15-06-2022",
"topic": "Settings"
},
"unread": 1
},
{
"id": "898BB874D0CBBB1EFBBE56D2626DC847",
"user": {
"id": 3,
"last_message": "18-06-2022",
"topic": "Account"
},
"unread": 1
}
]
but I have no idea how to put the ...users[1], instead of "id": 1 into user node:
is there a way?
This is not actually valid JSON, but you can create it yourself using STRING_AGG and CONCAT
-- Hand-build the requested pseudo-JSON: one "{...}" fragment per user,
-- aggregated into a bracketed list with STRING_AGG.
-- NOTE(review): the output is deliberately NOT valid JSON (spread syntax,
-- unquoted keys), and values are not escaped -- embedded quotes or commas
-- in the data would corrupt the result.
SELECT
'[' + STRING_AGG(u.spread, ',') + ']'
FROM (
SELECT
spread = CONCAT(
'{id:''',
u.userId,
''',user:{...users[',
-- index for the JS users[n] spread; assumes the client-side array is ordered by u.id -- TODO confirm
ROW_NUMBER() OVER (ORDER BY u.id),
'],last_message: "',
t.created_at,
'",topic:"',
t.topic,
'"},unread:',
-- unread = this user's tickets that have no read_at timestamp
(SELECT COUNT(*) FROM #tickets t3 WHERE t3.userId = u.userId AND t3.read_at IS NULL),
'}'
)
FROM #Users u
-- most recent ticket per user (TOP 1 ... ORDER BY created_at DESC)
CROSS APPLY (
SELECT top 1
t.ticketId,
t.created_at,
t.topic
FROM #Tickets t
WHERE t.userId = u.userId
ORDER BY
t.created_at DESC
) t
) u
Note that you may need to escape values, but I don't know how this not-JSON works so couldn't say.
db<>fiddle
In Postgres 11.x I am trying to aggregate elements in a nested jsonb object which has an array field into a single row per device_id. Here's example data for a table called configurations.
id
device_id
data
1
1
"{""sensors"": [{""other_data"": {}, ""sensor_type"": 1}], ""other_data"": {}}"
2
1
"{""sensors"": [{""other_data"": {}, ""sensor_type"": 1}, {""other_data"": {}, ""sensor_type"": 2}], ""other_data"": {}}"
3
1
"{""sensors"": [{""other_data"": {}, ""sensor_type"": 3}], ""other_data"": {}}"
4
2
"{""sensors"": [{""other_data"": {}, ""sensor_type"": 4}], ""other_data"": {}}"
5
2
"{""sensors"": null, ""other_data"": {}}"
6
3
"{""sensors"": [], ""other_data"": {}}"
My goal output would have a single row per device_id with an array of distinct sensor_types, example:
device_id
sensor_types
1
[1,2,3]
2
[4]
3
[ ] null would also be fine here
Tried a bunch of things but running into various problems, here's some SQL to set up a test environment:
-- Test fixture for the per-device sensor_type aggregation.
CREATE TEMPORARY TABLE configurations(
id SERIAL PRIMARY KEY,
device_id integer NOT NULL, -- was SERIAL: a reference column should not own its own sequence
data JSONB
);
-- Rows cover the tricky cases: duplicate sensor_type across rows (device 1),
-- "sensors": null (device 2), and "sensors": [] (device 3).
INSERT INTO configurations(device_id, data) VALUES
(1, '{ "other_data": {}, "sensors": [ { "sensor_type": 1, "other_data": {} } ] }'),
(1, '{ "other_data": {}, "sensors": [ { "sensor_type": 1, "other_data": {} }, { "sensor_type": 2, "other_data": {} }] }'),
(1, '{ "other_data": {}, "sensors": [ { "sensor_type": 3, "other_data": {} }] }'),
(2, '{ "other_data": {}, "sensors": [ { "sensor_type": 4, "other_data": {} }] }'),
(2, '{ "other_data": {}, "sensors": null }'),
(3, '{ "other_data": {}, "sensors": [] }');
Quick note, my real table has about 100,000 rows and the jsonb data is much more complicated but follows this general structure.
The JSONB null causes some problems in Postgres and should rather be avoided when possible. You can convert the value to an empty array with the expression
coalesce(nullif(data->'sensors', 'null'), '[]')
The first attempt:
-- First attempt: unnest each row's sensors array (JSONB 'null' coalesced to '[]'),
-- then collect distinct sensor_type values per device.
select device_id, array_agg(distinct value->'sensor_type') as sensor_types
from configurations
-- LEFT JOIN ... ON true keeps devices whose arrays unnest to zero elements,
-- but those surface as a NULL "value", i.e. a NULL element in the result array.
left join jsonb_array_elements(coalesce(nullif(data->'sensors', 'null'), '[]')) on true
group by device_id;
device_id | sensor_types
-----------+--------------
1 | {1,2,3}
2 | {4,NULL}
3 | {NULL}
(3 rows)
may be unsatisfactory because of nulls in the result. When trying to remove them
-- Second attempt: filter out the NULL elements produced by empty arrays ...
select device_id, array_agg(distinct value->'sensor_type') as sensor_types
from configurations
left join jsonb_array_elements(coalesce(nullif(data->'sensors', 'null'), '[]')) on true
-- ... but this WHERE also drops devices whose every row has an empty array (device 3)
where value is not null
group by device_id;
device_id | sensor_types
-----------+--------------
1 | {1,2,3}
2 | {4}
(2 rows)
device_id = 3 disappears. Well, we can get all device_ids from the table:
-- Final form: do the aggregation in a subquery, then LEFT JOIN it back to the
-- base table so devices with no sensors still appear (with NULL sensor_types).
select distinct device_id, sensor_types
from configurations
left join (
-- per-device distinct sensor_type array, NULL elements excluded
select device_id, array_agg(distinct value->'sensor_type') as sensor_types
from configurations
left join jsonb_array_elements(coalesce(nullif(data->'sensors', 'null'), '[]')) on true
where value is not null
group by device_id
) s
using(device_id);
device_id | sensor_types
-----------+--------------
1 | {1,2,3}
2 | {4}
3 |
(3 rows)
I have two columns of json that I would like to join on id into a single select.
Sample Data
| a | b |
+------------------------------------------------+-------------------------------------+
| [{id: 1, name: "Alice"},{id:2, name: "Bob"}] | [{id: 1, age: 30}, {id:2, age: 32}] |
| [{id: 5, name: "Charlie"},{id:6, name: "Dale"} | [{id: 5, age: 20}, {id:6, age: 14}] |
Desired Output
| c |
+-------------------------------------------------------------------+
| [{id: 1, name: "Alice", age: 30},{id:2, name: "Bob", age: 32}] |
| [{id: 5, name: "Charlie", age: 20},{id:6, name: "Dale", age: 14}] |
I'd like to do something like
-- NOTE(review): pseudo-code from the question -- not valid T-SQL as written:
--   OPENJSON takes a JSON expression, not a bare subquery like this;
--   the inner WITH maps '$.name' onto an int "age" column (mismatch);
--   and the outer WITH declares no "name" column even though SELECT uses it.
select
id,
name,
age
from openJson(select a from someDb) sd
with (
id int '$.id',
age int '$.age'
)
inner join (
select
id,
age
from openJson(select b from someDb)
with (
id int '$.id',
age int '$.name'
)
) x
on x.id = sd.id
Current versions of SQL Server do not provide a built-in function for merging two JSON documents (the T-SQL MERGE statement operates on tables, not JSON). The only option is JSON_MODIFY() function, that can either update the value of an existing property, insert a new key:value pair or delete a key.
But in your case, the more appropriate approach is to parse the stored JSON as tables using OPENJSON() with explicit schema, join the tables and rebuild the required JSON output again:
-- For each source row: shred both JSON arrays with OPENJSON ... WITH (explicit
-- schema), join them on id, and serialize the combined rows back FOR JSON PATH.
SELECT
-- correlated subquery builds one JSON string per (a, b) pair
c = (
SELECT a.id, a.name, b.age
FROM OPENJSON(v.a) WITH (
id int '$.id',
name varchar(50) '$.name'
) a
-- FULL JOIN keeps ids that appear in only one of the two arrays
FULL JOIN OPENJSON(v.b) WITH (
id int '$.id',
age int '$.age'
) b ON a.id = b.id
FOR JSON PATH
)
-- inline sample data (valid-JSON version of the question's tables)
FROM (VALUES
('[{"id": 1, "name": "Alice"}, {"id":2, "name": "Bob"}]', '[{"id": 1, "age": 30}, {"id":2, "age": 32}]'),
('[{"id": 5, "name": "Charlie"}, {"id":6, "name": "Dale"}]', '[{"id": 5, "age": 20}, {"id":6, "age": 14}]')
) v (a, b)
Result:
c
--------------------------------------------------------------------
[{"id":1,"name":"Alice","age":30},{"id":2,"name":"Bob","age":32}]
[{"id":5,"name":"Charlie","age":20},{"id":6,"name":"Dale","age":14}]
I'm trying to import some data as nested documents.
I've tried to simplify my problem as much as possible.
Here my parent query:
-- Parent rows (ids 1..3). UNION ALL: the branches are disjoint by construction,
-- so the dedup/sort implied by plain UNION is wasted work.
SELECT 1 AS id,
'xxx' AS col1,
'yyy' AS col2
UNION ALL
SELECT 2 AS id,
'xxx' AS col1,
'yyy' AS col2
UNION ALL
SELECT 3 AS id,
'xxx' AS col1,
'yyy' AS col2
You can see data here:
1 | xxx | yyy
2 | xxx | yyy
3 | xxx | yyy
My child query is:
-- Child rows; rel_id references the parent id. UNION ALL: branches are
-- disjoint (distinct ids), so plain UNION's dedup pass is unnecessary.
SELECT 1 AS id,
1 AS rel_id,
'aaa' AS col1,
'bbb' AS col2
UNION ALL
SELECT 2 AS id,
1 AS rel_id,
'aaa' AS col1,
'bbb' AS col2
UNION ALL
SELECT 3 AS id,
2 AS rel_id,
'aaa' AS col1,
'bbb' AS col2
UNION ALL
SELECT 4 AS id,
3 AS rel_id,
'aaa' AS col1,
'bbb' AS col2
Data is:
1 | 1 | aaa | bbb
2 | 1 | aaa | bbb
3 | 2 | aaa | bbb
4 | 3 | aaa | bbb
I'm trying to nest using this DIH configuration:
<!-- DIH parent entity: each row of the parent query becomes one Solr document.
     The "autor" child entity is cached (SortedMapBackedCache) and joined by
     rel_id -> item.id; child="true" asks DIH to emit the rows as child docs.
     NOTE(review): for documents to actually come back nested, the collection
     must also support nested/block-join documents (e.g. a _root_ field in the
     schema) and the query must request children - verify for your Solr version. -->
<entity
name="item"
query="select 1 as id, 'xxx' as col1, 'yyy' as col2 union select 2 as id, 'xxx' as col1, 'yyy' as col2 union select 3 as id, 'xxx' as col1, 'yyy' as col2">
<field column="id" name="id"/>
<field column="col1" name="column1_s" />
<field column="col2" name="column2_s" />
<entity
name="autor"
child="true"
query="select 1 as id, 1 as rel_id, 'aaa' as col1, 'bbb' as col2 union select 2 as id, 1 as rel_id, 'aaa' as col1, 'bbb' as col2 union select 3 as id, 2 as rel_id, 'aaa' as col1, 'bbb' as col2 union select 4 as id, 3 as rel_id, 'aaa' as col1, 'bbb' as col2"
cacheKey="rel_id" cacheLookup="item.id" cacheImpl="SortedMapBackedCache">
<field column="node_type" template="autor"/>
<field column="alt_code" name="id" template="${autor.id}-${autor.rel_id}"/>
<field column="col1" name="column1_s" />
<field column="col2" name="column2_s" />
</entity>
</entity>
However, they are not nested:
$ curl "http://localhost:8983/solr/arxius/select?q=*%3A*"
{
"responseHeader": {
"status": 0,
"QTime": 0,
"params": {
"q": "*:*"
}
},
"response": {
"numFound": 7,
"start": 0,
"numFoundExact": true,
"docs": [
{
"id": "1",
"column2_s": "bbb",
"column1_s": "aaa",
"_version_": 1682901366056419300
},
{
"id": "2",
"column2_s": "bbb",
"column1_s": "aaa",
"_version_": 1682901366056419300
},
{
"id": "1",
"column2_s": "yyy",
"column1_s": "xxx",
"_version_": 1682901366056419300
},
{
"id": "3",
"column2_s": "bbb",
"column1_s": "aaa",
"_version_": 1682901366058516500
},
{
"id": "2",
"column2_s": "yyy",
"column1_s": "xxx",
"_version_": 1682901366058516500
},
{
"id": "4",
"column2_s": "bbb",
"column1_s": "aaa",
"_version_": 1682901366058516500
},
{
"id": "3",
"column2_s": "yyy",
"column1_s": "xxx",
"_version_": 1682901366058516500
}
]
}
}
As you can see, documents are not nested.
I've been struggling a lot over that issue.
I've tried to state the problem as straightforwardly as possible.
I hope I've explained it well.
Please, any ideas?
I am trying to update a log with JSON in SQL Server 2017. I can update a data point with json_value, which covers a few cases, but would ultimately like to join in incoming JSON.
Sample table:
key | col_1 | col_2 | col_3
----+-------------------------------+---------------|-----------------
1 | json.lines[0].data.meta.data | json.lines[0] | json.header.note
2 | json.lines[1].data.meta.data | json.lines[1] | json.header.note
3 | json.lines[2].data.meta.data | json.lines[2] | json.header.note
I'd like to update a single property in col_1 and update col_2 with an object serialized as a string.
Sample JSON:
-- NOTE(review): illustrative only -- this does not parse. T-SQL variables are
-- @-prefixed (#json would name a temp table), the literal nests unescaped
-- single quotes, and the payload itself is not valid JSON (unquoted keys,
-- "data {" missing a colon, {...} placeholders).
declare #json nvarchar(max) = '[{
header: {
note: 'some note'
}, lines: [{
data {
id: {
key: 0,
name: 'item_1'
},
meta: {
data: 'item_1_data'
}
}, {...}, {...}
}]
}]'
Query:
-- NOTE(review): pseudo-code -- the UPDATE ... JOIN is incomplete (no FROM
-- clause), "__index__" and "[?]" mark the unknown array index, col_3's path
-- literal has a stray quote, and "logTable..key" should be logTable.[key].
update logTable set
col_1 = json_value(#json,'$.lines[__index__].data.meta.data'), -- what would the syntax for __index__ be?
col_2 = j.lines[key], -- pseudo code
col_3 = json_value(#json, '$'.header.note')
inner join openjson(#json) j
on json_value(#json,'$.line[?].id.key') = logTable..key -- ? denotes indices that I'd like to iterate = join over
Expected Output:
key | col_1 | col_2 | col_3
----+---------------+----------------------------|---------
1 | 'item_1_data' | 'data: { id: { key: 0...}' | '{header: { note: ...} }'
2 | 'item_2_data' | 'data: { id: { key: 1...}' | '{header: { note: ...} }'
3 | 'item_3_data' | 'data: { id: { key: 2...}' | '{header: { note: ...} }'
I'm not sure how to handle iterating over the $.line indices, but think a join would solve this if properly implemented.
How can I join to arrays of objects to update SQL rows by primary key?
Original answer:
You may try to parse your JSON using OPENJSON with explicit schema (note, that your JSON is not valid):
Table and JSON:
-- Target temp table: three rows keyed 1..3 with empty columns, to be
-- populated from the JSON payload below.
CREATE TABLE #Data (
[key] int,
col_1 nvarchar(100),
col_2 nvarchar(max)
)
INSERT INTO #Data
([key], [col_1], [col_2])
VALUES
(1, N'', N''),
(2, N'', N''),
(3, N'', N'')
-- Valid-JSON version of the question's payload.
-- NOTE: renamed #json -> @json; T-SQL variables use @ (# denotes temp tables),
-- so "DECLARE #json" would not compile.
DECLARE @json nvarchar(max) = N'[{
"lines": [
{
"data": {
"id": {
"key": 1,
"name": "item_1"
},
"meta": {
"data": "item_1_data"
}
}
},
{
"data": {
"id": {
"key": 2,
"name": "item_2"
},
"meta": {
"data": "item_2_data"
}
}
},
{
"data": {
"id": {
"key": 3,
"name": "item_3"
},
"meta": {
"data": "item_3_data"
}
}
}
]
}]'
Statement:
-- Shred $[0].lines with an explicit schema and join back on the embedded key.
-- NOTE: #json -> @json (T-SQL variable syntax; # would reference a temp table).
UPDATE #Data
SET
col_1 = j.metadata,
col_2 = j.data
FROM #Data
INNER JOIN (
SELECT *
FROM OPENJSON(@json, '$[0].lines') WITH (
[key] int '$.data.id.key',
metadata nvarchar(100) '$.data.meta.data',
data nvarchar(max) '$' AS JSON -- AS JSON returns the raw fragment, not a scalar
)
) j ON #Data.[key] = j.[key]
Update:
Header is common for all rows, so use JSON_QUERY() to update the table:
Table and JSON:
-- Extended target table: adds col_3 for the shared header fragment.
CREATE TABLE #Data (
[key] int,
col_1 nvarchar(100),
col_2 nvarchar(max),
col_3 nvarchar(max)
)
INSERT INTO #Data
([key], col_1, col_2, col_3)
VALUES
(1, N'', N'', N''),
(2, N'', N'', N''),
(3, N'', N'', N'')
-- Payload including the shared "header" object.
-- NOTE: renamed #json -> @json; T-SQL variables use @ (# denotes temp tables).
DECLARE @json nvarchar(max) = N'[{
"header": {
"note": "some note"
},
"lines": [
{
"data": {
"id": {
"key": 1,
"name": "item_1"
},
"meta": {
"data": "item_1_data"
}
}
},
{
"data": {
"id": {
"key": 2,
"name": "item_2"
},
"meta": {
"data": "item_2_data"
}
}
},
{
"data": {
"id": {
"key": 3,
"name": "item_3"
},
"meta": {
"data": "item_3_data"
}
}
}
]
}]'
Statement:
-- Same join-based update; the header is identical for every row, so it is
-- pulled once with JSON_QUERY (returns the object fragment, not a scalar).
-- NOTE: #json -> @json (T-SQL variable syntax; # would reference a temp table).
UPDATE #Data
SET
col_1 = j.metadata,
col_2 = j.data,
col_3 = JSON_QUERY(@json, '$[0].header')
FROM #Data
INNER JOIN (
SELECT *
FROM OPENJSON(@json, '$[0].lines') WITH (
[key] int '$.data.id.key',
metadata nvarchar(100) '$.data.meta.data',
data nvarchar(max) '$' AS JSON
)
) j ON #Data.[key] = j.[key]