Getting data from the table with two conditions and two different limits - database

I have a table of posts where each post has a category field. There are 10 different categories.
I want to write an API that returns 10 posts from the posts table: 7 of them should be from category A and 3 of them from category B.
Is there a way to do this with just one db.collection("posts").find()?
The way I do it now is:
const postsA = await db.collection("posts").find({ category: "A" }).limit(7).toArray();
const postsB = await db.collection("posts").find({ category: "B" }).limit(3).toArray();

Sure, you could use an aggregation pipeline with a single $facet stage containing two sub-pipelines, as follows:
const posts = await db.collection("posts").aggregate([
  {
    "$facet": {
      "postsA": [
        { "$match": { "category": "A" } },
        { "$limit": 7 }
      ],
      "postsB": [
        { "$match": { "category": "B" } },
        { "$limit": 3 }
      ]
    }
  }
]).toArray();
https://www.mongodb.com/docs/v6.0/reference/operator/aggregation/facet/
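Note that $facet emits a single document with one array per facet, so posts[0] here would contain the postsA and postsB fields. If you would rather get one flat array of 10 posts, a minimal sketch of an extra stage you could append to the pipeline (using $concatArrays; the output field name posts is just an assumption):
{
  // hypothetical final stage: merge both facet arrays into one list
  "$project": {
    "posts": { "$concatArrays": ["$postsA", "$postsB"] }
  }
}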

Related

adding data to nested document : mongodb

I want to add a new record within 'musics'. My document looks similar to:
{
  "username": "erkin",
  "email": "erkin-07#hotmail.com",
  "password": "b",
  "playlists": [
    {
      "_id": 58,
      "name": "asdsa",
      "date": "09-01-15",
      "musics": {
        "song-one": {
          "song-one": "INNA - Cola Song (feat. J Balvin)",
          "duration": "3.00"
        },
        "song-two": {
          "song-two": "blabla",
          "duration": "3.00"
        }
      }
    }
  ]
}
I navigate to "musics" and then use $set to add/update multiple records at once, but new records get added in lexicographical order (the behaviour of $set). I'm using a query similar to the following (so in my case song-four ends up before song-three):
db.getCollection('myCollection').update(
  { username: "erkin" },
  { $set: { "playlists.musics.song-three": {...}, "playlists.musics.song-four": {...} } }
)
Is there any way I can add new records to that location in the order my $set query is arranged?
As playlists is an array:
Option 1: Update the first document in playlists.
Specify index 0 to target the first document in playlists:
playlists.0.musics.song-three
db.collection.update(
  { username: "erkin" },
  {
    $set: {
      "playlists.0.musics.song-three": { "song-three": "song-three" },
      "playlists.0.musics.song-four": { "song-four": "song-four" }
    }
  }
)
Sample Demo on Mongo Playground (Option 1)
Option 2: Update all documents in playlists.
With the $[] all-positional operator:
playlists.$[].musics.song-three
db.collection.update(
  { username: "erkin" },
  {
    $set: {
      "playlists.$[].musics.song-three": { "song-three": "song-three" },
      "playlists.$[].musics.song-four": { "song-four": "song-four" }
    }
  }
)
Sample Demo on Mongo Playground (Option 2)
Option 3: Update a specified document in playlists.
With the $[<identifier>] filtered positional operator:
playlists.$[playlist].musics.song-three
db.collection.update(
  { username: "erkin" },
  {
    $set: {
      "playlists.$[playlist].musics.song-three": { "song-three": "song-three" },
      "playlists.$[playlist].musics.song-four": { "song-four": "song-four" }
    }
  },
  {
    arrayFilters: [
      { "playlist._id": 58 }
    ]
  }
)
Sample Demo on Mongo Playground (Option 3)
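As a side note, the update helper is deprecated in newer drivers and shells in favour of updateOne/updateMany; a minimal sketch of Option 3 in that form, assuming the same collection and filter:
// Sketch only: the same filtered positional update via updateOne
db.collection.updateOne(
  { username: "erkin" },
  {
    $set: {
      "playlists.$[playlist].musics.song-three": { "song-three": "song-three" }
    }
  },
  { arrayFilters: [{ "playlist._id": 58 }] }
)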

MongoDB user table with friends

I have a USER table with documents:
{
_id: 1,
name: 'funny-guy43',
image: '../../../img1.jpg',
friends: [2, 3]
},
{
_id: 2,
name: 'SurfinGirl3',
image: '../../../img2.jpg',
friends: []
},
{
_id: 3,
name: 'FooBarMan',
image: '../../../img3.jpg',
friends: [2]
}
friends is an array of USER _ids. I want to (1) get a user by _id, (2) look at his friends, and (3) query the USER table with those friend ids to return all of the friends.
For example: find user 1, query the table based on his friends 2 and 3, and return users 2 and 3.
Can I do that in one transaction? Or do I query the table to get the user's array of friends, then query the table again with that array of friend ids?
I'm using .Net Core if that matters.
I am very open to alternative approaches as well.
It is, in fact, possible to do this in one transaction. Or, to be more exact, in one aggregation.
I would first split the users into 2 different subsets, one called searched_user and the other called other_users, where searched_user holds only the user we are searching for and other_users holds everyone else. We can do that using $facet. Here is the idea:
{
  "$facet": {
    "searched_user": [
      { $match: { _id: 1 } }
    ],
    "other_users": [
      { $match: { _id: { $ne: 1 } } }
    ]
  }
}
Once they are separated like this, we can search the other_users subset using the friend ids from the searched_user. So here is the full aggregation:
db.collection.aggregate([
  {
    "$facet": {
      "searched_user": [
        { $match: { _id: 1 } }
      ],
      "other_users": [
        { $match: { _id: { $ne: 1 } } }
      ]
    }
  },
  {
    "$unwind": "$searched_user"
  },
  {
    $project: {
      user_friends: {
        $filter: {
          input: "$other_users",
          as: "other_users",
          cond: {
            $in: ["$$other_users._id", "$searched_user.friends"]
          }
        }
      }
    }
  }
])
Here we are looking for user 1 and the result will be user 1's friends.
[
  {
    "user_friends": [
      {
        "_id": 2,
        "friends": [],
        "image": "../../../img2.jpg",
        "name": "SurfinGirl3"
      },
      {
        "_id": 3,
        "friends": [2],
        "image": "../../../img3.jpg",
        "name": "FooBarMan"
      }
    ]
  }
]
Playground: https://mongoplayground.net/p/-8pNnQXg8r6
You can achieve this by using $lookup in an aggregation. I tried it with MongoDB v4.2.11.
db.users.aggregate([
  {
    '$match': { '_id': 1 }
  },
  {
    '$lookup': {
      'from': 'users',
      'let': { 'friendIds': '$friends' },
      'pipeline': [
        {
          '$match': {
            '$expr': { '$in': ['$_id', '$$friendIds'] }
          }
        }
      ],
      'as': 'friendsArr'
    }
  }
])
Result:
[
  {
    "_id": 1,
    "name": "funny-guy43",
    "image": "../../../img1.jpg",
    "friends": [2, 3],
    "friendsArr": [
      {
        "_id": 2,
        "name": "SurfinGirl3",
        "image": "../../../img2.jpg",
        "friends": []
      },
      {
        "_id": 3,
        "name": "FooBarMan",
        "image": "../../../img3.jpg",
        "friends": [2]
      }
    ]
  }
]
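Since friends is an array of _id values, the same join can arguably be written without the pipeline form at all: a localField/foreignField $lookup matches each element of an array-valued localField against the foreign field. A minimal sketch, assuming the same users collection:
db.users.aggregate([
  { $match: { _id: 1 } },
  {
    // each element of the friends array is matched against _id in users
    $lookup: {
      from: "users",
      localField: "friends",
      foreignField: "_id",
      as: "friendsArr"
    }
  }
])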

MongoDB: How to copy Documents to a new field of associated Documents from other collections?

Collections that I have:
Product:
[
  {
    "_id": "product_id_1",
    "name": "Product 1",
    "price": 50
  },
  {
    "_id": "product_id_2",
    "name": "Product 2",
    "price": 100
  }
]
Category:
[
  {
    "_id": "category_id_1",
    "name": "Category 1"
  },
  {
    "_id": "category_id_2",
    "name": "Category 2"
  }
]
Audit:
[
  {
    "_id": "audit_id_1",
    "resource_type": "product",
    "resource_id": "product_id_1",
    "attribute": "name",
    "executionTime": "2021-01-10T00:00:00.000Z"
  },
  {
    "_id": "audit_id_2",
    "resource_type": "product",
    "resource_id": "product_id_1",
    "attribute": "name",
    "executionTime": "2021-01-09T00:00:00.000Z"
  },
  {
    "_id": "audit_id_3",
    "resource_type": "product",
    "resource_id": "product_id_1",
    "attribute": "price",
    "executionTime": "2021-01-10T00:00:00.000Z"
  },
  {
    "_id": "audit_id_4",
    "resource_type": "category",
    "resource_id": "category_id_1",
    "attribute": "name",
    "executionTime": "2021-01-10T00:00:00.000Z"
  },
  {
    "_id": "audit_id_5",
    "resource_type": "category",
    "resource_id": "category_id_1",
    "attribute": "name",
    "executionTime": "2021-01-09T00:00:00.000Z"
  }
]
The Audit collection is used to save details about updates to each Product or Category document.
For example, we can see that the name attribute of the Product with id product_id_1 was changed twice: on the 9th and on the 10th of January. The price attribute of the same Product was changed only once: on the 10th of January.
The same kind of information is saved for the Category collection as well.
The goal that I want to achieve is:
Extract the existing documents from the Audit collection that contain information only about the latest change for each unique attribute of each unique resource, and copy them to a new field on the related document in the Product/Category collections.
As a result, the Product/Category collections should look like this:
Product:
[
  {
    "_id": "product_id_1",
    "name": "Product 1",
    "price": 50,
    "audit": [
      {
        "_id": "audit_id_1",
        "resource_type": "product",
        "resource_id": "product_id_1",
        "attribute": "name",
        "executionTime": "2021-01-10T00:00:00.000Z"
      },
      {
        "_id": "audit_id_3",
        "resource_type": "product",
        "resource_id": "product_id_1",
        "attribute": "price",
        "executionTime": "2021-01-10T00:00:00.000Z"
      }
    ]
  },
  {
    "_id": "product_id_2",
    "name": "Product 2",
    "price": 100,
    "audit": []
  }
]
Category:
[
  {
    "_id": "category_id_1",
    "name": "Category 1",
    "audit": [
      {
        "_id": "audit_id_4",
        "resource_type": "category",
        "resource_id": "category_id_1",
        "attribute": "name",
        "executionTime": "2021-01-10T00:00:00.000Z"
      }
    ]
  },
  {
    "_id": "category_id_2",
    "name": "Category 2",
    "audit": []
  }
]
I tried to write a query by myself, and this is what I got:
db.getCollection("audit").aggregate([
{
$match: {
"resource_type": "product"}
},
{
$sort: {
executionTime: -1
}
},
{
$group: {
_id: {
property: "$attribute",
entity: "$resource_id"
},
document: {
$first: "$$ROOT"
}
}
},
{
$replaceRoot: {
newRoot: "$document"
}
}
]).forEach(function(a){
db.getCollection("product").update({"_id" :ObjectId(a.resource_id)},{addToSet : {audit:[a]}})
});
The problems that I see with my solution are:
it updates only the Product collection, which means I need to execute the code at least twice, once for each existing collection;
the forEach statement: I am not sure whether this command is executed on the server side or on the client side; assuming the Audit collection contains approximately 100k documents, how fast will it be executed from a performance point of view?
So, definitely, I have a feeling that I need to rewrite my solution, but I have doubts about how to make it better.
For example, I read about the $merge stage, which can do quite a similar job to what I do in the forEach section, but I do not know how to properly apply $merge to the aggregation flow I wrote above.
First of all, forEach is executed on the client side, which means you download the result of the aggregation and make one update request per document in that result. Although it is the most flexible way, it is also the most expensive one. An aggregation pipeline with $out or $merge, on the other hand, is executed on the server side, so you don't pipe the data through the client.
Secondly, if you need to update 2 collections you will need at least 2 queries. There is no way to $out to multiple collections.
Finally, you need to use the subquery syntax of $lookup. It is more flexible and lets you define the "joining" logic in pipeline terms. For products it would be:
db.products.aggregate([
  {
    $lookup: {
      from: "audit",
      let: { id: "$_id" },
      pipeline: [
        {
          "$match": {
            $expr: { $eq: [ "$resource_id", "$$id" ] }, // the foreign key match
            resource_type: "product" // the discriminator
          }
        },
        { $sort: { "executionTime": -1 } }, // chronological order
        {
          "$group": {
            _id: {
              attribute: "$attribute", // for each unique attribute
              id: "$resource_id" // per each unique resource
            },
            value: { $first: "$$ROOT" } // pick the latest
          }
        },
        { "$replaceRoot": { "newRoot": "$value" } }
      ],
      as: "audit"
    }
  }
])
You already learned about the $out stage and its limitations from the previous answer. The second pipeline, to update categories, will be exactly the same but with a different $out destination and a different value in the discriminator.
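For illustration, a sketch of that second pipeline, assuming the collection is named categories and using a $merge stage (a newer alternative to $out that can update documents in place) to write the result back:
db.categories.aggregate([
  {
    $lookup: {
      from: "audit",
      let: { id: "$_id" },
      pipeline: [
        {
          "$match": {
            $expr: { $eq: [ "$resource_id", "$$id" ] },
            resource_type: "category" // the discriminator
          }
        },
        { $sort: { "executionTime": -1 } },
        {
          "$group": {
            _id: { attribute: "$attribute", id: "$resource_id" },
            value: { $first: "$$ROOT" }
          }
        },
        { "$replaceRoot": { "newRoot": "$value" } }
      ],
      as: "audit"
    }
  },
  // write the audit field back onto the category documents;
  // merging into the collection being aggregated requires MongoDB 4.4+
  { $merge: { into: "categories", on: "_id", whenMatched: "merge" } }
])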
I want to post the code I ended up writing myself:
db.getCollection("product").aggregate([
{ $match: {} },
{
$lookup: {
from: 'audit',
localField: '_id',
foreignField: 'resource_id',
as: 'audit'
}
},
{
$unwind: '$audit'
},
{
$sort: { "audit.executionTime": -1 }
},
{
$group: {
_id: {
property: "$audit.attribute",
entity: "$audit.resource_id"
},
document: {
$first: "$$ROOT"
}
}
},
{
$replaceRoot: {
newRoot: "$document"
}
},
{
$group: {
_id: "$_id",
audit: { $push: "$audit" }
}
},
{
$merge: {
into: 'product',
on: "_id",
whenMatched: 'merge',
whenNotMatched: 'insert'
}
}])
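One caveat worth flagging: the plain $unwind drops products that have no audit entries at all, so product_id_2 would never receive its empty audit array. A simple follow-up pass could backfill those (a sketch):
// backfill an empty audit array on products the pipeline skipped
db.getCollection("product").updateMany(
  { audit: { $exists: false } },
  { $set: { audit: [] } }
)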

$inc with multiple conditions and different update values in mongoDB

I have a Products collection with data as given below. I am stuck with the requirement to write a single update query to decrease the stock of pId 1 by 2 and pId 2 by 3.
{
  "category": "electronics",
  "products": [
    { "pId": 1, "stock": 20 },
    { "pId": 2, "stock": 50 },
    { "pId": 3, "stock": 40 }
  ]
}
You need the filtered positional operator $[<identifier>] to define separate matching conditions for the array elements:
db.col.update(
{ category: "electronics" },
{ $inc: { "products.$[p1].stock": -2, "products.$[p2].stock": -3 } },
{ arrayFilters: [ { "p1.pId": 1 }, { "p2.pId": 2 } ] }
)
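Applied to the sample document above, both decrements happen in a single round trip; the expected result would be:
{
  "category": "electronics",
  "products": [
    { "pId": 1, "stock": 18 },
    { "pId": 2, "stock": 47 },
    { "pId": 3, "stock": 40 }
  ]
}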

MongoDB: Query documents where one of their fields is equal to a field of one of their sub-documents?

Given the following dataset of books with a related books list:
{ "_id" : 1, "related_books" : [ { book_id: 1 }, { book_id: 2 }, { book_id: 3 } ] } <-- this one
{ "_id" : 2, "related_books" : [ { book_id: 1 } }
{ "_id" : 3, "related_books" : [ { book_id: 3 }, { book_id: 2 } ] } <-- and this one
{ "_id" : 4, "related_books" : [ { book_id: 1 }, { book_id: 2 } ] }
I'm trying to get the list of books when _id === related_book.book_id, so in this case:
book 1: it contains a related_book with book_id = 1
book 3: it contains a related_book with book_id = 3
I've been trying to find my way with aggregation filters, but I can't make it work with the check against a sub-document field:
db.books.aggregate([
  {
    "$project": {
      "selected_books": {
        "$filter": {
          "input": "$books",
          "as": "book",
          "cond": { "$in": ["$_id", "$$book.related_books.book_id"] }
        }
      }
    }
  }
])
This is my solution to this problem:
db.getCollection("books").aggregate([{
$addFields: {
hasBookWithSameId: {
$reduce: {
input: "$related_books",
initialValue: false,
in: {$or: ["$$value", {$eq: ["$_id", "$$this.book_id"]}]}
}
}
}
},
{
$match: {
hasBookWithSameId: true
}
}])
In the first step I'm creating a field hasBookWithSameId that holds a boolean: true if there is a related book with the same id, false otherwise. This is done using the $reduce operator, a powerful tool for dealing with embedded arrays; it works by iterating over the array and checking whether any related book has the same id as the parent document.
At the end, I just match all the documents that have this property set to true.
Update:
There is a more elegant solution to this problem with just one aggregation step, using $map and $anyElementTrue:
db.collection.aggregate([
  {
    $match: {
      $expr: {
        $anyElementTrue: {
          $map: {
            input: "$related_books",
            in: { $eq: ["$$this.book_id", "$_id"] }
          }
        }
      }
    }
  }
])
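For what it's worth, the same match can arguably be written even more compactly: in aggregation expressions a path such as "$related_books.book_id" resolves to the array of all book_id values, so $in can check membership directly. A sketch, assuming the same collection:
db.collection.aggregate([
  {
    $match: {
      // true when the document's _id appears among its sub-documents' book_ids
      $expr: { $in: ["$_id", "$related_books.book_id"] }
    }
  }
])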
