golang database updating a non-existing entry

I have a function to update a database entry, shown below. The logic is that updating a non-existing entry returns an error. I find the error handling a bit verbose. Is there a better way to do this? Thanks.
func (adb *AppDB) UpdateTicket(t Ticket) error {
	result, err := adb.db.NamedExec(`UPDATE ticket SET detail=:detail, start_time=:start_time, end_time=:end_time, priority=:priority WHERE id=:id;`, &t)
	if err != nil {
		return err
	}
	nRows, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if nRows == 0 {
		return fmt.Errorf("Ticket:%v does not exist for update", t)
	}
	return nil
}

If you want to make it less verbose and easier to maintain, I would highly recommend using an ORM. You may try https://github.com/jinzhu/gorm
Named returns might make it less verbose, but this is not best practice in a lot of scenarios:
func (adb *AppDB) UpdateTicket(t Ticket) (err error) {
	var result sql.Result
	var nRows int64
	if result, err = adb.db.NamedExec(`UPDATE ticket SET detail=:detail, start_time=:start_time, end_time=:end_time, priority=:priority WHERE id=:id;`, &t); err != nil {
		return
	}
	if nRows, err = result.RowsAffected(); err != nil {
		return
	}
	if nRows == 0 {
		err = fmt.Errorf("Ticket:%v does not exist for update", t)
	}
	return
}
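Another way to cut the repetition without named returns is to hoist the boilerplate into a small helper. A minimal sketch, reusing the question's AppDB/Ticket types and assuming the database/sql and fmt imports; checkAffected is a hypothetical helper name, not part of sqlx or database/sql:

// checkAffected is a hypothetical helper. Its parameters exactly match the
// (sql.Result, error) pair returned by Exec-style calls, so it can wrap the
// call expression directly.
func checkAffected(result sql.Result, err error) error {
	if err != nil {
		return err
	}
	n, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if n == 0 {
		return fmt.Errorf("no rows affected")
	}
	return nil
}

func (adb *AppDB) UpdateTicket(t Ticket) error {
	return checkAffected(adb.db.NamedExec(`UPDATE ticket SET detail=:detail, start_time=:start_time, end_time=:end_time, priority=:priority WHERE id=:id;`, &t))
}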

Related

Alternative to 'coll.CountDocuments' function on Mongodb in golang. (Aggregation Pipeline)

As you can see, I am calling the "coll.CountDocuments" function multiple times. What I want is to write the code without calling "coll.CountDocuments" multiple times, by aggregating all the filters into a single query.
func NoOfDocumentsInfo(DB string, col string, filters ...bson.D) ([]int64, error) {
	if dbInstance == nil {
		if GetDBInstance() == nil {
			logger.Error("Not connecting to DB")
			err := errors.New("DB connection error")
			return nil, err
		}
	}
	logger.Debugf("%s %s", DB, col)
	coll := dbInstance.Database(DB).Collection(col)
	counts := make([]int64, len(filters))
	for i, filter := range filters {
		count, err := coll.CountDocuments(context.TODO(), filter)
		if err != nil {
			logger.Fatal(err)
			return nil, err
		}
		counts[i] = count
	}
	return counts, nil
}
I have tried to use an aggregation pipeline, but "cur" and "result" give null output.
func NoOfDocumentsInfo(DB string, col string, filters ...bson.D) ([]int64, error) {
	if dbInstance == nil {
		if GetDBInstance() == nil {
			logger.Error("Not connecting to DB")
			err := errors.New("DB connection error")
			return nil, err
		}
	}
	logger.Debugf("%s %s", DB, col)
	coll := dbInstance.Database(DB).Collection(col)
	pipeline := make([]bson.M, 0, len(filters)+2)
	pipeline = append(pipeline, bson.M{"$match": bson.M{"$or": filters}})
	pipeline = append(pipeline, bson.M{"$group": bson.M{"_id": nil, "count": bson.M{"$sum": 1}}})
	pipeline = append(pipeline, bson.M{"$group": bson.M{"_id": nil, "count": bson.M{"$first": "$count"}}})
	var result struct {
		Count int64 `bson:"count"`
	}
	cur, err := coll.Aggregate(context.TODO(), pipeline)
	if err != nil {
		logger.Fatal(err)
		return nil, err
	}
	logger.Debugf("cur: %+v", cur)
	err = cur.Decode(&result)
	logger.Debugf("result: %+v, err: %v", result, err)
	if err != nil {
		logger.Fatal(err)
		return nil, err
	}
	return []int64{result.Count}, nil
}
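A side note on why "result" stays empty above: in the official mongo-go-driver, Cursor.Decode reads the cursor's current document, and there is no current document until Cursor.Next has advanced the cursor. A minimal sketch of draining the single aggregated document, keeping the rest of the function unchanged:

	cur, err := coll.Aggregate(context.TODO(), pipeline)
	if err != nil {
		return nil, err
	}
	defer cur.Close(context.TODO())
	// Advance to the first (and only) document before decoding.
	if cur.Next(context.TODO()) {
		if err := cur.Decode(&result); err != nil {
			return nil, err
		}
	}
	if err := cur.Err(); err != nil {
		return nil, err
	}
	return []int64{result.Count}, nil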
A much simpler approach would be the one that I'm going to share here. Let's start with the code:
package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

var (
	dbInstance *mongo.Client
	ctx        context.Context
	cancel     context.CancelFunc
)

func NoOfDocumentsInfo(client *mongo.Client, DB string, col string, filters bson.A) (int64, error) {
	coll := client.Database(DB).Collection(col)
	myFilters := bson.D{
		bson.E{
			Key:   "$and",
			Value: filters,
		},
	}
	counts, err := coll.CountDocuments(ctx, myFilters)
	if err != nil {
		return 0, err
	}
	return counts, nil
}

func main() {
	ctx, cancel = context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	// set MongoDB connection
	clientOptions := options.Client().ApplyURI("mongodb://root:root@localhost:27017")
	mongoClient, err := mongo.Connect(ctx, clientOptions)
	if err != nil {
		panic(err)
	}
	defer mongoClient.Disconnect(ctx)

	// query with filters
	numDocs, err := NoOfDocumentsInfo(mongoClient, "demodb", "myCollection", bson.A{
		bson.D{bson.E{Key: "Name", Value: bson.D{bson.E{Key: "$eq", Value: "John Doe"}}}},
		bson.D{bson.E{Key: "Song", Value: bson.D{bson.E{Key: "$eq", Value: "White Roses"}}}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("num docs:", numDocs)
}
Let's see the relevant changes applied to the code:
Expect a parameter called filters of type bson.A, which is the MongoDB array type.
Build the myFilters variable, of type bson.D (a slice), with a single item (a bson.E) constructed in this way:
The Key is the logical operator ($and).
The Value is the array passed into the function.
Build the array to pass to the function with all of the needed filters (e.g. two equality conditions: one on the Name key and the other on Song).
Finally, I also made some improvements to how you open the MongoDB connection and how you release the allocated resources.
Let me know if this solves your issue, thanks!
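As a small aside (my addition, not from the answer above): if field order does not matter for your filter, the same $and can be written more compactly with bson.M, the driver's unordered map type:

	myFilters := bson.M{"$and": bson.A{
		bson.M{"Name": bson.M{"$eq": "John Doe"}},
		bson.M{"Song": bson.M{"$eq": "White Roses"}},
	}}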

How to reuse MongoDB connection in Go

I would like to connect my server, written in Go, with MongoDB, but I'm not sure how to do it efficiently. A couple of examples I found implement it as shown below.
libs/mongodb/client.go
package mongodb

import (
	"context"
	"log"

	"project/keys"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func GetClient() *mongo.Database {
	client, err := mongo.Connect(
		context.Background(),
		options.Client().ApplyURI(keys.GetKeys().MONGO_URI),
	)
	if err != nil {
		log.Fatal(err)
	}
	return client.Database(keys.GetKeys().MONGO_DB_NAME)
}
services/user/findOne.go
package userservices

import (
	"context"
	"log"

	"project/libs/mongodb"
	"project/models"

	"go.mongodb.org/mongo-driver/bson"
)

func FindOne(filter bson.M) (models.User, error) {
	var user models.User
	collection := mongodb.GetClient().Collection("users")
	result := collection.FindOne(context.TODO(), filter)
	if result.Err() != nil {
		return user, result.Err()
	}
	if err := result.Decode(&user); err != nil {
		log.Println("Failed to decode user with error:", err)
		return user, err
	}
	return user, nil
}
The GetClient function returns a database instance that is then used throughout the app. This seems to work, but is it really best practice? It appears to create a new connection every time a client is requested, as shown in the second snippet, or is that assumption incorrect? I also thought about converting GetClient to a singleton that always returns the same database instance, but how would a lost connection be handled in that case? Thank you.
I do it this way: connect once at service start and then pass the MongoDatastore object around to the orchestrator, service, and repository layers. I am using the "github.com/mongodb/mongo-go-driver/mongo" driver for Mongo. I think it internally monitors and recycles idle connections, so we don't have to worry about broken connections as long as the reference to the mongo.Client object is not lost.
const CONNECTED = "Successfully connected to database: %v"

// connectOnce lives at package scope so the sync.Once actually guards against
// repeated connection attempts (a Once declared inside connect would be a new
// value on every call and would never skip anything).
var connectOnce sync.Once

type MongoDatastore struct {
	db      *mongo.Database
	Session *mongo.Client
	logger  *logrus.Logger
}

func NewDatastore(config config.GeneralConfig, logger *logrus.Logger) *MongoDatastore {
	var mongoDataStore *MongoDatastore
	db, session := connect(config, logger)
	if db != nil && session != nil {
		// log statements here as well
		mongoDataStore = new(MongoDatastore)
		mongoDataStore.db = db
		mongoDataStore.logger = logger
		mongoDataStore.Session = session
		return mongoDataStore
	}
	logger.Fatalf("Failed to connect to database: %v", config.DatabaseName)
	return nil
}

func connect(generalConfig config.GeneralConfig, logger *logrus.Logger) (a *mongo.Database, b *mongo.Client) {
	var db *mongo.Database
	var session *mongo.Client
	connectOnce.Do(func() {
		db, session = connectToMongo(generalConfig, logger)
	})
	return db, session
}

func connectToMongo(generalConfig config.GeneralConfig, logger *logrus.Logger) (a *mongo.Database, b *mongo.Client) {
	session, err := mongo.NewClient(generalConfig.DatabaseHost)
	if err != nil {
		logger.Fatal(err)
	}
	err = session.Connect(context.TODO())
	if err != nil {
		logger.Fatal(err)
	}
	var DB = session.Database(generalConfig.DatabaseName)
	logger.Infof(CONNECTED, generalConfig.DatabaseName)
	return DB, session
}
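The repository below calls a GetCollection method that the snippet above does not define; a plausible sketch (my assumption, not from the original answer) would simply expose a collection from the unexported db field:

// GetCollection is a hypothetical accessor assumed by the repository below.
func (ds *MongoDatastore) GetCollection(name string) *mongo.Collection {
	return ds.db.Collection(name)
}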
You may now create your repository as below:
type TestRepository interface {
	Find(ctx context.Context, filters interface{}) ([]models.Document, error)
}

type testRepository struct {
	store *datastore.MongoDatastore
}

func (r *testRepository) Find(ctx context.Context, filters interface{}) ([]models.Document, error) {
	cur, err := r.store.GetCollection("some_collection_name").Find(ctx, filters)
	if err != nil {
		return nil, err
	}
	defer cur.Close(ctx)
	var result = make([]models.Document, 0)
	for cur.Next(ctx) {
		var currDoc models.Document
		err := cur.Decode(&currDoc)
		if err != nil {
			// log here
			continue
		}
		result = append(result, currDoc)
	}
	return result, err
}
I solved it by doing this:
var CNX = Connection()

func Connection() *mongo.Client {
	// Set client options
	clientOptions := options.Client().ApplyURI("mongodb://localhost:27017")
	// Connect to MongoDB
	client, err := mongo.Connect(context.TODO(), clientOptions)
	if err != nil {
		log.Fatal(err)
	}
	// Check the connection
	err = client.Ping(context.TODO(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Connected to MongoDB!")
	return client
}

// call the connection
func main() {
	collection := db.CNX.Database("tasks").Collection("task")
	_ = collection // use the collection here
}

Output: "Connected to MongoDB!"

Golang: Byte insert into []byte

I'm working with a gRPC stream. On the server side, I receive multiple bytes inside the for loop and want to merge them into a single byte slice (I tried the append method without luck). I have attached my sample code below; can anyone guide me?
Example code
func (s *ServerGRPC) Upload(stream pb.GuploadService_UploadServer) (err error) {
	for {
		resp, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				goto END
			}
			err = errors.Wrapf(err,
				"failed unexpectedly while reading chunks from stream")
			return err
		}
		for _, result := range resp.Content {
			fmt.Println("result ====>>>", result)
			// Actual output:
			// result ====>>> 136
			// result ====>>> 84
			// result ====>>> 232
			// result ====>>> 12
			// Expectation:
			// result ===> [136 84 232 12]
		}
	}
	s.logger.Info().Msg("upload received")
END:
	err = stream.SendAndClose(&pb.UploadStatus{
		Message: "Upload received with success",
		Code:    pb.UploadStatusCode_Ok,
	})
	if err != nil {
		err = errors.Wrapf(err,
			"failed to send status code")
		return
	}
	return
}
You are appending to Sample but printing req.Content.
There is nothing wrong with merging slices; you just have to print the merged slice to see the result.
func (s *ServerGRPC) Upload(stream pb.GuploadService_UploadServer) (err error) {
	var respBytes []byte
	for {
		resp, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				goto END // you can use break here
			}
			err = errors.Wrapf(err,
				"failed unexpectedly while reading chunks from stream")
			return err
		}
		for _, result := range resp.Content {
			fmt.Println("result ====>>>", result)
			respBytes = append(respBytes, result)
		}
	}
END:
	s.logger.Info().Msg("upload received")
	// print respBytes here to see the merged result
	err = stream.SendAndClose(&pb.UploadStatus{
		Message: "Upload received with success",
		Code:    pb.UploadStatusCode_Ok,
	})
	if err != nil {
		err = errors.Wrapf(err,
			"failed to send status code")
		return
	}
	return
}
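A small aside (my addition, not from the answer above): assuming resp.Content is a []byte, as the printed byte values suggest, the inner loop can be collapsed into a single variadic append:

	// Append the whole chunk at once instead of byte by byte.
	respBytes = append(respBytes, resp.Content...)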

iterate over a large number of entities on appengine with go

On app engine I have a large number of entities of a particular kind.
I want to run a function on each entity (e.g. edit the entity or copy it)
I would do this in a task queue, but a task queue is limited to 10 minutes of runtime and each function call is prone to many kinds of errors. What is the best way to do this?
Here's my solution, although I'm hoping someone out there has a better one. I also wonder if this is prone to fork bombs, e.g. if the task runs twice, it will set off two chains of iteration! I'm only using it to iterate over a few hundred thousand entities, although the operation on each entity is expensive.
First I create a taskqueue for running each individual function call on an entity one at a time:
queue:
- name: entity-iter
  rate: 100/s
  max_concurrent_requests: 1
  retry_parameters:
    task_retry_limit: 3
    task_age_limit: 30m
    min_backoff_seconds: 200
and then I have an iterate-entities method which, given the kind, will call your delay func on each entity with its key.
package sysadmin

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
	"google.golang.org/appengine/delay"
	"google.golang.org/appengine/log"
	"google.golang.org/appengine/taskqueue"
)

func ForEachEntity(kind string, f *delay.Function) *delay.Function {
	var callWithNextKey *delay.Function // func(c context.Context, depth int, cursorString string) error
	callWithNextKey = delay.Func("something", func(c context.Context, depth int, cursorString string) error {
		q := datastore.NewQuery(kind).KeysOnly()
		if cursorString != "" {
			if curs, err := datastore.DecodeCursor(cursorString); err != nil {
				log.Errorf(c, "error decoding cursor %v", err)
				return err
			} else {
				q = q.Start(curs)
			}
		}
		it := q.Run(c)
		if key, err := it.Next(nil); err != nil {
			if err == datastore.Done {
				log.Infof(c, "Done %v", err)
				return nil
			}
			log.Errorf(c, "datastore error %v", err)
			return err
		} else {
			curs, _ := it.Cursor()
			if t, err := f.Task(key); err != nil {
				return err
			} else if _, err = taskqueue.Add(c, t, "entity-iter"); err != nil {
				log.Errorf(c, "error %v", err)
				return err
			}
			if depth-1 > 0 {
				if err := callWithNextKey.Call(c, depth-1, curs.String()); err != nil {
					log.Errorf(c, "error2 %v", err)
					return err
				}
			}
		}
		return nil
	})
	return callWithNextKey
}
example usage:
var DoCopyCourse = delay.Func("something2", CopyCourse)
var DoCopyCourses = ForEachEntity("Course", DoCopyCourse)

func CopyCourses(c context.Context) {
	//sharedmodels.MakeMockCourses(c)
	DoCopyCourses.Call(c, 9999999, "")
}
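CopyCourse itself is not shown in the answer; a hypothetical sketch of its shape, based on how delay.Func and f.Task(key) are used above (the first parameter of a delayed function must be a context.Context, and the remaining parameters receive the arguments passed to Task):

// CopyCourse is a hypothetical per-entity worker; the real one would load and
// write the concrete Course struct rather than a generic PropertyList.
func CopyCourse(c context.Context, key *datastore.Key) error {
	var props datastore.PropertyList
	if err := datastore.Get(c, key, &props); err != nil {
		return err
	}
	// ... mutate props here, or Put under a new key to copy the entity ...
	_, err := datastore.Put(c, key, &props)
	return err
}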

GAE Go — How to use GetMulti with non-existent entity keys?

I've found myself needing to do a GetMulti operation with an array of keys for which some entities exist, but some do not.
My current code, below, returns an error (datastore: no such entity).
err := datastore.GetMulti(c, keys, infos)
So how can I do this? I'd use a "get or insert" method, but there isn't one.
GetMulti can return an appengine.MultiError in this case. Loop through it and look for datastore.ErrNoSuchEntity. For example:
if err := datastore.GetMulti(c, keys, dst); err != nil {
	if me, ok := err.(appengine.MultiError); ok {
		for i, merr := range me {
			if merr == datastore.ErrNoSuchEntity {
				// keys[i] is missing
			}
		}
	} else {
		return err
	}
}
I know this topic has been up for more than a few days, but I'd like to post an alternative using a type switch.
if err := datastore.GetMulti(c, keys, dst); err != nil {
	switch errt := err.(type) {
	case appengine.MultiError:
		for ix, e := range errt {
			if e == datastore.ErrNoSuchEntity {
				// keys[ix] not found
			} else if e != nil {
				// keys[ix] has error "e"
			}
		}
	default:
		// datastore returned an error that is not a multi-error
	}
}
Thought I'd throw my answer in to show another use case. The following takes in any number of keys and returns only the valid ones.
// Validate keys
var validKeys []*ds.Key
if err := c.DB.GetMulti(ctx, tempKeys, dst); err != nil {
	if me, ok := err.(ds.MultiError); ok {
		for i, merr := range me {
			if merr == ds.ErrNoSuchEntity {
				continue
			}
			validKeys = append(validKeys, tempKeys[i])
		}
	} else {
		return "", err
	}
} else {
	// All tempKeys are valid
	validKeys = append(validKeys, tempKeys...)
}
