Testing around MSSQL table-valued parameters in sqlmock

I have a function that is designed to insert a large number of elements into an MSSQL database using a table-valued parameter and a procedure.
func (requester *Requester) doQuery(ctx context.Context, dtos interface{}) (err error) {
    conn, err := requester.conn.Conn(ctx)
    if err != nil {
        return err
    }
    defer func() {
        if clErr := conn.Close(); clErr != nil {
            err = clErr
        }
    }()
    tx, err := conn.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelRepeatableRead, ReadOnly: false})
    if err != nil {
        return err
    }
    defer func() {
        if p := recover(); p != nil {
            tx.Rollback()
            panic(p)
        } else if err != nil {
            tx.Rollback()
        } else {
            tx.Commit()
        }
    }()
    param := sql.Named("TVP", mssql.TVP{
        TypeName: "MyTypeName",
        Value:    dtos,
    })
    _, err = tx.ExecContext(ctx, "EXEC [dbo].[usp_InsertConsumption] @TVP", param)
    return err
}
The test I wrote for this function is included below (note that it depends on ginkgo and gomega):
Describe("SQL Tests", func() {
It("AddConsumption - No failures - Added", func() {
db, mock, _ := sqlmock.New()
requester := Requester{conn: db}
defer db.Close()
mock.ExpectBegin()
mock.ExpectExec(regexp.QuoteMeta("EXEC [dbo].[usp_InsertConsumption] #TVP")).
WithArgs("").WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(regexp.QuoteMeta("EXEC [dbo].[usp_InsertTags] #TVP")).
WithArgs("").WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
err := requester.doQuery(context.TODO(), generateData())
Expect(err).ShouldNot(HaveOccurred())
Expect(mock.ExpectationsWereMet()).ShouldNot(HaveOccurred())
})
})
Now, this code was originally written for a MySQL context; since porting it over to MSSQL, I've been getting a peculiar error:
sql: converting argument with name "TVP" type: unsupported type mssql.TVP, a struct
It appears that sqlmock is attempting to call ConvertValue on the TVP object, which it doesn't know how to convert. So, how do I make sqlmock handle this value correctly so I can unit test around the query?

What I discovered here is that sqlmock has a function called ValueConverterOption which, when provided with a custom driver.ValueConverter implementation, will use it in place of the standard conversion function for every invocation of ConvertValue. If you want to test around the ExecContext function when it receives a non-standard argument, a TVP in this case, then you can use this option to inject custom conversion logic into sqlmock.
type mockTvpConverter struct{}

func (converter *mockTvpConverter) ConvertValue(raw interface{}) (driver.Value, error) {
    // Since this converter takes the place of every call to ConvertValue, we will
    // inevitably receive the fake string we return from this function as well, so we
    // need to check whether we've received that or a TVP. More extensive logic may
    // be required.
    switch inner := raw.(type) {
    case string:
        return inner, nil
    case mssql.TVP:
        // First, verify the type name
        Expect(inner.TypeName).Should(Equal("MyTypeName"))
        // VERIFICATION LOGIC HERE
        // Finally, return a fake value that we can use when verifying the arguments
        return "PASSED", nil
    }
    // We had an invalid type; return an error
    return nil, fmt.Errorf("invalid type %T", raw)
}
The test then becomes:
Describe("SQL Tests", func() {
It("AddConsumption - No failures - Added", func() {
db, mock, _ := sqlmock.New(sqlmock.ValueConverterOption(&mockTvpConverter{}))
requester := Requester{conn: db}
defer db.Close()
mock.ExpectBegin()
mock.ExpectExec(regexp.QuoteMeta("EXEC [dbo].[usp_InsertConsumption] #TVP")).
WithArgs("PASSED").WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(regexp.QuoteMeta("EXEC [dbo].[usp_InsertTags] #TVP")).
WithArgs("PASSED").WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
err := requester.doQuery(context.TODO(), generateData())
Expect(err).ShouldNot(HaveOccurred())
Expect(mock.ExpectationsWereMet()).ShouldNot(HaveOccurred())
})
})
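As a side note, if you don't need to assert on the exact placeholder value, sqlmock's AnyArg matcher can stand in for "PASSED" (a minor variation on the expectation above, still relying on the same converter):

mock.ExpectExec(regexp.QuoteMeta("EXEC [dbo].[usp_InsertConsumption] @TVP")).
    WithArgs(sqlmock.AnyArg()).WillReturnResult(sqlmock.NewResult(1, 1))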

Related

MongoDB aggregation query not matching the structure that is being defined

I have a requirement in my project where I have to perform a DB operation to get the total number of users of a particular type. What I am doing is collecting all the filter queries in a slice and passing that slice to my DB function.
This is the code snippet from where I am calling the DB function:
filters := []bson.D{
    {{Key: "Mykey", Value: myvalue}},
    {{Key: "Mykey", Value: myvalue}},
    {{Key: "Mykey", Value: myvalue}},
    {{Key: "Mykey", Value: myvalue}},
}
counts, err := dbmain.NoOfDocumentsInfo(MyDBName, myCollectionName, filters...)
Below is my called function
func NoOfDocumentsInfo(DB string, col string, filters ...bson.D) ([]int64, error) {
    if dbInstance == nil {
        if GetDBInstance() == nil {
            logger.Error("Not connecting to DB")
            err := errors.New("DB connection error")
            return nil, err
        }
    }
    logger.Debugf("%s %s", DB, col)
    coll := dbInstance.Database(DB).Collection(col)
    counts := make([]int64, len(filters))
    for i, filter := range filters {
        count, err := coll.CountDocuments(context.TODO(), filter)
        if err != nil {
            logger.Fatal(err)
            return nil, err
        }
        counts[i] = count
    }
    return counts, nil
}
As you can see, I am calling coll.CountDocuments multiple times. What I want is to write the code without calling coll.CountDocuments multiple times, by aggregating all the filters into a single query.
I have tried to use the aggregation pipeline, but my "cur" and "result" are giving null output. If you run the code you will be able to see it.
func NoOfDocumentsInfo(DB string, col string, filters ...bson.D) ([]int64, error) {
    if dbInstance == nil {
        if GetDBInstance() == nil {
            logger.Error("Not connecting to DB")
            err := errors.New("DB connection error")
            return nil, err
        }
    }
    logger.Debugf("%s %s", DB, col)
    coll := dbInstance.Database(DB).Collection(col)
    pipeline := make([]bson.M, 0, len(filters)+2)
    pipeline = append(pipeline, bson.M{"$match": bson.M{"$or": filters}})
    pipeline = append(pipeline, bson.M{"$group": bson.M{"_id": nil, "count": bson.M{"$sum": 1}}})
    pipeline = append(pipeline, bson.M{"$group": bson.M{"_id": nil, "count": bson.M{"$first": "$count"}}})
    var result struct {
        Count int64 `bson:"count"`
    }
    cur, err := coll.Aggregate(context.TODO(), pipeline)
    if err != nil {
        logger.Fatal(err)
        return nil, err
    }
    logger.Debugf("cur: %+v", cur)
    err = cur.Decode(&result)
    logger.Debugf("result: %+v, err: %v", result, err)
    if err != nil {
        logger.Fatal(err)
        return nil, err
    }
    return []int64{result.Count}, nil
}
You would have to add a field for each filter in $group, using $cond to conditionally increment the given counter. But this may very well end up not using indexes, and thus be even slower than the separate, original count queries. Also note that using $or may likewise result in skipping indexes, and that in $cond you may have to transform the filters (e.g. add $ to field names).
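For illustration, here is a sketch of what that single pipeline could look like; the category/status fields and their values are hypothetical, not taken from the question:

// Counts matches for two hypothetical filters in one pass. Inside $cond the
// fields must be referenced as paths ("$category"), which is the filter
// transformation mentioned above.
pipeline := []bson.M{
    {"$match": bson.M{"$or": bson.A{
        bson.M{"category": "books"},
        bson.M{"status": "active"},
    }}},
    {"$group": bson.M{
        "_id": nil,
        "categoryCount": bson.M{"$sum": bson.M{"$cond": bson.A{
            bson.M{"$eq": bson.A{"$category", "books"}}, 1, 0}}},
        "statusCount": bson.M{"$sum": bson.M{"$cond": bson.A{
            bson.M{"$eq": bson.A{"$status", "active"}}, 1, 0}}},
    }},
}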
You'd better launch concurrent count queries (one goroutine per filter); if the queries are indexed, they will complete fast. This is how it could look:
func docCounts(db string, col string, filters ...bson.D) ([]int64, error) {
    // ... obtain collection
    coll := dbInstance.Database(db).Collection(col)
    counts := make([]int64, len(filters))
    errs := make([]error, len(filters))
    wg := &sync.WaitGroup{}
    wg.Add(len(filters))
    for i := range filters {
        go func(i int) {
            defer wg.Done()
            counts[i], errs[i] = coll.CountDocuments(context.TODO(), filters[i])
        }(i)
    }
    wg.Wait()
    // Produce some kind of error if any of the queries failed.
    var err error
    for _, e := range errs {
        if e != nil {
            err = fmt.Errorf("at least one query failed: %w", e)
            break
        }
    }
    // Note: starting with Go 1.20, you could simply write:
    // err = errors.Join(errs...)
    return counts, err
}

Alternative to 'coll.CountDocuments' function on Mongodb in golang. (Aggregation Pipeline)

As you can see, I am calling the "coll.CountDocuments" function multiple times. What I want is to write the code without calling the "coll.CountDocuments" function multiple times, by aggregating all the filters into a single query.
func NoOfDocumentsInfo(DB string, col string, filters ...bson.D) ([]int64, error) {
    if dbInstance == nil {
        if GetDBInstance() == nil {
            logger.Error("Not connecting to DB")
            err := errors.New("DB connection error")
            return nil, err
        }
    }
    logger.Debugf("%s %s", DB, col)
    coll := dbInstance.Database(DB).Collection(col)
    counts := make([]int64, len(filters))
    for i, filter := range filters {
        count, err := coll.CountDocuments(context.TODO(), filter)
        if err != nil {
            logger.Fatal(err)
            return nil, err
        }
        counts[i] = count
    }
    return counts, nil
}
I have tried to use the aggregation pipeline, but "cur" and "result" are giving null output.
func NoOfDocumentsInfo(DB string, col string, filters ...bson.D) ([]int64, error) {
    if dbInstance == nil {
        if GetDBInstance() == nil {
            logger.Error("Not connecting to DB")
            err := errors.New("DB connection error")
            return nil, err
        }
    }
    logger.Debugf("%s %s", DB, col)
    coll := dbInstance.Database(DB).Collection(col)
    pipeline := make([]bson.M, 0, len(filters)+2)
    pipeline = append(pipeline, bson.M{"$match": bson.M{"$or": filters}})
    pipeline = append(pipeline, bson.M{"$group": bson.M{"_id": nil, "count": bson.M{"$sum": 1}}})
    pipeline = append(pipeline, bson.M{"$group": bson.M{"_id": nil, "count": bson.M{"$first": "$count"}}})
    var result struct {
        Count int64 `bson:"count"`
    }
    cur, err := coll.Aggregate(context.TODO(), pipeline)
    if err != nil {
        logger.Fatal(err)
        return nil, err
    }
    logger.Debugf("cur: %+v", cur)
    err = cur.Decode(&result)
    logger.Debugf("result: %+v, err: %v", result, err)
    if err != nil {
        logger.Fatal(err)
        return nil, err
    }
    return []int64{result.Count}, nil
}
A much simpler approach would be the one that I'm going to share here. Let's start with the code:
package main

import (
    "context"
    "fmt"
    "time"

    "go.mongodb.org/mongo-driver/bson"
    "go.mongodb.org/mongo-driver/mongo"
    "go.mongodb.org/mongo-driver/mongo/options"
)

var (
    dbInstance *mongo.Client
    ctx        context.Context
    cancel     context.CancelFunc
)

func NoOfDocumentsInfo(client *mongo.Client, DB string, col string, filters bson.A) (int64, error) {
    coll := client.Database(DB).Collection(col)
    myFilters := bson.D{
        bson.E{
            Key:   "$and",
            Value: filters,
        },
    }
    counts, err := coll.CountDocuments(ctx, myFilters)
    if err != nil {
        panic(err)
    }
    return counts, nil
}

func main() {
    ctx, cancel = context.WithTimeout(context.Background(), 20*time.Second)
    defer cancel()

    // set MongoDB connection
    clientOptions := options.Client().ApplyURI("mongodb://root:root@localhost:27017")
    mongoClient, err := mongo.Connect(ctx, clientOptions)
    if err != nil {
        panic(err)
    }
    defer mongoClient.Disconnect(ctx)

    // query with filters
    numDocs, err := NoOfDocumentsInfo(mongoClient, "demodb", "myCollection", bson.A{
        bson.D{bson.E{Key: "Name", Value: bson.D{bson.E{Key: "$eq", Value: "John Doe"}}}},
        bson.D{bson.E{Key: "Song", Value: bson.D{bson.E{Key: "$eq", Value: "White Roses"}}}},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("num docs:", numDocs)
}
Let's see the relevant changes applied to the code:
Expect a parameter called filters of type bson.A, which is the MongoDB type for an array.
Build the myFilters variable, of type bson.D (a slice), containing a single bson.E item, where the Key is the logical operator ($and) and the Value is the array passed into the function.
Build the array to pass to the function with all of the needed filters (e.g. two equality conditions: one on the Name key and the other on the Song key).
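For comparison, the same filter document could also be written with bson.M; this equivalent form is an illustration, not part of the original answer:

myFilters := bson.M{"$and": bson.A{
    bson.M{"Name": bson.M{"$eq": "John Doe"}},
    bson.M{"Song": bson.M{"$eq": "White Roses"}},
}}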
Finally, I also made some improvements to how you open the MongoDB connection and how you release the allocated resources.
Let me know if this solves your issue, thanks!

Unmarshal list into set

Moved from Python to Go:
jsonBlob := `{ "test" : {"thing":["team1", "team2"]}}`
type other map[string]Myset
type stuff map[string]other
type MySet struct {
set mapset.Set
}
//Custom unmarshaller
func (s *MySet) UnmarshalJSON(p []byte) error {
var a []interface{}
if err := json.Unmarshal(p, &a); err != nil {
return err
}
s.set = mapset.NewSet(a)
return nil
}
// Unmarshall it
var s stuff
err := json.Unmarshall(jsonBlob, &s)
if err != nil {
return err
}
but it throws: runtime error: hash of unhashable type []interface {}
Given that the data type you wish to use is an interface, and does not satisfy the json.Unmarshaler interface, you have two options:
Unmarshal to an array, then convert to your preferred type (see the sketch after the example below).
Create a custom type, that wraps your preferred type, and provides an UnmarshalJSON method. This is functionally the same as #1, but may be easier to use. Example:
type MySet struct {
    set mapset.Set
}

func (s *MySet) UnmarshalJSON(p []byte) error {
    var a []interface{}
    if err := json.Unmarshal(p, &a); err != nil {
        return err
    }
    // Spread the elements into NewSet; passing the slice itself as a single
    // argument is what triggers "hash of unhashable type []interface {}".
    s.set = mapset.NewSet(a...)
    return nil
}
(Note, this code is untested; it is not meant to be a complete solution, but a guide in the right direction.)
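For the first option, here is a minimal sketch (assuming golang-set's variadic NewSet; the nested map shape mirrors the jsonBlob above):

// Option 1: unmarshal into plain Go types first, then build the set by
// spreading the elements.
var raw map[string]map[string][]interface{}
if err := json.Unmarshal([]byte(jsonBlob), &raw); err != nil {
    return err
}
set := mapset.NewSet(raw["test"]["thing"]...)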

Dynamically insert multiple documents using mgo golang mongodb

How can I insert an array of documents into MongoDB with the mgo library using only a single DB call, as in db.collection.insert()?
I have the following Transaction structure:
type Transaction struct {
    Brand string `json:"brand"`
    Name  string `json:"name"`
    Plu   string `json:"plu"`
    Price string `json:"price"`
}
From a POST request I will receive an array of these structures. I want to insert them into MongoDB as individual documents, but using a single DB call, as explained in db.collection.insert().
I tried using c.Insert of mgo
The following is the code snippet:
func insertTransaction(c *gin.Context) {
    var transactions []Transaction
    err := c.BindJSON(&transactions)
    if err != nil {
        c.AbortWithStatusJSON(http.StatusBadRequest, map[string]string{"error": "invalid JSON"})
        return
    }
    err = InsertTransactons(transactions)
    if err != nil {
        c.AbortWithStatusJSON(http.StatusInternalServerError, &map[string](interface{}){
            "status":  "error",
            "code":    "500",
            "message": "Internal server error",
            "error":   err,
        })
        return
    }
    c.JSON(http.StatusCreated, &map[string](interface{}){
        "status":  "success",
        "code":    "0",
        "message": "created",
    })
}

func InsertTransactons(u []Transaction) error {
    s := GetSession()
    defer s.Close()
    c := s.DB(DB).C(TransactionColl)
    err := c.Insert(u...)
    if err != nil {
        return err
    }
    return nil
}
But as I compile and run the code, I get the following error:
go/database.go:34:17: cannot use u (type *[]Transaction) as type []interface {} in argument to c.Insert
You cannot pass a []Transaction as a []interface{}. You need to convert each Transaction to interface{} to change its memory layout.
var ui []interface{}
for _, t := range u {
    ui = append(ui, t)
}
Pass ui to c.Insert instead.
Create a slice of interface{} values for the document structs by appending to it, then insert the data using Bulk insert, whose Insert method takes variadic arguments:
type Bulk struct {
    // contains filtered or unexported fields
}

func (b *Bulk) Insert(docs ...interface{})

For inserting documents in bulk:
const INSERT_COUNT int = 10000

type User struct {
    Id    bson.ObjectId `bson:"_id,omitempty" json:"_id"`
    Email string        `bson:"email" json:"email"`
}

func (self *User) Init() {
    self.Id = bson.NewObjectId()
}
Call the Bulk() function on a collection returned from the DB connection; it returns a *Bulk. Assign it to a variable, which is then used to call the Insert() method with the Bulk pointer receiver:
bulk := dbs.Clone().DB("").C("users").Bulk()
bulk.Insert(users...)
func main() {
    // Database
    dbs, err := mgo.Dial("mongodb://localhost/")
    if err != nil {
        panic(err)
    }
    defer dbs.Close()

    // Collections
    uc := dbs.DB("").C("users")

    count := INSERT_COUNT
    users := make([]interface{}, count)
    for i := 0; i < count; i++ {
        loopUser := User{}
        loopUser.Init()
        loopUser.Email = fmt.Sprintf("report-%d@example.com", i)
        users[i] = loopUser
    }

    bulk := uc.Bulk()
    bulk.Unordered()
    bulk.Insert(users...)
    if _, bulkErr := bulk.Run(); bulkErr != nil {
        panic(bulkErr)
    }
}

appengine is not saving

For some reason nothing gets saved when the test code below is run. I have other API methods that do save when run normally (not through tests; this is just the first test).
When I check the database stats via localhost:8000, it can be seen that nothing is being inserted.
Update: After copying and pasting the code below and wrapping it in a GET request handler with some hardcoded data, it does save to the database. So this seems like an issue with the testing aetest.Context that is used. I have added the code for the NewTestHandler helper below.
Method to create the context within the tests
func NewTestHandler(handlerFunc func(appengine.Context, http.ResponseWriter, *http.Request)) http.HandlerFunc {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        c, _ := aetest.NewContext(nil)
        handlerFunc(c, w, r)
    })
}
Error (update: the key that is generated returns 0 when calling .IntID())
// happens in the .Get() error handling
--- err datastore: internal error: server returned the wrong number of entities
Model
package app

import "time"

type League struct {
    Name      string `json:"name"`
    Location  string `json:"location"`
    CreatedAt time.Time
}
Code
func (api *LeagueApi) Create(c appengine.Context, w http.ResponseWriter, r *http.Request) {
    // data
    var league League
    json.NewDecoder(r.Body).Decode(&league)
    defer r.Body.Close()

    // save to db
    key := datastore.NewIncompleteKey(c, "leagues", nil)
    if _, err := datastore.Put(c, key, &league); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    var leagueCheck League
    if err := datastore.Get(c, key, &leagueCheck); err != nil {
        log.Println("--- err", err)
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    // json response
    if err := json.NewEncoder(w).Encode(league); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
    }
}
Test
func Test_LeagueReturnedOnCreate(t *testing.T) {
    league := League{Name: "foobar"}
    data, _ := json.Marshal(league)
    reader := bytes.NewReader(data)

    // setup request and writer
    r, _ := http.NewRequest("POST", "/leagues", reader)
    w := httptest.NewRecorder()

    // make request
    api := LeagueApi{}
    handler := tux.NewTestHandler(api.Create)
    handler.ServeHTTP(w, r)

    // extract api response
    var leagueCheck League
    json.NewDecoder(w.Body).Decode(&leagueCheck)
    if leagueCheck.Name != "foobar" {
        t.Error("should return the league")
    }

    // ensure the league is in the db
}
