App Engine: Go - Latency - google-app-engine

Hi, I made a simple photo blog app with Go, and when I deployed it to App Engine it had issues updating the pictures on the home page. When I upload a picture and go back to the home page, it takes a few refreshes to show the image. https://hello-world-170523.appspot.com/.
login info:
email: test#example.com
password: test
This is the source code:
package main

import (
    "fmt"
    "html/template"
    "io"
    "net/http"
    "os"
    "path/filepath"
    "strings"

    "github.com/gorilla/sessions"
    "google.golang.org/appengine"
)

type IndexPage struct {
    Photos  []string
    LogedIn bool
}

type LoginPage struct {
    Body      string
    FirstName string
    LastName  string
    Email     string
    Error     string
}

type UploadPage struct {
    Error string
    Msg   string
}

func getPhotos() []string {
    photos := make([]string, 0)
    filepath.Walk("assets/img", func(path string, fi os.FileInfo, err error) error {
        if fi.IsDir() {
            return nil
        }
        path = strings.Replace(path, "\\", "/", -1)
        photos = append(photos, path)
        return nil
    })
    return photos
}

var store = sessions.NewCookieStore([]byte("HelloWorld"))

func loginPage(res http.ResponseWriter, req *http.Request) {
    loginError := ""
    session, _ := store.Get(req, "session")
    str, _ := session.Values["logged-in"].(string)
    if str == "YES" {
        http.Redirect(res, req, "/admin", 302)
        return
    }
    if req.Method == "POST" {
        email := req.FormValue("email")
        password := req.FormValue("password")
        if email == "test#example.com" && password == "test" {
            session.Values["logged-in"] = "YES"
            session.Save(req, res)
            http.Redirect(res, req, "/admin", 302)
            return
        } else {
            loginError = "Invalid Credential. Please Resubmit"
        }
    }
    tpl, err := template.ParseFiles("assets/tpl/login.gohtml", "assets/tpl/header.gohtml")
    if err != nil {
        http.Error(res, err.Error(), 500)
        return
    }
    err = tpl.Execute(res, LoginPage{
        Error: loginError,
    })
}

func admin(res http.ResponseWriter, req *http.Request) {
    uploadError := ""
    successMsg := ""
    session, _ := store.Get(req, "session")
    str, _ := session.Values["logged-in"].(string)
    if str != "YES" {
        http.Redirect(res, req, "/login", 302)
        return
    }
    if req.Method == "POST" {
        // <input type="file" name="file">
        src, hdr, err := req.FormFile("file")
        if err != nil {
            http.Error(res, "Invalid File.", 500)
            return
        }
        defer src.Close()
        // create a new file
        // make sure you have a "tmp" directory in your web root
        dst, err := os.Create("assets/img/" + hdr.Filename)
        if err != nil {
            http.Error(res, err.Error(), 500)
            return
        }
        defer dst.Close()
        // copy the uploaded file into the new file
        io.Copy(dst, src)
    }
    tpl, err := template.ParseFiles("assets/tpl/admin.gohtml", "assets/tpl/header.gohtml")
    if err != nil {
        http.Error(res, err.Error(), 500)
        return
    }
    err = tpl.Execute(res, UploadPage{
        Error: uploadError,
        Msg:   successMsg,
    })
    if err != nil {
        http.Error(res, err.Error(), 500)
    }
}

func index(res http.ResponseWriter, req *http.Request) {
    session, _ := store.Get(req, "session")
    str, _ := session.Values["logged-in"].(string)
    logged := false
    if str == "YES" {
        logged = true
    }
    tpl, err := template.ParseFiles("assets/tpl/index.gohtml", "assets/tpl/header.gohtml")
    if err != nil {
        fmt.Println(err)
        http.Error(res, err.Error(), 500)
        return
    }
    err = tpl.Execute(res, IndexPage{
        Photos:  getPhotos(),
        LogedIn: logged,
    })
    if err != nil {
        fmt.Println(err)
        http.Error(res, err.Error(), 500)
    }
}

func logout(res http.ResponseWriter, req *http.Request) {
    session, _ := store.Get(req, "session")
    str, _ := session.Values["logged-in"].(string)
    if str == "YES" {
        delete(session.Values, "logged-in")
        session.Save(req, res)
        http.Redirect(res, req, "/", 302)
    } else {
        http.Redirect(res, req, "/login", 302)
    }
}

func deletePic(res http.ResponseWriter, req *http.Request) {
    session, _ := store.Get(req, "session")
    str, _ := session.Values["logged-in"].(string)
    if str != "YES" {
        http.Redirect(res, req, "/", 302)
        return
    }
    if req.Method == "POST" {
        imgName := req.FormValue("imgName")
        err := os.Remove(imgName)
        if err != nil {
            http.Error(res, err.Error(), 500)
        }
    }
    tpl, err := template.ParseFiles("assets/tpl/delete.gohtml", "assets/tpl/header.gohtml")
    if err != nil {
        http.Error(res, err.Error(), 500)
    }
    err = tpl.Execute(res, IndexPage{
        Photos: getPhotos(),
    })
    if err != nil {
        http.Error(res, err.Error(), 500)
    }
}

func main() {
    http.HandleFunc("/delete", deletePic)
    http.Handle("/assets/", http.StripPrefix("/assets", http.FileServer(http.Dir("./assets"))))
    http.HandleFunc("/", index)
    http.HandleFunc("/admin", admin)
    http.HandleFunc("/login", loginPage)
    http.HandleFunc("/logout", logout)
    appengine.Main()
}
And my app.yaml:
runtime: go
env: flex

Related

Send a File from url to cloudflare images fails

I'm using Google App Engine, so writing files is only allowed through Cloud Storage. When the API is hit, I can grab the file and store it in Google Cloud Storage without issues. That function just returns the URL where it is saved.
I want to get that image URL and then send it to Cloudflare Images because they let you create variants.
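For context, the upload helper itself isn't shown in the question. A minimal sketch of what a "store the file in Cloud Storage and return its URL" function might look like, using the cloud.google.com/go/storage package (the bucket name and the public-URL format here are assumptions, not taken from the original code):
import (
    "context"
    "fmt"
    "io"

    "cloud.google.com/go/storage"
)

// saveToGCS copies r into bucket/object and returns a public URL for it.
// Only a sketch: it assumes the bucket already exists and is publicly readable.
func saveToGCS(ctx context.Context, bucket, object string, r io.Reader) (string, error) {
    client, err := storage.NewClient(ctx)
    if err != nil {
        return "", err
    }
    defer client.Close()
    w := client.Bucket(bucket).Object(object).NewWriter(ctx)
    if _, err := io.Copy(w, r); err != nil {
        return "", err
    }
    // Close flushes the write and finalizes the object.
    if err := w.Close(); err != nil {
        return "", err
    }
    return fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object), nil
}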
type ImageResult struct {
    Result struct {
        ID                string    `json:"id"`
        Filename          string    `json:"filename"`
        Uploaded          time.Time `json:"uploaded"`
        RequireSignedURLs bool      `json:"requireSignedURLs"`
        Variants          []string  `json:"variants"`
    } `json:"result"`
    ResultInfo interface{}   `json:"result_info"`
    Success    bool          `json:"success"`
    Errors     []interface{} `json:"errors"`
    Messages   []interface{} `json:"messages"`
}
The above is the struct that represents the Cloudflare response. Below is the function that takes the Google Cloud Storage URL directly and "downloads" it before sending it off to Cloudflare.
func CloudFlareURL(url, filename string) (*ImageResult, error) {
    cloudFlareUrl := "https://api.cloudflare.com/client/v4/accounts/" + konsts.CloudFlareAcc + "/images/v1"
    cloudFlareAuth := "Bearer " + konsts.CloudFlareApi
    r, err := http.Get(url)
    if err != nil {
        return nil, errors.Wrap(err, "Couldn't get the file")
    }
    if r.StatusCode != 200 {
        return nil, errors.New("Couldn't get the file")
    }
    defer r.Body.Close()
    buff := make([]byte, 4096)
    _, err = r.Body.Read(buff)
    req, err := http.NewRequest("POST", cloudFlareUrl, bytes.NewReader(buff))
    if err != nil {
        return nil, errors.Wrap(err, "Couldn't create the request")
    }
    req.Header.Set("Content-Type", "multipart/form-data")
    req.Header.Set("Authorization", cloudFlareAuth)
    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        return nil, errors.Wrap(err, "Couldn't send the request")
    }
    var result ImageResult
    bodi := &bytes.Buffer{}
    _, err = bodi.ReadFrom(resp.Body)
    if err != nil {
        return nil, errors.Wrap(err, "Couldn't read the response body")
    }
    resp.Body.Close()
    err = json.Unmarshal(bodi.Bytes(), &result)
    if err != nil {
        return nil, errors.Wrap(err, "Couldn't unmarshal the response body")
    }
    return &result, nil
}
This is the error message:
invalid character 'E' looking for beginning of value
Couldn't unmarshal the response body
Now, on my laptop, if I'm running the API server, once a file has been sent I can save it on disk, open it, and send it to Cloudflare with no problems. Here's the code for that:
func CloudFlareFile(params map[string]string, paramName, path string) (*ImageResult, error) {
    file, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    body := &bytes.Buffer{}
    writer := multipart.NewWriter(body)
    part, err := writer.CreateFormFile(paramName, filepath.Base(path))
    if err != nil {
        return nil, err
    }
    _, err = io.Copy(part, file)
    for key, val := range params {
        _ = writer.WriteField(key, val)
    }
    err = writer.Close()
    if err != nil {
        return nil, err
    }
    cloudFlareUrl := "https://api.cloudflare.com/client/v4/accounts/" + konsts.CloudFlareAcc + "/images/v1"
    cloudFlareAuth := "Bearer " + konsts.CloudFlareApi
    req, err := http.NewRequest("POST", cloudFlareUrl, body)
    req.Header.Set("Content-Type", writer.FormDataContentType())
    req.Header.Set("Authorization", cloudFlareAuth)
    var result ImageResult
    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        return nil, errors.Wrap(err, "Couldn't send the request")
    } else {
        body := &bytes.Buffer{}
        _, err := body.ReadFrom(resp.Body)
        if err != nil {
            return nil, errors.Wrap(err, "Couldn't read the response body")
        }
        resp.Body.Close()
        err = json.Unmarshal(body.Bytes(), &result)
        if err != nil {
            return nil, errors.Wrap(err, "Couldn't unmarshal the response body")
        }
    }
    return &result, nil
}
I've tried variations and it always fails. For example:
req, err := http.NewRequest("POST", cloudFlareUrl, r.body)
if err != nil {
    return nil, errors.Wrap(err, "Couldn't create the request")
}
req.Header.Set("Content-Type", "multipart/form-data")
req.Header.Set("Authorization", cloudFlareAuth)
OK, for anyone else having this issue: I solved it. The failing version read only the first 4096 bytes of the file and set a Content-Type of multipart/form-data without building an actual multipart body, so Cloudflare replied with a non-JSON error response, which is what json.Unmarshal choked on. Streaming the whole body through a multipart.Writer and using its FormDataContentType() fixes it:
r, err := http.Get(url)
if err != nil {
    return nil, errors.Wrap(err, "Couldn't get the file")
}
if r.StatusCode != 200 {
    return nil, errors.New("Couldn't get the file")
}
defer r.Body.Close()
b := &bytes.Buffer{}
a := make([]byte, 4096)
wr := multipart.NewWriter(b)
part, err := wr.CreateFormFile("file", filename)
if err != nil {
    return nil, errors.Wrap(err, "Couldn't create the form file")
}
_, err = io.CopyBuffer(part, r.Body, a)
wr.Close()
req, err := http.NewRequest("POST", cloudFlareUrl, bytes.NewReader(b.Bytes()))
if err != nil {
    return nil, errors.Wrap(err, "Couldn't create the request")
}
// req.Header.Set("Content-Type", "multipart/form-data")
req.Header.Set("Content-Type", wr.FormDataContentType())
req.Header.Set("Authorization", cloudFlareAuth)

http.Client in goroutines

I have a text file with this content:
192.168.1.2$nick
192.168.1.3$peter
192.168.1.4$mike
192.168.1.5$joe
A web server is running on each IP in the list.
I need to check if the servers are currently available and output a message if not available.
I wrote a small application. It works, but periodically produces incorrect results - it does not output messages for servers that are not actually available.
I can't figure out what's going on and in fact I'm not sure if I'm using http.Client correctly in goroutines.
Help me, please.
package main

import "fmt"
import "os"
import "strings"
import "io/ioutil"
import "net/http"
import "crypto/tls"
import "time"
import "strconv"

func makeGetRequest(URL string, c *http.Client) {
    resp, err := c.Get(URL)
    if err != nil {
        fmt.Println(err)
    }
    defer resp.Body.Close()
    if !((resp.StatusCode >= 200 && resp.StatusCode <= 209)) {
        fmt.Printf("%s-%d\n", URL, resp.StatusCode)
    }
}

func makeHeadRequestAsync(tasks chan string, done chan bool, c *http.Client) {
    for {
        URL := <-tasks
        if len(URL) == 0 {
            break
        }
        resp, err := c.Head(URL)
        if err != nil {
            fmt.Println(err)
            continue
        }
        defer resp.Body.Close()
        if !((resp.StatusCode >= 200 && resp.StatusCode <= 209)) {
            makeGetRequest(URL, c) // !!! Some servers do not support HEAD requests. !!!
        }
    }
    done <- true
}

func main() {
    if len(os.Args) < 3 {
        fmt.Println("Usage: main <number of threads> <input-file>")
        os.Exit(0)
    }
    threadsNum, err := strconv.Atoi(os.Args[1])
    if err != nil {
        fmt.Println("Bad first parameter. Exit.")
        os.Exit(0)
    }
    http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
    client := &http.Client{
        Timeout: 30 * time.Second,
    }
    file, err := ioutil.ReadFile(os.Args[2])
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    fileLines := strings.Split(string(file), "\n")
    tasks := make(chan string, threadsNum)
    done := make(chan bool)
    for i := 0; i < threadsNum; i++ {
        go makeHeadRequestAsync(tasks, done, client)
    }
    for i := 0; i < len(fileLines); i++ {
        tasks <- strings.Split(string(fileLines[i]), "$")[0:1][0]
    }
    for i := 0; i < threadsNum; i++ {
        tasks <- ""
        <-done
    }
}
The program terminates when the main() function returns. The code does not ensure that all goroutines are done before returning from main.
Fix by doing the following:
Use a sync.WaitGroup to wait for the goroutines to complete before exiting the program.
Exit the goroutine when tasks is closed. Close tasks after submitting all work.
Here's the code:
func makeHeadRequestAsync(tasks chan string, wg *sync.WaitGroup, c *http.Client) {
    defer wg.Done()
    // for range on channel breaks when the channel is closed.
    for URL := range tasks {
        resp, err := c.Head(URL)
        if err != nil {
            fmt.Println(err)
            continue
        }
        defer resp.Body.Close()
        if !(resp.StatusCode >= 200 && resp.StatusCode <= 209) {
            makeGetRequest(URL, c) // !!! Some servers do not support HEAD requests. !!!
        }
    }
}

func main() {
    if len(os.Args) < 3 {
        fmt.Println("Usage: main <number of threads> <input-file>")
        os.Exit(0)
    }
    threadsNum, err := strconv.Atoi(os.Args[1])
    if err != nil {
        fmt.Println("Bad first parameter. Exit.")
        os.Exit(0)
    }
    http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
    client := &http.Client{
        Timeout: 30 * time.Second,
    }
    file, err := ioutil.ReadFile(os.Args[2])
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    fileLines := strings.Split(string(file), "\n")
    tasks := make(chan string)
    var wg sync.WaitGroup
    wg.Add(threadsNum)
    for i := 0; i < threadsNum; i++ {
        go makeHeadRequestAsync(tasks, &wg, client)
    }
    for i := 0; i < len(fileLines); i++ {
        tasks <- strings.Split(string(fileLines[i]), "$")[0:1][0]
    }
    close(tasks)
    wg.Wait()
}
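One more detail worth noting, separate from the WaitGroup fix: in the original makeGetRequest, the error from c.Get is printed but the function keeps going, so the deferred resp.Body.Close() panics on a nil response. A small sketch of the same function with an early return on error:
func makeGetRequest(URL string, c *http.Client) {
    resp, err := c.Get(URL)
    if err != nil {
        // resp is nil here, so return before touching resp.Body.
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    if !(resp.StatusCode >= 200 && resp.StatusCode <= 209) {
        fmt.Printf("%s-%d\n", URL, resp.StatusCode)
    }
}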

How to gzip string and return byte array in golang

My Java code is below:
public static byte[] gzip(String str) throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    GZIPOutputStream gos = new GZIPOutputStream(baos);
    gos.write(str.getBytes("UTF-8"));
    gos.close();
    return baos.toByteArray();
}
How can I gzip a string and return a byte array in Go, the way my Java code does?
Here is a complete example of a gzipString function that uses the standard library package compress/gzip:
package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
)

func gzipString(src string) ([]byte, error) {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    _, err := zw.Write([]byte(src))
    if err != nil {
        return nil, err
    }
    if err := zw.Close(); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}

func main() {
    gzippedBytes, err := gzipString("")
    if err != nil {
        panic(err)
    }
    fmt.Printf("Zipped out: %v", gzippedBytes)
}
Have a look at the following snippet. Playground: https://play.golang.org/p/3kXBmQ-c9xE
Go has everything you need in its standard library. Check https://golang.org/pkg/compress/gzip:
package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "log"
    "strings"
)

func main() {
    s := "Hello, playground"
    // Create source reader
    src := strings.NewReader(s)
    buf := bytes.NewBuffer(nil)
    // Create destination writer
    dst := gzip.NewWriter(buf)
    // copy the content as gzip compressed
    _, err := io.Copy(dst, src)
    if err != nil {
        log.Fatal(err)
    }
    // Close the gzip writer to flush buffered data and write the gzip footer;
    // without this the stream in buf is incomplete.
    if err := dst.Close(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(buf.String())
}
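If you want to check that the bytes produced by either approach form a valid gzip stream, a quick round trip through gzip.NewReader (not part of the original answers, just a sanity-check sketch) could look like this:
package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
    "log"
)

func main() {
    // Compress a string into a byte slice.
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    if _, err := zw.Write([]byte("Hello, playground")); err != nil {
        log.Fatal(err)
    }
    if err := zw.Close(); err != nil {
        log.Fatal(err)
    }
    // Decompress it again and make sure the original text comes back.
    zr, err := gzip.NewReader(&buf)
    if err != nil {
        log.Fatal(err)
    }
    out, err := ioutil.ReadAll(zr)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out)) // Hello, playground
}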

Writing to byte range within a file in Go

I am downloading a large file in concurrent chunks of 10 MB using Go, as shown below.
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strconv"
)

func main() {
    chunkSize := 1024 * 1024 * 10 // 10MB
    url := "http://path/to/large/zip/file/zipfile.zip"
    filepath := "zipfile.zip"
    res, _ := http.Head(url)
    maps := res.Header
    length, _ := strconv.Atoi(maps["Content-Length"][0]) // Get the content length from the header request
    // startByte and endByte determine the positions of the chunk that should be downloaded
    var startByte = 0
    var endByte = chunkSize - 1
    for startByte < length {
        if endByte > length {
            endByte = length - 1
        }
        go func(startByte, endByte int) {
            client := &http.Client{}
            req, _ := http.NewRequest("GET", url, nil)
            rangeHeader := fmt.Sprintf("bytes=%d-%d", startByte, endByte)
            req.Header.Add("Range", rangeHeader)
            resp, _ := client.Do(req)
            defer resp.Body.Close()
            data, _ := ioutil.ReadAll(resp.Body)
            addToFile(filepath, startByte, endByte, data)
        }(startByte, endByte)
        startByte = endByte + 1
        endByte += chunkSize
    }
}

func addToFile(filepath string, startByte, endByte int, data []byte) {
    // TODO: write to byte range in file
}
How should I go about creating the file, and writing to a specified byte range within the file corresponding to the byte range of the chunk?
For example, if I get the data from the bytes 262144000-272629759, the addToFile function should write to 262144000-272629759 within the zipfile.zip. Then, if data from another range is obtained, that should be written to the respective range in zipfile.zip.
Figured out how to do this. Change the addToFile function as shown below.
func addToFile(filepath string, startByte int, data []byte) {
    // O_CREATE so the first chunk creates the file; 0644 are the file permissions.
    f, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        panic("File not found")
    }
    whence := io.SeekStart
    _, err = f.Seek(int64(startByte), whence)
    f.Write(data)
    f.Sync() // flush to disk
    f.Close()
}
For example,
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
)

func write(ws io.WriteSeeker, offset int64, p []byte) (n int, err error) {
    _, err = ws.Seek(offset, io.SeekStart)
    if err != nil {
        return 0, err
    }
    n, err = ws.Write(p)
    if err != nil {
        return 0, err
    }
    return n, nil
}

func main() {
    filename := `test.file`
    f, err := os.Create(filename)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    defer f.Close()
    buf := make([]byte, 16)
    for i := range buf {
        buf[i] = byte('A' + i)
    }
    _, err = write(f, 16, buf)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    for i := range buf {
        buf[i] = byte('a' + i)
    }
    _, err = write(f, 0, buf)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    err = f.Close()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    data, err := ioutil.ReadFile(filename)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    fmt.Printf("%q\n", data)
}
Output:
"abcdefghijklmnopABCDEFGHIJKLMNOP"

find duplicated files in a directory

This is my first Go program. I'm learning the language, but it's a bit difficult to understand all the concepts, so to practice I wrote a program to detect duplicate files. It's a simple program which recursively checks for duplicated files in a directory.
But:
How do I detect duplicate files among the files in a directory?
The problem isn't walking the directory recursively; the problem is how to compare the files.
You could take the hash of each file body and then compare the hashes in a dictionary/map.
package main

import (
    "crypto/md5"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "os"
)

func main() {
    contentHashes := make(map[string]string)
    if err := readDir("./", contentHashes); err != nil {
        log.Fatal(err)
    }
}

func readDir(dirName string, contentHashes map[string]string) (err error) {
    filesInfos, err := ioutil.ReadDir(dirName)
    if err != nil {
        return
    }
    for _, fi := range filesInfos {
        if fi.IsDir() {
            err := readDir(dirName+fi.Name()+"/", contentHashes)
            if err != nil {
                return err
            }
        } else {
            // The important bits for this question
            location := dirName + fi.Name()
            // open the file
            f, err := os.Open(location)
            if err != nil {
                return err
            }
            h := md5.New()
            // copy the file body into the hash function
            if _, err := io.Copy(h, f); err != nil {
                f.Close()
                return err
            }
            // close the file before moving on, so descriptors don't pile up
            f.Close()
            // Check if a file body with the same hash already exists
            key := fmt.Sprintf("%x", h.Sum(nil))
            if val, exists := contentHashes[key]; exists {
                fmt.Println("Duplicate found", val, location)
            } else {
                contentHashes[key] = location
            }
        }
    }
    return
}
Use sha256 to compare the files.
Example:
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "flag"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "sync"
)

var dir string
var workers int

type Result struct {
    file   string
    sha256 [32]byte
}

func worker(input chan string, results chan<- *Result, wg *sync.WaitGroup) {
    for file := range input {
        var h = sha256.New()
        var sum [32]byte
        f, err := os.Open(file)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            continue
        }
        if _, err = io.Copy(h, f); err != nil {
            fmt.Fprintln(os.Stderr, err)
            f.Close()
            continue
        }
        f.Close()
        copy(sum[:], h.Sum(nil))
        results <- &Result{
            file:   file,
            sha256: sum,
        }
    }
    wg.Done()
}

func search(input chan string) {
    filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
        } else if info.Mode().IsRegular() {
            input <- path
        }
        return nil
    })
    close(input)
}

func main() {
    flag.StringVar(&dir, "dir", ".", "directory to search")
    flag.IntVar(&workers, "workers", runtime.NumCPU(), "number of workers")
    flag.Parse()
    fmt.Printf("Searching in %s using %d workers...\n", dir, workers)
    input := make(chan string)
    results := make(chan *Result)
    wg := sync.WaitGroup{}
    wg.Add(workers)
    for i := 0; i < workers; i++ {
        go worker(input, results, &wg)
    }
    go search(input)
    go func() {
        wg.Wait()
        close(results)
    }()
    counter := make(map[[32]byte][]string)
    for result := range results {
        counter[result.sha256] = append(counter[result.sha256], result.file)
    }
    for sha, files := range counter {
        if len(files) > 1 {
            fmt.Printf("Found %d duplicates for %s: \n", len(files), hex.EncodeToString(sha[:]))
            for _, f := range files {
                fmt.Println("-> ", f)
            }
        }
    }
}
