react-fine-uploader to authenticate in the backend with redux-token-auth - reactjs

In my stack I am using redux-token-auth to authenticate users with my Rails backend (devise_token_auth). In my app I need to allow authenticated users to upload images, and for this I'm using react-fine-uploader.
My problem is that I don't see a way to have react-fine-uploader POST the images in an authenticated way, and more generally how to use redux-token-auth to send authenticated requests to my backend.
I found that redux-token-auth stores the authentication tokens in localStorage, so I'm sort of able to retrieve them and authenticate my requests. I don't like accessing that data directly with localStorage.getItem("access-token"), though: it's written by the package, so it seems fair to let the package handle it.
Also, with react-fine-uploader the response object doesn't contain the headers from the server response, so I'm not sure how to get the new tokens in order to store them.
Here's the code I have so far:
my ImagesUploader:
import React from "react";
import PropTypes from "prop-types";
import FineUploaderTraditional from "fine-uploader-wrappers";
import Gallery from "react-fine-uploader";
import { backend } from "../../libs/itemTools";

import "react-fine-uploader/gallery/gallery.css";

const ImagesUploader = props => {
  const uploader = new FineUploaderTraditional({
    options: {
      multiple: false,
      validation: {
        itemLimit: 1
      },
      request: {
        endpoint: backend.createImage,
        customHeaders: {
          "access-token": localStorage.getItem("access-token"),
          "token-type": localStorage.getItem("token-type"),
          client: localStorage.getItem("client"),
          uid: localStorage.getItem("uid")
        }
      },
      session: {
        endpoint: backend.loadImages(props.itemId)
      },
      deleteFile: {
        enabled: true,
        endpoint: backend.deleteImage(props.itemId)
      },
      cors: {
        expected: true
      },
      callbacks: {
        onComplete: (id, name, response, xhr) => {
          if (response.success) {
            response.image.id;
            // TODO save the new access-token
            // ####### THIS DOESN'T work #########
            // fine-uploader doesn't include headers in the response object.
            localStorage.setItem("access-token", response.headers["access-token"]);
            localStorage.setItem("token-type", response.headers["token-type"]);
            localStorage.setItem("client", response.headers["client"]);
            localStorage.setItem("uid", response.headers["uid"]);
          } else {
            // [...]
          }
        },
        onSessionRequestComplete: (...params) => {
          console.log(
            "onSessionRequestComplete: " + JSON.stringify(params, 0, 2)
          );
        }
      }
    }
  });

  return (
    <div id="upload-area">
      <Gallery uploader={uploader} />
    </div>
  );
};

ImagesUploader.propTypes = {
  userId: PropTypes.number.isRequired,
  itemId: PropTypes.number.isRequired
};

export default ImagesUploader;
It seems strange to me that the redux-token-auth package doesn't account for authenticated backend calls, and that fine-uploader doesn't give access to the response headers.
Is there maybe something I'm missing?

From taking a look at redux-token-auth, it expects to handle all auth calls itself and doesn't give you an escape hatch in the code, so reading localStorage directly seems like a prudent thing to do there. You can see in the code that it sets those headers from localStorage for every request: https://github.com/kylecorbelli/redux-token-auth/blob/8c5a8fe573918d406a733ca1b21c0b4349f137ab/src/actions.ts#L160
As for fine-uploader, it looks like you can grab the raw headers from the xhr object it hands back to the callback:
https://github.com/FineUploader/fine-uploader/blob/master/client/js/traditional/traditional.xhr.upload.handler.js#L59
The response variable that you get back has already been JSON-parsed via the qq helper: https://github.com/FineUploader/fine-uploader/blob/master/client/js/traditional/traditional.xhr.upload.handler.js#L109
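If that's right, a minimal sketch of the onComplete callback (untested, and assuming the backend lists the devise_token_auth headers in Access-Control-Expose-Headers, since the uploader runs with cors: { expected: true }) could read the rotated tokens straight off the xhr argument with the standard XMLHttpRequest API:
onComplete: (id, name, response, xhr) => {
  if (!response.success) return;
  // devise_token_auth rotates these headers on each authenticated request
  ["access-token", "token-type", "client", "uid", "expiry"].forEach(header => {
    const value = xhr.getResponseHeader(header);
    if (value) {
      localStorage.setItem(header, value);
    }
  });
}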

Related

Uppy/Shrine: How to retrieve presigned url for video after successful upload (using AWS S3)

I'm using Uppy for file uploads in React, with a Rails API using Shrine.
I'm trying to show a preview for an uploaded video before submitting a form. It's important to emphasize that this is specifically for a video upload, not an image. So the 'thumbnail:generated' event will not apply here.
I can't seem to find any events that Uppy provides that return a cached video preview (like thumbnail:generated does), or anything that passes back a presigned url for the uploaded file (less expected, obviously), so the only option I see is constructing the url manually. Here's what I'm currently trying for that (irrelevant code removed for brevity):
import React, { useEffect, useState } from 'react'
import AwsS3 from '@uppy/aws-s3'
import Uppy from '@uppy/core'
import axios from 'axios'
import { DragDrop } from '@uppy/react'
import { API_BASE } from '../../../api'

const constructParams = (metadata) => ([
  `?X-Amz-Algorithm=${metadata['x-amz-algorithm']}`,
  `&X-Amz-Credential=${metadata['x-amz-credential']}`,
  `&X-Amz-Date=${metadata['x-amz-date']}`,
  '&X-Amz-Expires=900',
  '&X-Amz-SignedHeaders=host',
  `&X-Amz-Signature=${metadata['x-amz-signature']}`,
].join('').replaceAll('/', '%2F'))

const MediaUploader = () => {
  const [videoSrc, setVideoSrc] = useState('')

  const uppy = new Uppy({
    meta: { type: 'content' },
    restrictions: {
      maxNumberOfFiles: 1
    },
    autoProceed: true,
  })

  const getPresigned = async (id, type) => {
    const response = await axios.get(`${API_BASE}/s3/params?filename=${id}&type=${type}`)
    const { fields, url } = response.data
    const params = constructParams(fields)
    const presignedUrl = `${url}/${fields.key}${params}`
    console.log('presignedUrl from Shrine request data: ', presignedUrl)
    setVideoSrc(presignedUrl)
  }

  useEffect(() => {
    uppy.use(AwsS3, {
      id: `AwsS3:${Math.random()}`,
      companionUrl: API_BASE,
    })

    uppy.on('upload-success', (file, _response) => {
      const { type, meta } = file

      // First attempt to construct presigned URL here
      const url = 'https://my-s3-bucket.s3.us-west-1.amazonaws.com'
      const params = constructParams(meta)
      const presignedUrl = `${url}/${meta.key}${params}`
      console.log('presignedUrl from upload-success data: ', presignedUrl)

      // Second attempt to construct presigned URL here
      const id = meta.key.split(`${process.env.REACT_APP_ENV}/cache/`)[1]
      getPresigned(id, type)
    })
  }, [uppy])

  return (
    <div className="MediaUploader">
      <div className="Uppy__preview__wrapper">
        <video
          src={videoSrc || ''}
          className="Uppy__preview"
          controls
        />
      </div>
      {(!videoSrc || videoSrc === '') && (
        <DragDrop
          uppy={uppy}
          className="UploadForm"
          locale={{
            strings: {
              dropHereOr: 'Drop here or %{browse}',
              browse: 'browse',
            },
          }}
        />
      )}
    </div>
  )
}

export default MediaUploader
Both urls here come back with a SignatureDoesNotMatch error from AWS.
The manual construction of the url comes mainly from constructParams. I have two different implementations of this, the first of which takes the metadata directly from the uploaded file data in the 'upload-success' event, and then just concatenates a string to build the url. The second one uses getPresigned, which makes a request to my API, which points to a generated Shrine path that should return data for a presigned URL. API_BASE simply points to my Rails API. More info on the generated Shrine route here.
It's worth noting that everything works perfectly with the upload process that passes through Shrine, and after submitting the form, I'm able to get a presigned url for the video and play it without issue on the site. So I have no reason to believe Shrine is returning incorrectly signed urls.
I've compared the two presigned urls I'm manually generating in the form, with the url returned from Shrine after uploading. All 3 are identical in structure, but have different signatures. Here are those three urls:
presignedUrl from upload-success data:
https://my-s3-bucket.s3.us-west-1.amazonaws.com/development/cache/41b229fb17cbf21925d2cd907a59be25.mp4?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAW63AYCMFA4374OLC%2F20221210%2Fus-west-1%2Fs3%2Faws4_request&X-Amz-Date=20221210T132613Z&X-Amz-Expires=900&X-Amz-SignedHeaders=host&X-Amz-Signature=97aefd1ac7f3d42abd2c48fe3ad50b542742ad0717a51528c35f1159bfb15609
presignedUrl from Shrine request data:
https://my-s3-bucket.s3.us-west-1.amazonaws.com/development/cache/023592fb14c63a45f02c1ad89a49e5fd.mp4?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAW63AYCMFA4374OLC%2F20221210%2Fus-west-1%2Fs3%2Faws4_request&X-Amz-Date=20221210T132619Z&X-Amz-Expires=900&X-Amz-SignedHeaders=host&X-Amz-Signature=7171ac72f7db2b8871668f76d96d275aa6c53f71b683bcb6766ac972e549c2b3
presigned url displayed on site after form submission:
https://my-s3-bucket.s3.us-west-1.amazonaws.com/development/cache/41b229fb17cbf21925d2cd907a59be25.mp4?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAW63AYCMFA4374OLC%2F20221210%2Fus-west-1%2Fs3%2Faws4_request&X-Amz-Date=20221210T132734Z&X-Amz-Expires=900&X-Amz-SignedHeaders=host&X-Amz-Signature=9ecc98501866f9c5bd460369a7c2ce93901f94c19afa28144e0f99137cdc2aaf
The first two urls come back with SignatureDoesNotMatch, while the third url properly plays the video.
I'm aware that the first and third urls have the same file name while the second does not. I'm not sure what to make of that, but its relevance is secondary to me, since that solution was more of a last-ditch effort anyway.
I'm not at all attached to the current way I'm doing things. It's just the only solution I could come up with, due to lack of options. If there's a better way of going about this, I'm very open to suggestions.

React Docusign Clickwrap Credentials

In my React application, when I'm rendering the DocuSign clickwrap, I need to supply the accountId and the clickwrapId. Is there a secure way to reference the accountId/clickwrapId without actually putting those values in? I don't want to expose those credentials in my React application.
function App() {
  React.useLayoutEffect(() => {
    docuSignClick.Clickwrap.render({
      environment: 'https://demo.docusign.net',
      accountId: '...',
      clickwrapId: '...',
      onMustAgree(agreement) {
        // Called when no users have previously agreed by the above client user ID
        // for the most recent required Clickwrap version
      },
      onAgreed(agreement) {
        // Called when either 1) the clientUserId has previously agreed
        // 2) the clientUserId has clicked agree and completed successfully
      },
      onDeclined(agreement) {
        // Called when the clientUserId has declined successfully
      }
    }, '#ds-clickwrap');
  }, []);
  return <div id="ds-clickwrap" />
}
Ah, you can use the server-side API to generate an agreement URL instead if that is desired.
POST /clickapi/v1/accounts/.../clickwraps/.../agreements:
Only the clientUserId is required to make this API call. You would do this from your server and then pass the URL from the response to the client.
{
  "clientUserId": "..."
}
This returns an agreement URL in the response:
{
  ...
  "status": "created",
  "agreementUrl": "https://...."
}
This agreementUrl could then be used in the snippet:
docuSignClick.render({
  agreementUrl: '...agreementUrl from REST API response...',
  onAgreed...
}, '#ds-clickwrap');
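For completeness, a rough Node/Express sketch of that server-side call (an illustration only; DS_ACCESS_TOKEN, ACCOUNT_ID and CLICKWRAP_ID are placeholder server-side config values, and the access token would come from one of DocuSign's OAuth flows):
const express = require('express');
const axios = require('axios');

const app = express();
const DS_BASE = 'https://demo.docusign.net/clickapi/v1';

app.post('/api/clickwrap-agreement', express.json(), async (req, res) => {
  const url = `${DS_BASE}/accounts/${process.env.ACCOUNT_ID}` +
    `/clickwraps/${process.env.CLICKWRAP_ID}/agreements`;
  const response = await axios.post(
    url,
    { clientUserId: req.body.clientUserId },
    { headers: { Authorization: `Bearer ${process.env.DS_ACCESS_TOKEN}` } }
  );
  // Only the agreementUrl goes back to the browser; accountId/clickwrapId stay on the server.
  res.json({ agreementUrl: response.data.agreementUrl });
});

app.listen(3001);
This way the React app only ever sees the agreementUrl, never the account or clickwrap IDs.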

Amplify - GraphQL request headers are empty

I am attempting to create an app that uses Cognito user pools for user auth and then sends API requests to a DynamoDB table through GraphQL.
The user auth/signup works correctly; however, I receive a 401 error when attempting to query a data table. The message states "Missing authorization header".
I saw in a similar post that the auth token should be auto-populated into the request headers, but that does not happen for me. I also saw that Amplify provides a function for custom GraphQL headers. I attempted this as well but still get the same "Missing authorization header" error.
Any suggestions?
aws_appsync_graphqlEndpoint: '',
aws_appsync_region: '',
aws_appsync_authenticationType: 'AMAZON_COGNITO_USER_POOLS',
graphql_headers: async () => ({
  'My-Custom-Header': cognitoUser
})
}
This is in my Amplify config/exports file, which I load with Amplify.configure(config).
if (cognitoUser != null) {
  cognitoUser.getSession((err, session) => {
    if (err) {
      console.log(err);
    } else if (!session.isValid()) {
      console.log("Invalid session.");
    } else {
      console.log(session.getIdToken().getJwtToken());
    }
  });
} else {
  console.log("User not found.");
}

console.log(cognitoUser)

Amplify.configure(config)

const client = new AWSAppSyncClient({
  disableOffline: true,
  url: config.aws_appsync_graphqlEndpoint,
  region: config.aws_appsync_region,
  identityPoolId: config.aws_cognito_identity_pool_id,
  userPoolId: config.aws_user_pools_id,
  userPoolWebClientId: config.ws_user_pools_web_client_id,
  auth: {
    type: config.aws_appsync_authenticationType,
    jwtoken: async () =>
      (await Auth.currentSession()).getIdToken().getJwtToken(),
    apiKey: config.aws_appsync_apiKey
  }
});
These are my client settings in my index.js file.
I apologize if I missed something blatant. I am new to backend development and am having trouble getting this to work.
I have only gotten it to work when using API_KEY auth.
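For reference, when the authentication type is AMAZON_COGNITO_USER_POOLS, AppSync expects the Cognito ID token in an Authorization header on each GraphQL request. A hedged sketch of supplying it through the graphql_headers hook (this is not the code above, just an illustration using Amplify's Auth.currentSession()):
import { Auth } from 'aws-amplify';

const config = {
  aws_appsync_graphqlEndpoint: '...',
  aws_appsync_region: '...',
  aws_appsync_authenticationType: 'AMAZON_COGNITO_USER_POOLS',
  // Return the current user's ID token as the Authorization header
  graphql_headers: async () => ({
    Authorization: (await Auth.currentSession()).getIdToken().getJwtToken(),
  }),
};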

InvalidSignatureException from POST request

I have a Lambda function that handles reading data from a file (stored inside an S3 bucket) as well as inserting data into a DynamoDB table. This Lambda function is exposed as a REST endpoint using API Gateway. The function accepts GET requests as well as POST requests. I'm making the GET/POST requests from my React project using the axios and aws4 (for signing) libraries. The GET request reads data from a file stored in S3 and works just fine. The POST request inserts data into the DynamoDB table; however, it doesn't work and AWS returns an InvalidSignatureException as the response. This is an excerpt of my code:
createAWSSignedRequest(postData) {
  let request = {};
  if (postData) {
    request = {
      host: process.env.AWS_HOST,
      method: 'POST',
      url: process.env.AWS_URL,
      path: process.env.AWS_PATH,
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(postData)
    }
  } else {
    request = {
      host: process.env.AWS_HOST,
      method: 'GET',
      url: process.env.AWS_URL,
      path: process.env.AWS_PATH
    }
  }
  let signedRequest = aws4.sign(request, {
    secretAccessKey: process.env.AWS_SECRET_KEY,
    accessKeyId: process.env.AWS_ACCESS_KEY
  });
  return signedRequest;
}
This is how the GET request is made:
let signedRequest = this.createAWSSignedRequest('GET');
axios(signedRequest)
  .then(response => {
  })
  .catch((error) => {
    console.log("error", error);
  });
This is how the POST request is made:
const data = {
  uuid: "916b7d90-0137-11e8-94e6-116965754e23", // just a mock value
  date: "22/jan/2018",
  user_response: [
    {
      question: "this is question1",
      choice: "user selected A"
    },
    {
      question: "this is question2",
      choice: "user selected B"
    },
    {
      question: "this is question3",
      choice: "user selected C"
    }
  ]
};
let signedRequest = this.createAWSSignedRequest(data);
axios(signedRequest)
  .then(response => {
    // ......
  })
  .catch((error) => {
    console.log("error", error);
  });
As you can see, the code for the GET and POST requests is exactly the same (except for the payload and method type). I'm signing with the same secret access key and access key id for both requests. I'm not sure why one request results in "InvalidSignatureException" when the other doesn't. Can anyone shed some light on this issue for me?
Thanks
After discussing it with the aws4 library developer, I figured out what I did wrong. aws4 uses the "body" attribute of the request as the payload when computing the signature, while axios uses the "data" attribute as the payload it actually sends. My mistake was setting only one of them. When I set just "data", the payload was present in the request and Content-Length was computed correctly, but the signature was wrong because the payload was not taken into consideration when signing. When I set just "body", the payload was not present in the request at all, because axios does not use the "body" attribute. The solution is to set both attributes to the payload. I hope this helps anyone who is having the same issue I had.
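In other words, the request object passed to aws4.sign and then to axios carries the payload twice, once for each library. A minimal sketch based on the description above (same placeholder env vars as the question):
const payload = JSON.stringify(postData);

const request = {
  host: process.env.AWS_HOST,
  method: 'POST',
  url: process.env.AWS_URL,
  path: process.env.AWS_PATH,
  headers: { 'Content-Type': 'application/json' },
  body: payload, // aws4 signs this
  data: payload  // axios sends this
};

const signedRequest = aws4.sign(request, {
  secretAccessKey: process.env.AWS_SECRET_KEY,
  accessKeyId: process.env.AWS_ACCESS_KEY
});

axios(signedRequest);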
If you use the AWS Amplify library it has a module called API which should fit your use cases, and it will perform Sigv4 signing for you either with authenticated or unauthenticated roles. The Auth category uses Cognito as the default implementation. For instance:
npm install aws-amplify --save
Then import and configure the lib:
import Amplify, { API } from 'aws-amplify';

Amplify.configure({
  Auth: {
    identityPoolId: 'XX-XXXX-X:XXXXXXXX-XXXX-1234-abcd-1234567890ab',
    region: 'XX-XXXX-X'
  },
  API: {
    endpoints: [
      {
        name: "APIName",
        endpoint: "https://invokeURI.amazonaws.com"
      }
    ]
  }
});
Then for your API Gateway endpoint calling a Lambda:
let apiName = 'MyApiName';
let path = '/path';
let options = {
  headers: {...} // OPTIONAL
}
API.get(apiName, path, options).then(response => {
  // Add your code here
});
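Since the failing request in the question is a POST, the equivalent call would be API.post, which takes the payload under a body key (a sketch reusing apiName and path from above):
API.post(apiName, path, {
  body: {
    uuid: "916b7d90-0137-11e8-94e6-116965754e23",
    date: "22/jan/2018",
    user_response: [ /* ... */ ]
  }
}).then(response => {
  // Amplify performs the SigV4 signing for you
});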
More info here: https://github.com/aws/aws-amplify

Error: User credentials required in Google Cloud Print API

I'm trying to use the Google Cloud Print (GCP) API, but I can't make it work.
Maybe I've misunderstood the workflow, because this is the first time I'm using a Google API; please help me understand how to make it work.
Initial considerations:
I'm trying to implement it in ReactJS, but that doesn't really matter, because the logic to make GCP work is independent of the technology. So you can also just help me understand the workflow.
What exactly I want:
As a first test, I want to get all the information about my printer.
What I did:
I created a project in: https://console.developers.google.com
Inside that project, I created a credential:
create credentials -> OAuth client ID
I chose Application type: Web, and also configured the allowed origins and the redirect URI to point to my localhost.
Manually, in https://www.google.com/cloudprint, I added my printer and made a test printing a PDF, which was OK.
I created a ReactJS project to get the information about the printer I added.
Component:
Explanation:
I'm using the react-google-login component to easily obtain the user's access token: https://github.com/anthonyjgrove/react-google-login
This component only obtains the access token and saves it in localStorage, in a key called googleToken, and it renders a button that calls a function to get the printer information.
code:
import React, { Component } from 'react'
import GoogleLogin from 'react-google-login';
import { connect } from 'react-redux'
import { getPrinters } from '../actions/settings'

class Setting extends Component {
  responseGoogle(response) {
    const accessToken = response.accessToken
    localStorage.setItem('googleToken', accessToken)
  }

  render() {
    return (
      <div>
        <GoogleLogin
          clientId="CLIENT_ID_REMOVED_INTENTIONALLY.apps.googleusercontent.com"
          buttonText="Login"
          onSuccess={this.responseGoogle}
          onFailure={this.responseGoogle}
        />
        <button
          onClick={() => {
            this.props.getPrinters()
          }}
        >test printer</button>
      </div>
    )
  }
}

const mapStateToProps = state => {
  return {
    state: state
  }
}

const mapDispatchToProps = dispatch => {
  return {
    getPrinters() {
      dispatch(getPrinters())
    }
  }
}

export default connect(
  mapStateToProps,
  mapDispatchToProps
)(Setting)
Action or function to get the printer information:
Explanation:
I'm passing the parameter printerid to get information about that printer.
For the Authorization header I'm using OAuth ... because the documentation says so (second paragraph): https://developers.google.com/cloud-print/docs/appInterfaces
I added the next two headers because I tried solutions such as:
Google Cloud Print API: User credentials required
Google Cloud Print User credentials required
code:
import axios from 'axios'

axios.defaults.headers.common['Authorization'] = 'OAuth ' + localStorage.getItem('googleToken')
axios.defaults.headers.common['scope'] = 'https://www.googleapis.com/auth/cloudprint'
axios.defaults.headers.common['X-CloudPrint-Proxy'] = 'printingTest'

const getPrinters = () => {
  return () => {
    return axios.get('https://www.google.com/cloudprint/printer', {
      params: {
        printerid: 'PRINTER_ID_REMOVED_INTENTIONALLY'
      }
    })
      .then(response => {
        console.log('response of google cloud print')
        console.log(response)
      })
  }
}

export { getPrinters }
export { getPrinters }
Error:
After everything explained above, I get the following error:
User credentials required
Error 403
Note:
I'm using a CORS plugin as recommended in:
Chrome extensions for silent print?
because initially I had a CORS error.
Any suggestion or recommendation would be very useful, thanks.
I've resolved my problem. The main cause of "User credentials required" was that I was using the wrong access token, and that was because I was obtaining the access token incorrectly.
I'm going to explain my whole solution, because there are few code examples for this API.
Solutions:
The steps described were OK up to the fourth step, where I had used the external component react-google-login to try to get the access token; instead I used the googleapis module: Link Github googleapis
Also, to avoid the CORS problem (and not use the CORS Chrome plugin), I moved the requests to the Google API to the server side (Node.js).
I also had a problem in the frontend when I tried to open a popup to grant permission for the printer (CORS problems again); my solution was to use this very simple module for authentication: Link Github oauth-open
General scheme:
Explanation:
Assuming I have all the data described in my question (up to the third step):
Authentication:
The next step is getting a URL the user can use to authenticate.
As I said before, I used the oauth-open module in the frontend to open the popup, and this module only needs the URL. To get the URL I created the backend endpoint /googleurl, which uses the generateAuthUrl method of the googleapis module to generate it.
After that, in the frontend I take the authorization code (returned by oauth-open) and send it to my endpoint /googletoken, which exchanges it for an access token, refresh token and expiration date using the getToken method of googleapis. Finally, these data are stored in the database.
Print:
For printing, from the frontend I send whatever data needs to go to the printer to my endpoint /print.
In the backend endpoint, my logic is the following:
Retrieve the tokens and expiration date from the database, and use the expiration date to check whether the access token has expired. If it has, get a new one and replace the old access token and expiration date with the new values; to obtain this new data it is only necessary to call the refreshAccessToken method of googleapis. Note: the refresh token never expires.
Once the access token is up to date, use it to send the data to the printer via the Google route (.../submit).
Code:
All the following code lives in a single file.
Some things such as validation, static variables, error handling, etc. have been removed for easier understanding.
Route to get the authentication URL:
const express = require('express');
const google = require('googleapis');
const router = express.Router();

var OAuth2 = google.auth.OAuth2;
const redirect_url = 'http://localhost:3001/setting'; // Your redirect URL

var oauth2Client = new OAuth2(
  'CLIENT ID',     // Replace it with your client id
  'CLIENT SECRET', // Replace it with your client secret
  redirect_url
);

var url = oauth2Client.generateAuthUrl({
  access_type: 'offline',
  scope: 'https://www.googleapis.com/auth/cloudprint'
});

router.get('/googleurl', (req, res) => {
  return res.status(200).send({
    result: { googleURLToken: url }
  });
});
To get the tokens using the authorization code and save them in the database:
const Setting = require('../models/setting'); // My model (Mongoose)

router.post('/googletoken', (req, res) => {
  oauth2Client.getToken(req.body.code, function (err, tokens) {
    oauth2Client.credentials = tokens;
    // If a refresh token exists, save it,
    // because the refresh token is returned only once! IMPORTANT
    if (tokens.hasOwnProperty('refresh_token')) {
      let setting = new Setting();
      setting.refreshTokenGoogle = tokens.refresh_token;
      setting.expirationTokenGoogle = tokens.expiry_date;
      setting.tokenGoogle = tokens.access_token;
      setting.save()
        .then((settingCreated) => {
          return res.status(200).send({
            message: 'OK'
          });
        })
    }
  });
});
To print:
const axios = require('axios');
const moment = require('moment');

router.post('/print', async (req, res) => {
  const tickeProperties = {
    'version': '1.0',
    'print': {
      'vendor_ticket_item': [],
      'color': { 'type': 'STANDARD_MONOCHROME' },
      'copies': { 'copies': 1 }
    }
  };

  const accessToken = await getTokenGoogleUpdated();

  axios.get(
    'https://www.google.com/cloudprint/submit',
    {
      params: {
        printerid: printerID, // Replace by your printer ID
        title: 'title printer',
        ticket: tickeProperties,
        content: 'print this text of example!!!',
        contentType: 'text/plain'
      },
      headers: {
        'Authorization': 'Bearer ' + accessToken
      }
    }
  )
    .then(response => {
      return res.status(200).send({
        result: response.data
      });
    })
});

async function getTokenGoogleUpdated() {
  return await Setting.find({})
    .then(async setting => {
      const refreshTokenGoogle = setting[0].refreshTokenGoogle;
      const expirationTokenGoogle = setting[0].expirationTokenGoogle;
      const tokenGoogle = setting[0].tokenGoogle;
      const dateToday = new Date();
      // 1 minute forward to avoid exact time
      const dateTodayPlus1Minute = moment(dateToday).add(1, 'm').toDate();
      const dateExpiration = new Date(expirationTokenGoogle);
      // Case date expiration, get new token
      if (dateExpiration < dateTodayPlus1Minute) {
        console.log('Updating access token');
        oauth2Client.credentials['refresh_token'] = refreshTokenGoogle;
        return await oauth2Client.refreshAccessToken(async function (err, tokens) {
          // Save new token and new expiration
          setting[0].expirationTokenGoogle = tokens.expiry_date;
          setting[0].tokenGoogle = tokens.access_token;
          await setting[0].save();
          return tokens.access_token;
        });
      } else {
        console.log('Using old access token');
        return tokenGoogle;
      }
    })
    .catch(err => {
      console.log(err);
    });
}
I hope this helps you if you want to use Google Cloud Print, so you don't waste as much time as I did.
The important part is the scope https://www.googleapis.com/auth/cloudprint, which is not obvious and took me a day to figure out.
