I have a problem downloading large files from a Web API to a WinForms app. In the WinForms app I'm using HttpClient to grab the data. I have the following code on the server side:
[HttpPost]
[Route]
public async Task<HttpResponseMessage> GetBackup(BackupRequestModel request)
{
HttpResponseMessage response;
try
{
response = await Task.Run<HttpResponseMessage>(() =>
{
var directory = new DirectoryInfo(request.Path);
var files = directory.GetFiles();
var lastCreatedFile = files.OrderByDescending(f => f.CreationTime).FirstOrDefault();
var filestream = lastCreatedFile.OpenRead();
var fileResponse = new HttpResponseMessage(HttpStatusCode.OK);
fileResponse.Content = new StreamContent(filestream);
fileResponse.Content.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
return fileResponse;
});
}
catch (Exception e)
{
logger.Error(e);
response = Request.CreateResponse(HttpStatusCode.InternalServerError);
}
return response;
}
On the client side:
private async void btnStart_Click(object sender, EventArgs e)
{
var requestModel = new BackupRequestModel();
requestModel.Username = txtUsername.Text;
requestModel.Password = txtPassword.Text;
requestModel.Path = txtServerPath.Text;
var client = new HttpClient();
var result = await client.PostAsJsonAsync("http://localhost:50116/api/backup", requestModel);
var stream = await result.Content.ReadAsStreamAsync();
var localPath = @"d:\test\filenew.bak";
var fileStream = File.Create(localPath);
stream.CopyTo(fileStream);
fileStream.Close();
stream.Close();
fileStream.Dispose();
stream.Dispose();
client.Dispose();
}
}
This is actually working, but the purpose of this program is to grab large files (over 3 GB) and save them on the client.
I have tried this with a 630 MB file, and what I notice is: when I call the Web API with HttpClient, HttpClient loads the whole 630 MB into a memory stream and only then copies it from the memory stream into the file stream. When I then try to load a different file, I get an OutOfMemoryException, because the application doesn't release the memory of the previously loaded file; I can see in Task Manager that it is still holding about 635 MB of RAM.
My question is: how can I write the data from HttpClient directly to the file without using a memory stream? In other words, how can I write the data to the file while HttpClient is still downloading it?
To make the request, use a SendAsync overload that lets you specify an HttpCompletionOption, and pass HttpCompletionOption.ResponseHeadersRead. You'll have to build the request manually though, without the PostAsJsonAsync convenience method.
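A minimal sketch, reusing the names from your code and assuming Json.NET for serializing the request body (any JSON serializer will do):

// needs: System.IO, System.Net.Http, System.Text, System.Threading.Tasks, Newtonsoft.Json
private async Task DownloadBackupAsync(BackupRequestModel requestModel, string localPath)
{
    using (var client = new HttpClient())
    using (var request = new HttpRequestMessage(HttpMethod.Post, "http://localhost:50116/api/backup"))
    {
        // Build the JSON body yourself instead of relying on PostAsJsonAsync.
        request.Content = new StringContent(
            JsonConvert.SerializeObject(requestModel),
            Encoding.UTF8,
            "application/json");

        // ResponseHeadersRead makes SendAsync return as soon as the headers arrive,
        // so the response body is never buffered in memory.
        using (var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead))
        {
            response.EnsureSuccessStatusCode();

            using (var httpStream = await response.Content.ReadAsStreamAsync())
            using (var fileStream = File.Create(localPath))
            {
                // Copies in small chunks straight to disk, so a 3 GB file never ends up in RAM.
                await httpStream.CopyToAsync(fileStream);
            }
        }
    }
}

You will probably also want to raise client.Timeout (the default is 100 seconds), since a multi-gigabyte download will take longer than that.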
When using ASP.NET (Core, .NET 5) MVC's IApplicationBuilder.UseSpa / ISpaBuilder.UseReactDevelopmentServer (in development), is there a way to postprocess the index HTML before it's sent to the browser? I need to inject a script tag holding data about the currently auth'd user to be consumed by the React app.
I want to avoid having to do an extra call from inside my React app just to get the currently logged on user at startup.
You can use custom middleware to do that. Assuming you're on .NET Core 3.1, the middleware would look something along these lines:
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using System.IO;
using System.IO.Compression;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
namespace TestReactDevServer.Middleware
{
public class ScriptInjectorMiddleware
{
private readonly RequestDelegate _next;
public ScriptInjectorMiddleware(RequestDelegate next)
{
_next = next;
}
public async Task InvokeAsync(HttpContext context)
{
//Save pointer to the original response body stream
var originalBodyStream = context.Response.Body;
//Create new memory stream
using (var responseBody = new MemoryStream())
{
//...and use it for subsequent requests so we can peek the contents
context.Response.Body = responseBody;
//Continue down the Middleware pipeline, eventually returning to this class
await _next(context);
//inspect response, inject script
await InjectScript(responseBody, "window.myUser='123';");
//rewind the memory stream, then copy the (now modified) contents back to the original response stream
responseBody.Seek(0, SeekOrigin.Begin);
await responseBody.CopyToAsync(originalBodyStream);
}
}
private async Task<Stream> InjectScript(Stream input, string script)
{
input.Seek(0, SeekOrigin.Begin);
var decompressed = new MemoryStream();
using (var tmp = new GZipStream(input, CompressionMode.Decompress, true))
{
tmp.CopyTo(decompressed);
}
var html = await decompressed.StreamToString();
var modifiedHtml = Regex.Replace(html, "</body>[\\n\\r]+</html>", $"<script type=\"text/javascript\">{script}</script></body></html>", RegexOptions.IgnoreCase | RegexOptions.Multiline); // any way to locate closing tags will work here, you probably can be more efficient
input.Seek(0, SeekOrigin.Begin);
input.SetLength(0); // discard the original compressed body before writing the modified one
using (var modifiedHtmlStream = modifiedHtml.ToStream())
using (var tmp = new GZipStream(input, CompressionMode.Compress, true)) // might be optional
{
modifiedHtmlStream.CopyTo(tmp);
}
return input;
}
}
public static class ScriptInjectorMiddlewareExtensions
{
public static IApplicationBuilder UseScriptInjectorMiddleware(
this IApplicationBuilder builder)
{
return builder.UseMiddleware<ScriptInjectorMiddleware>();
}
}
public static class StreamExtensions {
public static async Task<string> StreamToString(this Stream stream) {
stream.Seek(0, SeekOrigin.Begin);
return await new StreamReader(stream).ReadToEndAsync();
}
public static Stream ToStream(this string str)
{
var stream = new MemoryStream();
var writer = new StreamWriter(stream);
writer.Write(str);
writer.Flush();
stream.Seek(0, SeekOrigin.Begin);
return stream;
}
}
}
A couple of things to point out:
Testing this with Chrome, I ended up having to decompress the proxied response and compress it back after modification - I think the compression step might be optional.
Depending on your user agent you might need to handle more compression cases (see more examples on GitHub).
You will need to register this middleware before your call to .UseSpa() in Startup.cs: adding app.UseScriptInjectorMiddleware(); picks up the included extension method (see the sketch after this list).
I suspect this example is far from complete, especially in terms of handling different encodings and content types - I'm hoping you'll be able to adapt the idea to your use case.
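For reference, a minimal sketch of the registration in Startup.Configure, assuming the default React SPA template (only the relevant calls are shown):

public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
    // ...static files, routing, endpoints, etc...

    // Register the injector before the SPA middleware so it can rewrite
    // the index HTML coming back from the dev server / static file handler.
    app.UseScriptInjectorMiddleware();

    app.UseSpa(spa =>
    {
        spa.Options.SourcePath = "ClientApp";
        if (env.IsDevelopment())
        {
            spa.UseReactDevelopmentServer(npmScript: "start");
        }
    });
}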
I have been using the following code to upload files to my server. It is doing the job, but I want to monitor the upload progress percentage during the operation and update the UI accordingly, to reflect the progress to the user:
uploadFile({File imageFile, String refCode}) async {
// open a bytestream
var stream =
new http.ByteStream(DelegatingStream.typed(imageFile.openRead()));
// get file length
var length = await imageFile.length();
// string to uri
var uri = Uri.parse(
'http://-------------/api/FilesUploadB/?refCode=$refCode');
// create multipart request
var request = new http.MultipartRequest("POST", uri);
// multipart that takes file
var multipartFile = new http.MultipartFile('file', stream, length,
filename: basename(imageFile.path));
// add file to multipart
request.files.add(multipartFile);
// send
var response = await request.send();
// listen for response
response.stream.transform(utf8.decoder).listen((value) {
print(value);
});
//return response.
}
Note that the value in the listen callback gives me the final response from the Web API on the server.
How can I achieve that?
Take a look at this example on GitHub. It demonstrates how you can access the current upload progress of your file.
The file is not downloading in the browser. I'm preparing the file and writing it to the output stream of the response.
The REST API is here:
@RequestMapping(value = "/export-companies",
method = {RequestMethod.GET, RequestMethod.HEAD})
@Timed
public void downloadCompanies(HttpServletResponse response) throws URISyntaxException {
HSSFWorkbook workbook = new HSSFWorkbook();
HSSFSheet sheet = workbook.createSheet("Sample sheet");
Map<String, Object[]> data = new HashMap<String, Object[]>();
data.put("1", new Object[] {"Emp No.", "Name", "Salary"});
data.put("2", new Object[] {1d, "John", 1500000d});
data.put("3", new Object[] {2d, "Sam", 800000d});
data.put("4", new Object[] {3d, "Dean", 700000d});
Set<String> keyset = data.keySet();
int rownum = 0;
for (String key : keyset) {
Row row = sheet.createRow(rownum++);
Object [] objArr = data.get(key);
int cellnum = 0;
for (Object obj : objArr) {
Cell cell = row.createCell(cellnum++);
if(obj instanceof Date)
cell.setCellValue((Date)obj);
else if(obj instanceof Boolean)
cell.setCellValue((Boolean)obj);
else if(obj instanceof String)
cell.setCellValue((String)obj);
else if(obj instanceof Double)
cell.setCellValue((Double)obj);
}
}
try {
ByteArrayOutputStream outByteStream = new ByteArrayOutputStream();
workbook.write(outByteStream);
byte [] outArray = outByteStream.toByteArray();
response.setContentType("application/ms-excel");
response.setContentLength(outArray.length);
response.setHeader("Expires:", "0"); // eliminates browser caching
response.setHeader("Content-Disposition", "attachment; filename=template.xls");
OutputStream outStream = response.getOutputStream();
outStream.write(outArray);
outStream.flush();
workbook.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
From the front end (using AngularJS):
(function() {
'use strict';
angular
.module('MyApp')
.factory('CompanyExportService', CompanyExportService);
CompanyExportService.$inject = ['$resource'];
function CompanyExportService ($resource) {
var service = $resource('api/export-companies', {}, {
'get': {
method: 'GET',
isArray: false
}
});
return service;
}
})();
The file contents are there in the response, in a non-readable format, but the file is not downloaded in the browser.
Angular will receive the file contents as mere character sequences. You need to create a file from these characters and initiate the browser download on the frontend.
You can do it like this:
var blob = new Blob([data],
{type: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'});
saveAs(blob, fileName);
where data is the response you received from your API. The saveAs function is part of the FileSaver.js library. You could look up how to do that manually, but why reinvent the wheel?
Downloading files with XHR is problematic. As long as you only do GET requests, there is a much simpler approach to trigger the browser to download a file.
Use the native JavaScript method window.open(url).
It works well in all browsers, including IE9.
In the code below I use $window, which is Angular's proxy for the native window object.
An example for your code could look like this:
(function() {
'use strict';
angular
.module('MyApp')
.factory('CompanyExportService', CompanyExportService);
CompanyExportService.$inject = ['$window'];
function CompanyExportService ($window) {
var exportUrl = 'api/export-companies';
return {
download: download
}
function download() {
$window.open(exportUrl);
}
}
})();
Note that this action is outside the scope of Angular: you can't do much about error handling or waiting until the file has been downloaded. This might be a problem if you want to generate huge Excel files or if your API is slow.
For more details, see this question: Spring - download response as a file
Update:
I've replaced window.location.href with window.open(), which seems to be the better choice for downloading files.
If your API returns an error page instead of a file, window.location.href will replace the current page (thus losing its state). $window.open(), however, opens the error in a new tab without losing the current state of the application.
You can download the file in a new tab. Modern browsers close it automatically when the download is completed.
By opening a new window you get a reference to it; when the download is completed, window.closed is set to true.
Unfortunately, you need to check this property from time to time inside an interval...
var newWindowRef = $window.open(url, name);
if (newWindowRef) {
if (newWindowRef.document.body) { // not working on IE
newWindowRef.document.title = "Downloading ...";
newWindowRef.document.body.innerHTML = '<h4>Your file is generating ... please wait</h4>';
}
var interval = setInterval(function() {
if (!!newWindowRef.closed) {
// Downloading completed
clearInterval(interval);
}
}, 1000);
} else {
$log.error("Opening new window is probably blocked");
}
Tested and works on Chrome v52, FF v48 and IE 11
I have the following simple application page that uses the phone camera to upload the taken photo to Azure blob storage:
public partial class AddReport : PhoneApplicationPage
{
// blobs stuff
string storageAccount = "MYACCOUNT";
string storageKey = "MYKEY";
string blobServiceUri = "http://MYACCOUNT.blob.core.windows.net";
CloudBlobClient blobClient;
private Report newReport;
public AddReport()
{
InitializeComponent();
}
protected override void OnNavigatedTo(System.Windows.Navigation.NavigationEventArgs e)
{
//base.OnNavigatedTo(e);
newReport = new Report();
var credentials = new StorageCredentialsAccountAndKey(storageAccount, storageKey);
blobClient = new CloudBlobClient(blobServiceUri, credentials);
}
private void TakePhotoClick(object sender, EventArgs eventArgs)
{
//The camera chooser used to capture a picture.
CameraCaptureTask ctask;
//Create new instance of CameraCaptureClass
ctask = new CameraCaptureTask();
//Create new event handler for capturing a photo
ctask.Completed += new EventHandler<PhotoResult>(ctask_Completed);
//Show the camera.
ctask.Show();
}
void ctask_Completed(object sender, PhotoResult e)
{
if (e.TaskResult == TaskResult.OK && e.ChosenPhoto != null)
{
WriteableBitmap CapturedImage = PictureDecoder.DecodeJpeg(e.ChosenPhoto);
UploadToBlobContainer(e.ChosenPhoto);
}
else
{
//user decided not to take a picture
}
}
private void UploadToBlobContainer(System.IO.Stream stream)
{
string containerName = "reportsPhotos";
var container = blobClient.GetContainerReference(containerName);
container.CreateIfNotExist(true, r =>
Dispatcher.BeginInvoke(() =>
{
var blobName = "report" + newReport.ReportId.ToString();
var blob = container.GetBlobReference(blobName);
blob.Metadata["ReportId"] = newReport.ReportId.ToString();
blob.UploadFromStream(stream, r2 =>
Dispatcher.BeginInvoke(() =>
{
newReport.Photo = container.Uri + "/" + blobName;
}));
}));
}
}
This is a simple case: I am not using SAS to authenticate; instead I store the key in the app itself (this is only for testing purposes), and my blobs are publicly available.
When I run in debug mode it seems that everything is working, but the photo doesn't get uploaded to the blob. Also, I don't know how to debug this to see whether there was an error from the blob service.
Can anyone tell me what might be wrong?
EDIT 1: It seems that the container is not being created either. I've confirmed this using Azure Blob Explorer.
EDIT 2: I am getting a System.Net.WebException: "The remote server returned an error: NotFound."
After long hours I have finally discovered that the problem was with this line:
string containerName = "reportsPhotos";
According to here, all letters in a container name must be lowercase.
Changing it to reportsphotos solved the issue.
That was time well spent.
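For reference, a minimal sketch of the corrected code. Normalizing the name defensively also helps if it is ever built from other data (container names only allow 3-63 lowercase letters, digits and hyphens):

// Container names must be all lowercase.
string containerName = "reportsphotos";

// If the name is ever derived from user or report data, normalize it first:
var container = blobClient.GetContainerReference(containerName.ToLowerInvariant());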
Can you try just doing it like this instead:
// Retrieve storage account from connection-string
CloudStorageAccount storageAccount = CloudStorageAccount.Parse(
RoleEnvironment.GetConfigurationSettingValue("StorageConnectionString"));
// Create the blob client
CloudBlobClient blobClient = storageAccount.CreateCloudBlobClient();
// Retrieve reference to a previously created container
CloudBlobContainer container = blobClient.GetContainerReference("mycontainer");
// Retrieve reference to a blob named "myblob"
CloudBlob blob = container.GetBlobReference("myblob");
// Create or overwrite the "myblob" blob with contents from a local file
using (var fileStream = System.IO.File.OpenRead(@"path\myfile"))
{
blob.UploadFromStream(fileStream);
}
This is from:
http://www.windowsazure.com/en-us/develop/net/how-to-guides/blob-storage/#upload-blob
I am trying to access the Google Places API from App Engine using code like this:
String PLACES_DETAILS_URL = "https://maps.googleapis.com/maps/api/place/details/json";
// setup up the HTTP transport
HttpTransport transport = new UrlFetchTransport();
// add default headers
GoogleHeaders defaultHeaders = new GoogleHeaders();
transport.defaultHeaders = defaultHeaders;
transport.defaultHeaders.put("Content-Type", "application/json");
JsonHttpParser parser = new JsonHttpParser();
parser.jsonFactory = new JacksonFactory();
transport.addParser(parser);
// build the HTTP GET request and URL
HttpRequest request = transport.buildGetRequest();
request.setUrl(PLACES_DETAILS_URL);
GenericData data = new GenericData();
data.put("reference", restaurantGoogleId);
data.put("sensor", "false");
data.put("key", ApplicationConstants.GoogleApiKey);
JsonHttpContent content = new JsonHttpContent();
content.jsonFactory=new JacksonFactory();
content.data = data;
request.content = content;
try {
HttpResponse response = request.execute();
String r = response.parseAsString();
r=r;
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
I don't even know if this is the recommended way. If it is, why doesn't it work? If I put the request in the browser directly it works, but with this code it always returns "Request Denied".
Thanks in advance.
In the end it was easy: I had mixed up the GET and POST verbs:
HttpTransport transport = new UrlFetchTransport();
// add default headers
GoogleHeaders defaultHeaders = new GoogleHeaders();
transport.defaultHeaders = defaultHeaders;
transport.defaultHeaders.put("Content-Type", "application/json");
JsonCParser parser = new JsonCParser();
parser.jsonFactory = new JacksonFactory();
transport.addParser(parser);
// build the HTTP GET request and URL
HttpRequest request = transport.buildGetRequest();
request.setUrl("https://maps.googleapis.com/maps/api/place/details/json?reference=CmRYAAAAciqGsTRX1mXRvuXSH2ErwW-jCINE1aLiwP64MCWDN5vkXvXoQGPKldMfmdGyqWSpm7BEYCgDm-iv7Kc2PF7QA7brMAwBbAcqMr5i1f4PwTpaovIZjysCEZTry8Ez30wpEhCNCXpynextCld2EBsDkRKsGhSLayuRyFsex6JA6NPh9dyupoTH3g&sensor=true&key=<APIKEY>");
try {
HttpResponse response = request.execute();
String r = response.parseAsString();