I have two versions of the same logic. Below are the two code snippets:
/*Code 1*/
this.state.dataModel[0].route.routeLine = this.props.routeLine.join(' ');
/*Code 2*/
this.setState(prevState => {
  const newDataModel = [...prevState.dataModel];
  newDataModel[0].route.routeLine = this.props.routeLine.join(' ');
  return { dataModel: newDataModel };
});
/* Common Code */
let finalData = {
  ...this.state.dataModel[0],
  hasLocationChanged: this.state.isLocationEditedForConsSeg
};
let data = JSON.stringify(finalData);
console.log('Stringify Data ', JSON.stringify(data));
When I use Code 1, the object is converted to a JSON string without data loss. But if I use Code 2, I am losing data in the JSON string when using JSON.stringify(). Yet, according to my understanding of React, Code 1 is not the correct way to update state, since it mutates it directly.
Below is finalData as a JSON object:
{
"layer": 1,
"layerName": "ConstructionSegment",
"layerId": 7384,
"agencyId": 79,
"lpModel": {
"lastProjectType": 2,
"lastProjectSurf": 2,
"gravelType": 0,
"gravelTreatment": 0,
"bitType": 1,
"concrType": 0,
"astType": 0,
"compType": 0,
"lastProjectYear": 2002,
"lpDepth": 7,
"totalDepth": 7,
"lastProjectMile": "0",
"comment": "",
"lastProjectYearEst": 0,
"lpDepthEst": 0,
"totalDepthEst": 0
},
"bModel": {
"topBaseType": 3,
"topBaseDpt": 23,
"topBaseYear": 1973,
"topBaseTrt": 6,
"bottomBaseType": 2,
"bottomBaseDpt": 23,
"bottomBaseYear": 0,
"bottomBaseTrt": 1,
"fabric": 0,
"subgradeStrength": 100,
"subgradeStrengthType": 5,
"lastSGImpYear": 0,
"subgradeTrt": 1,
"topBaseTypeEst": 0,
"topBaseDptEst": 0,
"topBaseYearEst": 0,
"bottomBaseTypeEst": 0,
"bottomBaseDptEst": 0,
"bottomBaseYearEst": 0,
"subgradeStrengthEst": 0,
"lastSGImpYearEst": 0
},
"xModel": {
"laneWidth": 4,
"numLanes": 2,
"rtShoulderTotal": 5,
"rtShoulderPaved": 0,
"gradingYear": 1973,
"curbs": 0,
"inslopeRatio": 0,
"paveSloughWidth": 0,
"sloughRatio": 0,
"edgelineTrt": 0,
"centerlineTrt": 0,
"medianType": 0,
"medianWidth": 0,
"rightOfWay": 100,
"sectionOwner": 2,
"gradingYearEst": 0
},
"maintenance": {
"blade": [],
"regravel": [],
"reshape": [],
"spotRep": [],
"dustControl": [],
"surfacing": [],
"crackSeal": [],
"patching": [],
"striping": [],
"cpr": [],
"crackSealConcrete": [],
"reApplyAst": []
},
"pavementcondition": {
"PlannedProjType": 0
},
"route": {
"id": 12804,
"start": "47.04905, -95.722035",
"end": "47.107359, -95.721154",
"routeLength": "4.03",
"routeArray": [
"47.04906,-95.72206",
"47.06001,-95.72184",
"47.06281,-95.72179",
"47.07266,-95.7216",
"47.07647,-95.72154",
"47.07991,-95.72149",
"47.08224,-95.72154",
"47.08528,-95.72169",
"47.08749,-95.72178",
"47.08846,-95.72179",
"47.09029,-95.72181",
"47.09236,-95.72178",
"47.09425,-95.72172",
"47.09729,-95.72154",
"47.10185,-95.721",
"47.10291,-95.72095",
"47.10738,-95.72119"
],
"routePointArray": [
[
47.04906,
-95.72206
],
[
47.06001,
-95.72184
],
[
47.06281,
-95.72179
],
[
47.07266,
-95.7216
],
[
47.07647,
-95.72154
],
[
47.07991,
-95.72149
],
[
47.08224,
-95.72154
],
[
47.08528,
-95.72169
],
[
47.08749,
-95.72178
],
[
47.08846,
-95.72179
],
[
47.09029,
-95.72181
],
[
47.09236,
-95.72178
],
[
47.09425,
-95.72172
],
[
47.09729,
-95.72154
],
[
47.10185,
-95.721
],
[
47.10291,
-95.72095
],
[
47.10738,
-95.72119
]
],
"startPoint": [
[
47.04905,
-95.72204
]
],
"endPoint": [
[
47.10736,
-95.72115
]
],
"waypoints": [],
"isPolyLine": false,
"hwyNum": "34",
"segDesc": "",
"routeLine": "47.04906,-95.72206 47.06001,-95.72184 47.06281,-95.72179 47.07266,-95.7216 47.07647,-95.72154 47.07991,-95.72149 47.08224,-95.72154 47.08528,-95.72169 47.08749,-95.72178 47.08846,-95.72179 47.09029,-95.72181 47.09236,-95.72178 47.09425,-95.72172 47.09729,-95.72154 47.10185,-95.721 47.10291,-95.72095 47.10738,-95.72119"
}
}
I can see routeLine in the console.log() output if I use Code 1, but I cannot see it if I use Code 2.
You are losing it because setState is asynchronous. If you call this.setState and read this.state soon after, you'll notice that it is not immediately updated.
Please see this question and the setState docs for further details.
edit1: I created a sandbox example for you. Check the code in App.js; you'll see 4 click functions and a logState function. click1 and click2 do what you did in your original post, while click3 and click4 propose a solution to overcome the problem.
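For illustration, here is a minimal sketch of the same idea (not the actual sandbox code): setState accepts a completion callback as its second argument, which runs only after the update has been applied. It also copies the nested route object, because the shallow array spread in Code 2 still mutates the original route:
this.setState(prevState => {
  const newDataModel = [...prevState.dataModel];
  // The array spread is shallow, so copy the nested objects as well
  // instead of mutating the existing route object in place.
  newDataModel[0] = {
    ...newDataModel[0],
    route: {
      ...newDataModel[0].route,
      routeLine: this.props.routeLine.join(' ')
    }
  };
  return { dataModel: newDataModel };
}, () => {
  // This callback fires after the state update has been committed,
  // so this.state now contains the new routeLine.
  const finalData = {
    ...this.state.dataModel[0],
    hasLocationChanged: this.state.isLocationEditedForConsSeg
  };
  console.log('Stringify Data ', JSON.stringify(finalData));
});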
Your routeLine is a string: "47.04906,-95.72206 47.06001,-95.72184 47.06281", not an array as you might think. You need to first convert it to an array, and then apply a join():
this.state.dataModel[0].route.routeLine.split(' ').join(). You might need to revise your state and make sure you are parsing data according to your expectations.
Looking to ingest this REST API data into Splunk, but having issues with LINE_BREAKER; I can't seem to discover the correct combination for props.conf.
Also, as the data is returned in array format without keys, do I need a script to add the keys to the returned array data, or can this be achieved within Splunk?
N.B.
The keys are returned in the tail of the response.
REST API call:
{{base_url}}accounts/{{account}}/{{siteid}}/report?dimensions=queryName,queryType,responseCode,responseCached,coloName,origin,dayOfWeek,tcp,ipVersion,querySizeBucket,responseSizeBucket&metrics=queryCount,uncachedCount,staleCount,responseTimeAvg&limit=2
Any help appreciated.
{
"result": {
"rows": 100,
"data": [
{
"dimensions": [
"college.edu",
"A",
"REFUSED",
"uncached",
"EWR",
"192.0.0.0",
"1",
"0",
"4",
"48-63",
"48-63"
],
"metrics": [
1,
1,
0,
16
]
},
{
"dimensions": [
"school.edu",
"A",
"REFUSED",
"uncached",
"EWR",
"192.0.0.0",
"1",
"0",
"4",
"32-47",
"32-47"
],
"metrics": [
1,
1,
0,
10
]
}
],
"data_lag": 0,
"min": {},
"max": {},
"totals": {
"queryCount": 12,
"responseTimeAvg": 37.28936572607269,
"staleCount": 0,
"uncachedCount": 2147541
},
"query": {
"dimensions": [
"queryName",
"queryType",
"responseCode",
"responseCached",
"coloName",
"origin",
"dayOfWeek",
"tcp",
"ipVersion",
"querySizeBucket",
"responseSizeBucket"
],
"metrics": [
"queryCount",
"uncachedCount",
"staleCount",
"responseTimeAvg"
],
"since": "2022-10-17T04:37:00Z",
"until": "2022-10-17T10:37:00Z",
"limit": 100
}
},
"success": true,
"errors": [],
"messages": []
}
Assuming you want the JSON object to be a single event, the LINE_BREAKER setting should be }([\r\n]+){.
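For reference, a minimal props.conf sketch (the sourcetype name is made up; SHOULD_LINEMERGE = false is the usual companion setting so Splunk breaks events on LINE_BREAKER alone):
[dns_report_json]
LINE_BREAKER = }([\r\n]+){
SHOULD_LINEMERGE = false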
Splunk should have no problems parsing the JSON, but I think there will be problems relating metrics to dimensions because there are multiple sets of data and only one set of keys. Creating a script to combine them seems to be the best option.
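For example, a minimal Python sketch of such a script (assuming the response has been saved to report.json; the file name is illustrative). It pairs each row's positional values with the key names from the query block in the tail of the response and emits one keyed JSON object per row:
import json

# Load the saved API response (path is illustrative).
with open("report.json") as f:
    payload = json.load(f)

result = payload["result"]
dim_keys = result["query"]["dimensions"]
metric_keys = result["query"]["metrics"]

# Zip the positional values in each row with the key names from the
# query block, producing one self-describing event per row.
for row in result["data"]:
    event = dict(zip(dim_keys, row["dimensions"]))
    event.update(zip(metric_keys, row["metrics"]))
    print(json.dumps(event))  # one JSON object per line, ready to ingest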
I am using MongoDB, in which I'm updating a document for multiple types with different payloads and conditions, but every time I update the document the new payload overrides the previous one.
The first time, request.data is:
{
"farm_area_count": 1,
"farm_area": [
{
"area_id": 1,
"area_name": "Area 1",
"area_acerage": 4,
"area_structure_type": "polyhouse",
"zone_latest_id": 0
}
]
}
output is
{
"farm_area_count": 1,
"farm_area": [
{
"area_id": 1,
"area_name": "Area 1",
"area_acerage": 4,
"area_structure_type": "polyhouse",
"zone_latest_id": 0
}
]
}
The second time, request.data is:
{
"farm_area_count": 1,
"farm_area": [
{
"area_id": 1,
"zone_latest_id": 1,
"zone_name":"test zone",
"zone_acerage":2
}
]
}
the output should be
{
"farm_area_count": 1,
"farm_area": [
{
"area_id": 1,
"area_name": "Area 1",
"area_acerage": 4,
"area_structure_type": "polyhouse",
"zone_latest_id": 1,
"zone_name":"test zone",
"zone_acerage":2
}
]
}
but the output that I'm getting is
{
"farm_area_count": 1,
"farm_area": [
{
"area_id": 1,
"zone_latest_id": 1,
"zone_name":"test zone",
"zone_acerage":2
}
]
}
Here is the update code:
collection.update_one({"_id": ObjectId(str(kwargs['pk']))}, {"$set": request.data})
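This happens because $set replaces the value stored at each top-level key of the update document, so passing the whole payload overwrites the entire farm_area array. One workaround, shown as a minimal sketch (assuming pymongo, that the document and its farm_area array already exist from the first request, and that array positions are stable between requests), is to flatten the payload into dot-notation paths so only the fields actually supplied are set:
from bson import ObjectId

def flatten(payload, prefix=""):
    """Flatten nested dicts/lists into MongoDB dot-notation paths, e.g.
    {"farm_area": [{"zone_name": "x"}]} -> {"farm_area.0.zone_name": "x"}."""
    out = {}
    for key, value in payload.items():
        path = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            out.update(flatten(value, path))
        elif isinstance(value, list) and value and isinstance(value[0], dict):
            for i, item in enumerate(value):
                out.update(flatten(item, f"{path}.{i}"))
        else:
            out[path] = value
    return out

# Only the supplied fields are updated; existing fields are preserved.
collection.update_one(
    {"_id": ObjectId(str(kwargs["pk"]))},
    {"$set": flatten(request.data)},
)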
I am trying to parse out the values I need from this nested JSON data. From quotas I need responders, and from qualified I need service_id and codes.
I first tried to get just the quotas, but kept getting this error: `[]': no implicit conversion of String into Integer
hash = JSON::parse(response.body)
hash.each do |data|
p data["quotas"]
end
JSON data:
{
"id": 14706,
"relationships" : [
{
"id": 538
}
],
"quotas": [
{
"id": 48894,
"name": "Test",
"responders": 6,
"qualified": [
{
"service_id": 12,
"codes": [
1,
2,
3,
6
]
},
{
"service_id": 23,
"pre_codes": [
1,
2
]
}
]
}
]
}
I needed to convert your example into JSON first. (Your original error, by the way, comes from iterating the top-level hash: hash.each yields [key, value] pairs, and indexing one of those arrays with a string, as in data["quotas"], raises no implicit conversion of String into Integer.) Then I could loop over the quotas and output the values:
hash = JSON::parse(data.to_json)
hash['quotas'].each do |data|
p data["responders"]
data["qualified"].each do |responder|
p responder['service_id']
p responder['codes']
end
end
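Note that the second qualified entry has pre_codes rather than codes, so responder['codes'] prints nil for it. If both variants should be handled, a small tweak (assuming falling back to pre_codes is the desired behavior):
p responder['codes'] || responder['pre_codes']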
Hash in data variable (needed for the sample code to work):
require "json"
data = {
"id": 14706,
"relationships": [
{
"id": 538
}
],
"quotas": [
{
"id": 48894,
"name": "Test",
"responders": 6,
"qualified": [
{
"service_id": 12,
"codes": [
1,
2,
3,
6,
]
},
{
"service_id": 23,
"pre_codes": [
1,
2
]
}
]
}
]
}
I would like to plot a graph in AngularJS, using Highcharts, to look something like this:
This graph represents the load of a server over the last hour. Each datapoint given contains a value and an epoch time.
The data is received in JSON format, as [value, epoch time] pairs, as follows:
[
{
"ds_name": "a0",
"cluster_name": "",
"graph_type": "stack",
"host_name": "",
"metric_name": "1-min",
"color": "#BBBBBB",
"datapoints": [
[
0.58,
1604244900
],
[
0.59733333333,
1604244915
],
[
0.6,
1604244930
],
[
0.6,
1604244945
],
[
0.6,
1604244960
],
[
0.6,
1604244975
],
[
0.612,
1604244990
]
]
},
{
"ds_name": "a2",
"cluster_name": "",
"graph_type": "line",
"host_name": "",
"metric_name": "CPUs ",
"color": "#FF0000",
"datapoints": [
[
2,
1604244900
],
[
2,
1604244915
],
[
2,
1604244930
],
[
2,
1604244945
],
[
2,
1604244960
],
[
2,
1604244975
],
[
2,
1604244990
],
[
2,
1604245005
]
]
},
{
"ds_name": "a3",
"cluster_name": "",
"graph_type": "line",
"host_name": "",
"metric_name": "Procs",
"color": "#2030F4",
"datapoints": [
[
1,
1604244900
],
[
1,
1604244915
],
[
1,
1604244930
]
]
}
]
I posted here only part of the dataset, since it is too long, but I think you can understand the format.
How can I draw something similar using Highcharts?
The most important thing here is to properly parse the date. Since the names in the JSON are not the ones Highcharts requires, you have to pass them manually to the right places, as I did in the demo below.
I also recommend using the keys property, because Highcharts expects data declared as [x, y], and in your case it is the other way around. You can change that by setting this property.
Notice that in Highcharts there is no such series type as stack; it is just column.
API: https://api.highcharts.com/highcharts/series.line.keys
Demo: https://jsfiddle.net/BlackLabel/246phxum/
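For illustration, a minimal sketch of mapping the posted JSON onto series options (response is a hypothetical variable holding the parsed array; with the data reordered to [x, y] here, the keys property is no longer needed):
// Map each dataset onto a Highcharts series, renaming fields and
// converting [value, epoch-seconds] pairs into the [ms, value] form
// that a datetime axis expects.
const series = response.map(ds => ({
  name: ds.metric_name,
  type: ds.graph_type === 'stack' ? 'area' : ds.graph_type,
  color: ds.color,
  data: ds.datapoints.map(([value, epoch]) => [epoch * 1000, value])
}));

Highcharts.chart('container', {
  xAxis: { type: 'datetime' },
  series: series
});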
So this is how I did it eventually; the most suitable chart types are line and area.
Highcharts.chart("container", {
chart: {
type: 'line'
},
xAxis: {
type: "datetime",
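// Note: Highcharts interprets datetime values as milliseconds since the
// epoch, so timestamps in seconds (like 1604244900) should be multiplied
// by 1000 before being passed in.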
labels: {
formatter: function () {
return Highcharts.dateFormat('%H:%M:%S.%L', this.value);
}
}
},
series: [{
name: "A",
type: "area",
color: "#BB000B",
keys: ["y", "x"],
data: [
[0.58, 1604244900],
[0.59733333333, 1604244915],
[0.6, 1604244930],
[0.6, 1604244945],
[0.6, 1604244960],
[0.6, 1604244975],
[0.612, 1604244990]
]
},
{
name: "B",
type: "line",
color: "#FF00F0",
keys: ["y", "x"],
data: [
[2, 1604244900],
[2, 1604244915],
[2, 1604244930],
[2, 1604244945],
[2, 1604244960],
[2, 1604244975],
[2, 1604244990],
[2, 1604245005]
]
}, {
name: 'C',
keys: ['y', 'x'],
data: [
[1, 1604244900],
[1, 1604244915],
[1, 1604244930],
[1, 1604244945],
[1, 1604244960],
[1, 1604244975],
[1, 1604244990],
[1, 1604245005]
]
}
]
});
I need FEMMES.COM to get tokenized as singular + plural forms of the base word FEMME.
Custom Analyzer Config
{
  "analyzers": [
    {
      "@odata.type": "#Microsoft.Azure.Search.CustomAnalyzer",
      "name": "text_language_search_custom_analyzer",
      "tokenizer": "text_language_search_custom_analyzer_ms_tokenizer",
      "tokenFilters": [ "lowercase", "asciifolding" ],
      "charFilters": [ "html_strip" ]
    }
  ],
  "tokenizers": [
    {
      "@odata.type": "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer",
      "name": "text_language_search_custom_analyzer_ms_tokenizer",
      "maxTokenLength": 300,
      "isSearchTokenizer": false,
      "language": "english"
    }
  ],
  "tokenFilters": [],
  "charFilters": []
}
Analyze API call for FEMMES
{ "analyzer": "text_language_search_custom_analyzer", "text": "FEMMES" }
Analyze API response for FEMMES
{
  "@odata.context": "https://one-adscope-search-eu-stage.search.windows.net/$metadata#Microsoft.Azure.Search.V2016_09_01.AnalyzeResult",
  "tokens": [
    { "token": "femme", "startOffset": 0, "endOffset": 6, "position": 0 },
    { "token": "femmes", "startOffset": 0, "endOffset": 6, "position": 0 }
  ]
}
Analyze API response for FEMMES.COM
{
  "@odata.context": "https://one-adscope-search-eu-stage.search.windows.net/$metadata#Microsoft.Azure.Search.V2016_09_01.AnalyzeResult",
  "tokens": [
    { "token": "femmes", "startOffset": 0, "endOffset": 6, "position": 0 },
    { "token": "com", "startOffset": 7, "endOffset": 10, "position": 1 }
  ]
}
Analyze API response for FEMMES COM
{
  "@odata.context": "https://one-adscope-search-eu-stage.search.windows.net/$metadata#Microsoft.Azure.Search.V2016_09_01.AnalyzeResult",
  "tokens": [
    { "token": "femme", "startOffset": 0, "endOffset": 6, "position": 0 },
    { "token": "femmes", "startOffset": 0, "endOffset": 6, "position": 0 },
    { "token": "com", "startOffset": 7, "endOffset": 10, "position": 1 }
  ]
}
I think I figured this one out myself after some experimentation. I found that a MappingCharFilter could be used to replace "." with "," before the indexer did the tokenization. This allowed the lemmatization/stemming to work as expected on the terms in question. I need to do more thorough integration tests against our other use cases, but I think this would solve the problem for anybody facing the same type of issue.
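For reference, a sketch of what that char filter could look like in the index definition (the filter name is made up; it would then be referenced from the analyzer's "charFilters" list alongside "html_strip"):
"charFilters": [
  {
    "@odata.type": "#Microsoft.Azure.Search.MappingCharFilter",
    "name": "dot_to_comma_char_filter",
    "mappings": [ ".=>," ]
  }
]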
My previous answer was not correct. The Azure Search implementation actually applies the language tokenizer BEFORE the token filters. This essentially made the WordDelimiter token filter useless in my use case.
What I ended up having to do was to pre-process the data BEFORE I uploaded it to Azure for indexing. In my C# code, I added some regex logic that would break apart text like FEMMES2017 into FEMMES 2017 before I sent it to Azure. This way, when the text got to Azure, the indexer would see FEMMES by itself and properly tokenize it as FEMME and FEMMES using the language tokenizer.
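For illustration, a sketch of such pre-processing (the class and method names are made up, since the original regex wasn't shown):
using System.Text.RegularExpressions;

static class TextPreprocessor
{
    // Insert a space at every letter<->digit boundary, so that
    // "FEMMES2017" becomes "FEMMES 2017" before upload.
    private static readonly Regex LetterDigitBoundary =
        new Regex(@"(?<=\p{L})(?=\d)|(?<=\d)(?=\p{L})", RegexOptions.Compiled);

    public static string SplitLetterDigitRuns(string input) =>
        LetterDigitBoundary.Replace(input, " ");
}

// Example: TextPreprocessor.SplitLetterDigitRuns("FEMMES2017") -> "FEMMES 2017"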