Cannot retrieve attribute values from GeoServer - PostGIS

I am new to GeoServer. I created a shapefile of my district and added some COVID-related attributes (covid count, covid zone, district name, etc.). I loaded it into a PostGIS database and can see the attributes in the table. But when I try to retrieve the feature using Postman, the attribute values are not returned. Can anyone help?
Below is my request:
http://localhost:8080/geoserver/rest/workspaces/DistrictWpc/datastores/district_store/featuretypes/ernakulam.json
The response is:
{
  "featureType": {
    "name": "ernakulam",
    "nativeName": "ernakulam",
    "namespace": {
      "name": "DistrictWpc",
      "href": "http://localhost:8080/geoserver/rest/namespaces/DistrictWpc.json"
    },
    "title": "ernakulam",
    "keywords": {
      "string": [
        "features",
        "ernakulam"
      ]
    },
    "srs": "EPSG:404000",
    "nativeBoundingBox": {
      "minx": 76.1618881225586,
      "maxx": 76.6080093383789,
      "miny": 9.63820648193359,
      "maxy": 10.1869020462036
    },
    "latLonBoundingBox": {
      "minx": 76.1618881225586,
      "maxx": 76.6080093383789,
      "miny": 9.63820648193359,
      "maxy": 10.1869020462036,
      "crs": "EPSG:4326"
    },
    "projectionPolicy": "FORCE_DECLARED",
    "enabled": true,
    "store": {
      "@class": "dataStore",
      "name": "DistrictWpc:district_store",
      "href": "http://localhost:8080/geoserver/rest/workspaces/DistrictWpc/datastores/district_store.json"
    },
    "serviceConfiguration": false,
    "maxFeatures": 0,
    "numDecimals": 0,
    "padWithZeros": false,
    "forcedDecimal": false,
    "overridingServiceSRS": false,
    "skipNumberMatched": false,
    "circularArcPresent": false,
    "attributes": {
      "attribute": [
        {
          "name": "id",
          "minOccurs": 0,
          "maxOccurs": 1,
          "nillable": true,
          "binding": "java.lang.Long"
        },
        {
          "name": "district",
          "minOccurs": 0,
          "maxOccurs": 1,
          "nillable": true,
          "binding": "java.lang.String"
        },
        {
          "name": "count",
          "minOccurs": 0,
          "maxOccurs": 1,
          "nillable": true,
          "binding": "java.lang.Long"
        },
        {
          "name": "zone",
          "minOccurs": 0,
          "maxOccurs": 1,
          "nillable": true,
          "binding": "java.lang.String"
        },
        {
          "name": "geom",
          "minOccurs": 0,
          "maxOccurs": 1,
          "nillable": true,
          "binding": "org.locationtech.jts.geom.MultiPolygon"
        }
      ]
    }
  }
}

GeoServer's REST API is used for administrative tasks and as such does not provide a way to see the actual data you have stored in the database, just the details of how GeoServer connects to the database and some metadata about the store.
To access the actual data you need to use the WFS endpoint, which is defined by the OGC WFS specification and documented in the GeoServer manual.
If you must have REST-style access to the features, you could use the experimental OGC API - Features module to do this.
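For example, a WFS GetFeature request for your layer would look something like this (a sketch built from the workspace and layer names in your REST call; adjust as needed):
http://localhost:8080/geoserver/DistrictWpc/ows?service=WFS&version=2.0.0&request=GetFeature&typeNames=DistrictWpc:ernakulam&outputFormat=application/json
This returns the features as GeoJSON, including the district, count, and zone attribute values alongside the geometry.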

AutoPage Alexa skill APL

I'm trying to create a slide show (2-3 images) using the Alexa authoring tool. I have managed to do this using the APL Pager, which displays a series of components one at a time. The thing is that in order to switch from image A to image B or C, I have to touch the screen and swipe left/right.
I want to make this happen automatically and have Alexa switch the images after a certain time. It seems this can be achieved using the APL AutoPage command, but for some reason it is not working 😩
What I've done:
- Set up the APL using the APL Pager
- Added the AutoPage command to the APL document, with a component ID, duration, and delay
After trying it in the simulator and directly on an Echo Show 5, it still only advances when the display is touched.
Also tried:
- Adding the standard command (AutoPage) directly in the Alexa handler, but got the same response.
Some doubts:
Does it matter whether I put the commands in the APL document JSON [1] or directly in the handler when I call .addDirective [2]? The only difference I see is that if I want the content or duration to be dynamic, I should put it directly in the backend code (index.js), right?
[1]
{
  "type": "APL",
  "version": "1.4",
  "settings": {},
  "theme": "light",
  "import": [],
  "resources": [],
  "styles": {},
  "onMount": [],
  "graphics": {},
  "commands": [
    {
      "type": "AutoPage",
      "componentId": "fisrtpager",
      "duration": 1000,
      "delay": 500
    }
  ],
  ...
}
[2]
handlerInput.responseBuilder.addDirective({
  type: 'Alexa.Presentation.APL.RenderDocument',
  token: 'arrugas',
  document: physiolift,
  commands: [{
    "type": "AutoPage",
    "componentId": "fisrtpager",
    "duration": 1000,
    "delay": 500
  }]
});
Expected output
Have Alexa (Echo Show 5) display a series of images like a carousel, without the need to touch the screen.
My code
APL Document
{
  "type": "APL",
  "version": "1.4",
  "settings": {},
  "theme": "light",
  "import": [],
  "resources": [],
  "styles": {},
  "onMount": [],
  "graphics": {},
  "commands": [
    {
      "type": "AutoPage",
      "componentId": "fisrtpager",
      "duration": 1000,
      "delay": 500
    }
  ],
  "layouts": {},
  "mainTemplate": {
    "parameters": [
      "payload"
    ],
    "items": [
      {
        "type": "Pager",
        "id": "fisrtpager",
        "width": "100%",
        "height": "100%",
        "items": [
          {
            "type": "Image",
            "width": "100%",
            "height": "100%",
            "scale": "best-fill",
            "source": "https://dyl80ryjxr1ke.cloudfront.net/external_assets/hero_examples/hair_beach_v1785392215/original.jpeg",
            "align": "center"
          },
          {
            "type": "Image",
            "width": "100%",
            "height": "100%",
            "source": "https://interactive-examples.mdn.mozilla.net/media/cc0-images/grapefruit-slice-332-332.jpg",
            "scale": "best-fill"
          },
          {
            "type": "Text",
            "text": "Just text content shown on page #3",
            "textAlign": "center"
          }
        ],
        "navigation": "wrap"
      }
    ]
  }
}
index.js
// somewhere inside the intent I'm invoking
if (Alexa.getSupportedInterfaces(handlerInput.requestEnvelope)['Alexa.Presentation.APL']) {
  // Create Render Directive.
  handlerInput.responseBuilder.addDirective({
    type: 'Alexa.Presentation.APL.RenderDocument',
    token: 'arrugas',
    document: require('./documents/ImageTest.json')
  });
}
speakOutput += ' just saying something';
return handlerInput.responseBuilder
  .speak(speakOutput)
  .reprompt('just saying something else')
  .getResponse();
Just add the command in the "onMount" event handler. Here is the modified code which does exactly what you need:
{
  "type": "APL",
  "version": "1.4",
  "settings": {},
  "theme": "light",
  "import": [],
  "resources": [],
  "styles": {},
  "onMount": [],
  "graphics": {},
  "layouts": {},
  "mainTemplate": {
    "parameters": [
      "payload"
    ],
    "items": [
      {
        "type": "Pager",
        "id": "fisrtpager",
        "width": "100%",
        "height": "100%",
        "items": [
          {
            "type": "Image",
            "width": "100%",
            "height": "100%",
            "scale": "best-fill",
            "source": "https://dyl80ryjxr1ke.cloudfront.net/external_assets/hero_examples/hair_beach_v1785392215/original.jpeg",
            "align": "center"
          },
          {
            "type": "Image",
            "width": "100%",
            "height": "100%",
            "source": "https://interactive-examples.mdn.mozilla.net/media/cc0-images/grapefruit-slice-332-332.jpg",
            "scale": "best-fill"
          },
          {
            "type": "Text",
            "text": "Just text content shown on page #3",
            "textAlign": "center"
          }
        ],
        "navigation": "none",
        "onMount": [
          {
            "type": "AutoPage",
            "componentId": "fisrtpager",
            "duration": 1000,
            "delay": 500
          }
        ]
      }
    ]
  }
}
To update this behavior dynamically from your backend code, you can do the following:
// check if device supports APL
if (Alexa.getSupportedInterfaces(handlerInput.requestEnvelope)['Alexa.Presentation.APL']) {
  // Create Render Directive.
  handlerInput.responseBuilder.addDirective({
    type: 'Alexa.Presentation.APL.RenderDocument',
    token: "dialogManagementPagerDoc",
    document: require('./PATH-TO/YOUR-APL-FILE.json')
  })
  .addDirective({
    type: "Alexa.Presentation.APL.ExecuteCommands",
    token: "dialogManagementPagerDoc",
    commands: [
      {
        type: "AutoPage",
        componentId: "YOUR_PAGER_ID",
        delay: 1000,
        duration: 5000
      }
    ]
  });
}
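Note that the token passed to ExecuteCommands must match the token of the RenderDocument directive (here dialogManagementPagerDoc); if the tokens differ, the commands will not run against the rendered document.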

c-lightning public node data on explorers

I'm trying to set up my first c-lightning node with docker-compose, using the image from https://hub.docker.com/r/elementsproject/lightningd. Currently, my node can connect and open channels with other nodes (and I can open a channel to the node just fine), but it's still not updated (i.e. it has no information) on most explorers.
I've tried opening port 9735, setting bind-addr to the Docker container's IP address, and even setting announce-addr to a Tor address. Nothing works.
The following are the current results of getinfo and listconfigs:
getinfo
{
  "id": "03db40337c2de299a8fa454fdf89d311615d50a27129d43286696d9e497b2b027a",
  "alias": "TestName",
  "color": "fff000",
  "num_peers": 3,
  "num_pending_channels": 0,
  "num_active_channels": 3,
  "num_inactive_channels": 0,
  "address": [
    {
      "type": "ipv4",
      "address": "68.183.195.14",
      "port": 9735
    }
  ],
  "binding": [
    {
      "type": "ipv4",
      "address": "172.18.0.3",
      "port": 9735
    }
  ],
  "version": "v0.7.1-906-gf657146",
  "blockheight": 601917,
  "network": "bitcoin",
  "msatoshi_fees_collected": 0,
  "fees_collected_msat": "0msat"
}
listconfigs
{
  "# version": "v0.7.1-906-gf657146",
  "lightning-dir": "/root/.lightning",
  "wallet": "sqlite3:///root/.lightning/lightningd.sqlite3",
  "plugin": "/usr/local/bin/../libexec/c-lightning/plugins/pay",
  "plugin": "/usr/local/bin/../libexec/c-lightning/plugins/autoclean",
  "plugin": "/usr/local/bin/../libexec/c-lightning/plugins/fundchannel",
  "network": "bitcoin",
  "allow-deprecated-apis": true,
  "always-use-proxy": false,
  "daemon": "false",
  "rpc-file": "lightning-rpc",
  "rgb": "fff000",
  "alias": "HubTest",
  "bitcoin-rpcuser": [redacted],
  "bitcoin-rpcpassword": [redacted],
  "bitcoin-rpcconnect": "bitcoind",
  "bitcoin-retry-timeout": 60,
  "pid-file": "lightningd-bitcoin.pid",
  "ignore-fee-limits": false,
  "watchtime-blocks": 144,
  "max-locktime-blocks": 2016,
  "funding-confirms": 3,
  "commit-fee-min": 200,
  "commit-fee-max": 2000,
  "commit-fee": 500,
  "cltv-delta": 14,
  "cltv-final": 10,
  "commit-time": 10,
  "fee-base": 0,
  "rescan": 15,
  "fee-per-satoshi": 1,
  "max-concurrent-htlcs": 30,
  "min-capacity-sat": 10000,
  "bind-addr": "172.18.0.3:9735",
  "announce-addr": "68.183.195.14:9735",
  "offline": "false",
  "autolisten": true,
  "disable-dns": "false",
  "enable-autotor-v2-mode": "false",
  "encrypted-hsm": false,
  "log-level": "DEBUG",
  "log-prefix": "lightningd(7):"
}
Is there something wrong with this configuration? Or, is it another issue after all?
I understand that explorers update their node lists irregularly, and that as long as the node can open channels (and can be connected to), everything is fine, but this has been bugging me for weeks.
Updating the Docker image and setting bind-addr to 0.0.0.0:9735 somehow fixed the problem, for reasons I still don't understand.
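For reference, the relevant settings in the c-lightning config file would look something like this (a sketch using the addresses from the question; adjust to your own public IP):
# Listen on all interfaces inside the container so connections
# forwarded to the mapped port are accepted.
bind-addr=0.0.0.0:9735
# Advertise the public address so the node announcement gossip
# reaches explorers.
announce-addr=68.183.195.14:9735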

How to shrink the storage size limit of IPFS?

I am using IPFS and trying to understand its structure.
ISSUE: 1
The issue I am facing is that I am trying to shrink the storage size of IPFS (testing takes a long time, so I just want to shrink the file system for a while) but am unable to do so.
I tried changing the StorageMax limit in the config file using this command:
ipfs config Datastore.StorageMax 1GB
But it still does not work; it keeps accepting files (ipfs add) even after the 1GB mark. Why is it not limiting the file storage?
Can anyone please suggest how to reduce the file system size?
ISSUE: 2
Even with IPFS's default storage limit (which is 10 GB), I saw that when adding files to IPFS it does not stop storing them after the 10 GB limit is crossed.
It should not go past the limit; what should I do about this?
Below is the config file:
{
  "API": {
    "HTTPHeaders": {}
  },
  "Addresses": {
    "API": "/ip4/0.0.0.0/tcp/5001",
    "Announce": [],
    "Gateway": "/ip4/0.0.0.0/tcp/8080",
    "NoAnnounce": [],
    "Swarm": [
      "/ip4/0.0.0.0/tcp/4001",
      "/ip6/::/tcp/4001"
    ]
  },
  "Bootstrap": [
    "/dnsaddr/bootstrap.libp2p.io/ipfs/QmNnooDu7bfjPFoTZYxMWUQJyrVwtbZg5gBMjTezGAJN",
    "/dnsaddr/bootstrap.libp2p.io/ipfs/QmQCU2EcMqAqQPR2i9bChGNJchTbq5TbXJJ16u19uLTa",
    "/dnsaddr/bootstrap.libp2p.io/ipfs/QmbLHAnMoJPWSCR5Zhtx6BX9KiKNN6tpvbUcqanj75Nb",
    "/dnsaddr/bootstrap.libp2p.io/ipfs/QmcZf59bWwK5XFi76CZX8cbBhTzzA3gU1ZjYZcYW3dwt",
    "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEQAwe3N8SzbUtfsmvsqQLuvuJ",
    "/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKDZT2MULpjvfd3aZ6ha4oFGL1KrGM",
    "/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXDQVcqN88CNLHXMkTNwMKPnu",
    "/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZCy3U6aXMJDAbzgu2fzaDs64",
    "/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPakiS1J6DifTC88f5uVQKNAd",
    "/ip6/2604:a880:1:20::203:d001/tcp/4001/ipfs/QmSoLPppuBtQSGwKDZT2M7Lpjvfd3aZ6ha4oFGL1KrGM",
    "/ip6/2400:6180:0:d0::151:6001/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctVcqN88CNLHXMkTNwMKPnu",
    "/ip6/2604:a880:800:10::4a:5001/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
    "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"
  ],
  "Datastore": {
    "BloomFilterSize": 0,
    "GCPeriod": "1h",
    "HashOnRead": false,
    "Spec": {
      "mounts": [
        {
          "child": {
            "path": "blocks",
            "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
            "sync": true,
            "type": "flatfs"
          },
          "mountpoint": "/blocks",
          "prefix": "flatfs.datastore",
          "type": "measure"
        },
        {
          "child": {
            "compression": "none",
            "path": "datastore",
            "type": "levelds"
          },
          "mountpoint": "/",
          "prefix": "leveldb.datastore",
          "type": "measure"
        }
      ],
      "type": "mount"
    },
    "StorageGCWatermark": 0,
    "StorageMax": "1GB"
  },
  "Discovery": {
    "MDNS": {
      "Enabled": true,
      "Interval": 10
    }
  },
  "Experimental": {
    "FilestoreEnabled": false,
    "Libp2pStreamMounting": false,
    "P2pHttpProxy": false,
    "QUIC": false,
    "ShardingEnabled": false,
    "UrlstoreEnabled": false
  },
  "Gateway": {
    "APICommands": [],
    "HTTPHeaders": {
      "Access-Control-Allow-Headers": [
        "X-Requested-With",
        "Range"
      ],
      "Access-Control-Allow-Methods": [
        "GET"
      ],
      "Access-Control-Allow-Origin": [
        "*"
      ]
    },
    "PathPrefixes": [],
    "RootRedirect": "",
    "Writable": false
  },
  "Identity": {
    "PeerID": "QmfB8xVzjndgMWHuxszYADGrwBo1Zx6zjBQQCsqARDupsW"
  },
  "Ipns": {
    "RecordLifetime": "",
    "RepublishPeriod": "",
    "ResolveCacheSize": 128
  },
  "Mounts": {
    "FuseAllowOther": false,
    "IPFS": "/ipfs",
    "IPNS": "/ipns"
  },
  "Pubsub": {
    "DisableSigning": false,
    "Router": "",
    "StrictSignatureVerification": false
  },
  "Reprovider": {
    "Interval": "12h",
    "Strategy": "all"
  },
  "Routing": {
    "Type": "dht"
  },
  "Swarm": {
    "AddrFilters": null,
    "ConnMgr": {
      "GracePeriod": "20s",
      "HighWater": 900,
      "LowWater": 600,
      "Type": "basic"
    },
    "DisableBandwidthMetrics": false,
    "DisableNatPortMap": false,
    "DisableRelay": false,
    "EnableRelayHop": false
  }
}
It looks like StorageMax does not actually limit the size of the IPFS node; instead, it's used to determine whether or not to run garbage collection. IPFS will write until the disk is full.
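As a minimal sketch of working with that behavior on a standard go-ipfs node: StorageMax together with StorageGCWatermark controls when garbage collection is triggered, GC only runs automatically if the daemon was started with it enabled, and it removes only unpinned blocks, so it is still not a hard cap.
# Set the nominal repo size and the watermark (a percentage of
# StorageMax) at which garbage collection is triggered.
ipfs config Datastore.StorageMax 1GB
ipfs config --json Datastore.StorageGCWatermark 90
# Start the daemon with automatic GC enabled...
ipfs daemon --enable-gc
# ...or trigger a collection manually.
ipfs repo gc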

ARM Template Web App Authentication Settings not working

I am working on setting up my site's authentication settings to use the AAD provider. Most of the template is respected. However, unauthenticatedClientAction and allowedAudiences are not being properly assigned: I observe 'allow anonymous' and no allowed audiences.
Please note that I was working with ARM template API version 2018-02-01. This problem may still exist due to the documentation; if you provide an answer, please note the ARM template API version it addresses.
Additionally, please create an issue for the ARM documentation team to correct any inaccuracies.
Here is my template segment for these settings. It is nested under resources in my website template.
root > Microsoft.Web/Site > Resources
{
  "type": "config",
  "name": "web",
  "apiVersion": "2016-08-01",
  "location": "[parameters('app-location')]",
  "dependsOn": [
    "[resourceId('Microsoft.Web/sites', variables('web-site-name'))]"
  ],
  "properties": {
    "siteAuthEnabled": true,
    "siteAuthSettings": {
      "enabled": true,
      "unauthenticatedClientAction": "RedirectToLoginPage",
      "tokenStoreEnabled": true,
      "defaultProvider": "AzureActiveDirectory",
      "clientId": "[parameters('web-aad-client-id')]",
      "issuer": "[concat('https://sts.windows.net/', parameters('web-aad-tenant'))]",
      "allowedAudiences": [
        "[concat('https://', variables('web-site-name'), '.azurewebsites.net')]"
      ]
    }
  }
}
The template validates and the deployment does not output any errors.
Issues:
- unauthenticatedClientAction is assigned "allow anonymous", not RedirectToLoginPage
- allowedAudiences is not assigned any sites
What could be causing these issues? What could I have missed?
I got my answer after working with the fine people at Azure Support.
Please note that this solution targets API version 2018-02-01, which was the current version at the time of this post.
This sub-resource is no longer a valid solution; while the endpoint may still recognize some of its fields, it is deprecated.
The new solution is to add the siteAuthSettings object to the main 'Microsoft.Web/sites' properties. siteAuthEnabled is no longer needed, as siteAuthSettings.enabled duplicates its functionality.
Updated ARM Template (removed other settings for brevity)
{
  "name": "[variables('app-service-name')]",
  "type": "Microsoft.Web/sites",
  "location": "[parameters('app-location')]",
  "apiVersion": "2016-08-01",
  "dependsOn": [
    "[variables('app-plan-name')]"
  ],
  "properties": {
    //... other app service settings
    "siteAuthSettings": {
      "enabled": true,
      "unauthenticatedClientAction": "RedirectToLoginPage",
      "tokenStoreEnabled": true,
      "defaultProvider": "AzureActiveDirectory",
      "clientId": "[parameters('web-aad-client-id')]",
      "issuer": "[concat('https://sts.windows.net/', parameters('web-aad-tenant'))]",
      "allowedAudiences": [
        "[concat('https://', variables('web-site-name'), '.azurewebsites.net')]"
      ]
    }
  }
}
As suggested by @Michael, the siteAuthSettings object must be added to the siteConfig object, not just under the root properties object.
{
  "apiVersion": "2019-08-01",
  "name": "[variables('webAppName')]",
  "type": "Microsoft.Web/sites",
  "kind": "app",
  "location": "[resourceGroup().location]",
  "dependsOn": [
    "[resourceId('Microsoft.Web/serverfarms', variables('appServiceName'))]"
  ],
  "properties": {
    ...
    "siteConfig": {
      "siteAuthSettings": {
        "enabled": true,
        "unauthenticatedClientAction": "RedirectToLoginPage",
        "tokenStoreEnabled": true,
        "defaultProvider": "AzureActiveDirectory",
        "clientId": "[parameters('clientId')]",
        "issuer": "[concat('https://sts.windows.net/', parameters('tenantId'), '/')]"
      }
    }
  }
}
The other solutions given only apply when using the classic authentication experience (Authentication (Classic)). If you would like to use the new authentication experience, use the configuration below:
{
  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "resources": [
    {
      "apiVersion": "2019-08-01",
      "name": "[variables('webAppName')]",
      "type": "Microsoft.Web/sites",
      "kind": "app",
      "location": "[resourceGroup().location]",
      "resources": [
        {
          "type": "config",
          "apiVersion": "2020-12-01",
          "name": "authsettingsV2",
          "location": "[resourceGroup().location]",
          "dependsOn": [
            "[concat('Microsoft.Web/sites/', variables('webAppName'))]"
          ],
          "properties": {
            "platform": {
              "enabled": true,
              "runtimeVersion": "~1"
            },
            "identityProviders": {
              "azureActiveDirectory": {
                "isAutoProvisioned": false,
                "registration": {
                  "clientId": "[parameters('clientId')]",
                  "clientSecret": "[variables('clientSecret')]",
                  "openIdIssuer": "[concat('https://sts.windows.net/', parameters('tenantId'), '/v2.0')]"
                },
                "validation": {
                  "allowedAudiences": [
                    "https://management.core.windows.net/"
                  ]
                }
              }
            },
            "login": {
              "routes": {},
              "tokenStore": {
                "enabled": true,
                "tokenRefreshExtensionHours": 72,
                "fileSystem": {},
                "azureBlobStorage": {}
              },
              "preserveUrlFragmentsForLogins": false,
              "allowedExternalRedirectUrls": [],
              "cookieExpiration": {
                "convention": "FixedTime",
                "timeToExpiration": "08:00:00"
              },
              "nonce": {
                "validateNonce": true,
                "nonceExpirationInterval": "00:05:00"
              }
            },
            "globalValidation": {
              "redirectToProvider": "azureactivedirectory",
              "unauthenticatedClientAction": "RedirectToLoginPage"
            },
            "httpSettings": {
              "requireHttps": true,
              "routes": {
                "apiPrefix": "/.auth"
              },
              "forwardProxy": {
                "convention": "NoProxy"
              }
            }
          }
        }
      ]
    }
  ]
}

How do I get French text FEMMES.COM to index as language variants of FEMMES

I need FEMMES.COM to get tokenized as singular + plural forms of the base word FEMME.
Custom Analyzer Config
"analyzers": [ { "#odata.type": "#Microsoft.Azure.Search.CustomAnalyzer", "name": "text_language_search_custom_analyzer", "tokenizer": "text_language_search_custom_analyzer_ms_tokenizer", "tokenFilters": [ "lowercase", "asciifolding" ], "charFilters": [ "html_strip" ] } ], "tokenizers": [ { "#odata.type": "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer", "name": "text_language_search_custom_analyzer_ms_tokenizer", "maxTokenLength": 300, "isSearchTokenizer": false, "language": "english" } ], "tokenFilters": [], "charFilters": []}
Analyze API call for FEMMES
{ "analyzer": "text_language_search_custom_analyzer", "text": "FEMMES" }
Analyze API response for FEMMES
{ "#odata.context": "https://one-adscope-search-eu-stage.search.windows.net/$metadata#Microsoft.Azure.Search.V2016_09_01.AnalyzeResult", "tokens": [ { "token": "femme", "startOffset": 0, "endOffset": 6, "position": 0 }, { "token": "femmes", "startOffset": 0, "endOffset": 6, "position": 0 } ] }
Analyze API response for FEMMES.COM
{ "#odata.context": "https://one-adscope-search-eu-stage.search.windows.net/$metadata#Microsoft.Azure.Search.V2016_09_01.AnalyzeResult", "tokens": [ { "token": "femmes", "startOffset": 0, "endOffset": 6, "position": 0 }, { "token": "com", "startOffset": 7, "endOffset": 10, "position": 1 } ] }
Analyze API response for FEMMES COM
{ "#odata.context": "https://one-adscope-search-eu-stage.search.windows.net/$metadata#Microsoft.Azure.Search.V2016_09_01.AnalyzeResult", "tokens": [ { "token": "femme", "startOffset": 0, "endOffset": 6, "position": 0 }, { "token": "femmes", "startOffset": 0, "endOffset": 6, "position": 0 }, { "token": "com", "startOffset": 7, "endOffset": 10, "position": 1 } ]}
I think I figured this one out myself after some experimentation. I found the MappingCharFilter could be used to replace . with , before the indexer did the tokenization. This allowed the lemmatization/stemming to work as expected on the terms in question. I need to do more thorough integration tests with our other use cases, but I think this would solve the problem for anybody facing the same type of issue.
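For reference, a char filter along those lines could look roughly like this in the analyzer definition (a sketch; the filter name dot_to_comma is mine, and it would still need to be referenced from the custom analyzer's charFilters list in place of, or alongside, html_strip):
"charFilters": [
  {
    "@odata.type": "#Microsoft.Azure.Search.MappingCharFilter",
    "name": "dot_to_comma",
    "mappings": [ ".=>," ]
  }
]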
My previous answer was not correct. The Azure Search implementation actually applies the language tokenizer BEFORE token filters. This essentially made the WordDelimiter token filter useless in my use case.
What I ended up having to do was pre-process the data BEFORE I uploaded it to Azure for indexing. In my C# code, I added some regex logic that would break apart text like FEMMES2017 into FEMMES 2017 before I sent it to Azure. This way, when the text got to Azure, the indexer would see FEMMES by itself and properly tokenize it as FEMME and FEMMES using the language tokenizer.
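A minimal sketch of that kind of pre-processing in C# (the helper name and pattern are illustrative, not the exact production code):
using System.Text.RegularExpressions;

static string SplitLetterDigitRuns(string input)
{
    // Insert a space at each letter-to-digit and digit-to-letter
    // boundary, so "FEMMES2017" becomes "FEMMES 2017" before upload.
    return Regex.Replace(input, @"(?<=\p{L})(?=\d)|(?<=\d)(?=\p{L})", " ");
}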
