Creating a SQL Server RDS instance using Terraform

I'm going to create a SQL Server database in RDS using Terraform. My Terraform file looks like this:
### RDS ###
# Subnet Group
resource "aws_db_subnet_group" "private" {
  name        = "db_arcgis-${var.env_name}-dbsubnet"
  description = "Subnet Group for Arcgis ${var.env_tag} DB"
  subnet_ids  = ["${aws_subnet.public1.id}", "${aws_subnet.public2.id}"]
  tags {
    Env = "${var.env_tag}"
  }
}

# RDS DB parameter group
# Must enable triggers to allow Multi-AZ
resource "aws_db_parameter_group" "allow_triggers" {
  name        = "arcgis-${var.env_name}-allow-triggers"
  family      = "sqlserver-se-12.0"
  description = "Parameter Group for Arcgis ${var.env_tag} to allow triggers"
  parameter {
    name  = "log_bin_trust_function_creators"
    value = "1"
  }
  tags {
    Env = "${var.env_tag}"
  }
}

# RDS
resource "aws_db_instance" "main" {
  allocated_storage       = "${var.db_size}"
  engine                  = "${var.db_engine}"
  engine_version          = "${var.db_version}"
  instance_class          = "${var.db_instance}"
  identifier              = "arcgis-${var.env_name}-db"
  name                    = "${var.db_name}"
  username                = "${var.db_username}"
  password                = "${var.db_password}"
  db_subnet_group_name    = "${aws_db_subnet_group.private.id}"
  parameter_group_name    = "${aws_db_parameter_group.allow_triggers.id}"
  multi_az                = "${var.db_multiaz}"
  vpc_security_group_ids  = ["${aws_security_group.private_rds.id}"]
  #availability_zone      = "${var.vpc_az1}"
  publicly_accessible     = "true"
  backup_retention_period = "2"
  apply_immediately       = "true"
  tags {
    Env = "${var.env_tag}"
  }
}
I get this error when applying the Terraform files:
Error applying plan:
1 error(s) occurred:
* aws_db_parameter_group.allow_triggers: Error modifying DB Parameter Group: InvalidParameterValue: Could not find parameter with name: log_bin_trust_function_creators
status code: 400, request id: d298ab14-8b94-11e6-a088-31e21873c378

The obvious issue here is that log_bin_trust_function_creators isn't an available parameter for the sqlserver-se-12.0 parameter group family, as you can see when listing all the parameters of a parameter group based on sqlserver-se-12.0 (a corrected sketch follows the two listings below):
$ aws rds describe-db-parameters --db-parameter-group-name test-sqlserver-se-12-0 --query 'Parameters[*].ParameterName'
[
"1204",
"1211",
"1222",
"1224",
"2528",
"3205",
"3226",
"3625",
"4199",
"4616",
"6527",
"7806",
"access check cache bucket count",
"access check cache quota",
"ad hoc distributed queries",
"affinity i/o mask",
"affinity mask",
"agent xps",
"allow updates",
"backup compression default",
"blocked process threshold (s)",
"c2 audit mode",
"clr enabled",
"contained database authentication",
"cost threshold for parallelism",
"cross db ownership chaining",
"cursor threshold",
"database mail xps",
"default full-text language",
"default language",
"default trace enabled",
"disallow results from triggers",
"filestream access level",
"fill factor (%)",
"ft crawl bandwidth (max)",
"ft crawl bandwidth (min)",
"ft notify bandwidth (max)",
"ft notify bandwidth (min)",
"in-doubt xact resolution",
"index create memory (kb)",
"lightweight pooling",
"locks",
"max degree of parallelism",
"max full-text crawl range",
"max server memory (mb)",
"max text repl size (b)",
"max worker threads",
"media retention",
"min memory per query (kb)",
"min server memory (mb)",
"nested triggers",
"network packet size (b)",
"ole automation procedures",
"open objects",
"optimize for ad hoc workloads",
"ph timeout (s)",
"priority boost",
"query governor cost limit",
"query wait (s)",
"recovery interval (min)",
"remote access",
"remote admin connections",
"remote login timeout (s)",
"remote proc trans",
"remote query timeout (s)",
"replication xps",
"scan for startup procs",
"server trigger recursion",
"set working set size",
"show advanced options",
"smo and dmo xps",
"transform noise words",
"two digit year cutoff",
"user connections",
"user options",
"xp_cmdshell"
]
Instead, that parameter is only available in the MySQL flavours:
$ aws rds describe-db-parameters --db-parameter-group-name default.mysql5.6 --query 'Parameters[*].ParameterName'
[
"allow-suspicious-udfs",
"auto_increment_increment",
"auto_increment_offset",
"autocommit",
"automatic_sp_privileges",
"back_log",
"basedir",
"binlog_cache_size",
"binlog_checksum",
"binlog_error_action",
"binlog_format",
"binlog_max_flush_queue_time",
"binlog_order_commits",
"binlog_row_image",
"binlog_rows_query_log_events",
"binlog_stmt_cache_size",
"binlogging_impossible_mode",
"bulk_insert_buffer_size",
"character-set-client-handshake",
"character_set_client",
"character_set_connection",
"character_set_database",
"character_set_filesystem",
"character_set_results",
"character_set_server",
"collation_connection",
"collation_server",
"completion_type",
"concurrent_insert",
"connect_timeout",
"core-file",
"datadir",
"default_storage_engine",
"default_time_zone",
"default_tmp_storage_engine",
"default_week_format",
"delay_key_write",
"delayed_insert_limit",
"delayed_insert_timeout",
"delayed_queue_size",
"div_precision_increment",
"end_markers_in_json",
"enforce_gtid_consistency",
"eq_range_index_dive_limit",
"event_scheduler",
"explicit_defaults_for_timestamp",
"flush",
"flush_time",
"ft_boolean_syntax",
"ft_max_word_len",
"ft_min_word_len",
"ft_query_expansion_limit",
"ft_stopword_file",
"general_log",
"general_log_file",
"group_concat_max_len",
"gtid-mode",
"host_cache_size",
"init_connect",
"innodb_adaptive_flushing",
"innodb_adaptive_flushing_lwm",
"innodb_adaptive_hash_index",
"innodb_adaptive_max_sleep_delay",
"innodb_autoextend_increment",
"innodb_autoinc_lock_mode",
"innodb_buffer_pool_dump_at_shutdown",
"innodb_buffer_pool_dump_now",
"innodb_buffer_pool_filename",
"innodb_buffer_pool_instances",
"innodb_buffer_pool_load_abort",
"innodb_buffer_pool_load_at_startup",
"innodb_buffer_pool_load_now",
"innodb_buffer_pool_size",
"innodb_change_buffer_max_size",
"innodb_change_buffering",
"innodb_checksum_algorithm",
"innodb_cmp_per_index_enabled",
"innodb_commit_concurrency",
"innodb_compression_failure_threshold_pct",
"innodb_compression_level",
"innodb_compression_pad_pct_max",
"innodb_concurrency_tickets",
"innodb_data_home_dir",
"innodb_fast_shutdown",
"innodb_file_format",
"innodb_file_per_table",
"innodb_flush_log_at_timeout",
"innodb_flush_log_at_trx_commit",
"innodb_flush_method",
"innodb_flush_neighbors",
"innodb_flushing_avg_loops",
"innodb_force_load_corrupted",
"innodb_ft_aux_table",
"innodb_ft_cache_size",
"innodb_ft_enable_stopword",
"innodb_ft_max_token_size",
"innodb_ft_min_token_size",
"innodb_ft_num_word_optimize",
"innodb_ft_result_cache_limit",
"innodb_ft_server_stopword_table",
"innodb_ft_sort_pll_degree",
"innodb_ft_user_stopword_table",
"innodb_io_capacity",
"innodb_io_capacity_max",
"innodb_large_prefix",
"innodb_lock_wait_timeout",
"innodb_log_buffer_size",
"innodb_log_compressed_pages",
"innodb_log_file_size",
"innodb_log_group_home_dir",
"innodb_lru_scan_depth",
"innodb_max_dirty_pages_pct",
"innodb_max_purge_lag",
"innodb_max_purge_lag_delay",
"innodb_monitor_disable",
"innodb_monitor_enable",
"innodb_monitor_reset",
"innodb_monitor_reset_all",
"innodb_old_blocks_pct",
"innodb_old_blocks_time",
"innodb_online_alter_log_max_size",
"innodb_open_files",
"innodb_optimize_fulltext_only",
"innodb_page_size",
"innodb_print_all_deadlocks",
"innodb_purge_batch_size",
"innodb_purge_threads",
"innodb_random_read_ahead",
"innodb_read_ahead_threshold",
"innodb_read_io_threads",
"innodb_read_only",
"innodb_replication_delay",
"innodb_rollback_on_timeout",
"innodb_rollback_segments",
"innodb_sort_buffer_size",
"innodb_spin_wait_delay",
"innodb_stats_auto_recalc",
"innodb_stats_method",
"innodb_stats_on_metadata",
"innodb_stats_persistent",
"innodb_stats_persistent_sample_pages",
"innodb_stats_transient_sample_pages",
"innodb_strict_mode",
"innodb_support_xa",
"innodb_sync_array_size",
"innodb_sync_spin_loops",
"innodb_table_locks",
"innodb_thread_concurrency",
"innodb_thread_sleep_delay",
"innodb_undo_directory",
"innodb_undo_logs",
"innodb_undo_tablespaces",
"innodb_use_native_aio",
"innodb_write_io_threads",
"interactive_timeout",
"join_buffer_size",
"keep_files_on_create",
"key_buffer_size",
"key_cache_age_threshold",
"key_cache_block_size",
"key_cache_division_limit",
"lc_time_names",
"local_infile",
"lock_wait_timeout",
"log-bin",
"log_bin_trust_function_creators",
"log_bin_use_v1_row_events",
"log_error",
"log_output",
"log_queries_not_using_indexes",
"log_slave_updates",
"log_slow_admin_statements",
"log_slow_slave_statements",
"log_throttle_queries_not_using_indexes",
"log_warnings",
"long_query_time",
"low_priority_updates",
"lower_case_table_names",
"master-info-repository",
"master_verify_checksum",
"max_allowed_packet",
"max_binlog_cache_size",
"max_binlog_size",
"max_binlog_stmt_cache_size",
"max_connect_errors",
"max_connections",
"max_delayed_threads",
"max_error_count",
"max_heap_table_size",
"max_insert_delayed_threads",
"max_join_size",
"max_length_for_sort_data",
"max_prepared_stmt_count",
"max_seeks_for_key",
"max_sort_length",
"max_sp_recursion_depth",
"max_tmp_tables",
"max_user_connections",
"max_write_lock_count",
"metadata_locks_cache_size",
"min_examined_row_limit",
"myisam_data_pointer_size",
"myisam_max_sort_file_size",
"myisam_mmap_size",
"myisam_sort_buffer_size",
"myisam_stats_method",
"myisam_use_mmap",
"net_buffer_length",
"net_read_timeout",
"net_retry_count",
"net_write_timeout",
"old-style-user-limits",
"old_passwords",
"optimizer_prune_level",
"optimizer_search_depth",
"optimizer_switch",
"optimizer_trace",
"optimizer_trace_features",
"optimizer_trace_limit",
"optimizer_trace_max_mem_size",
"optimizer_trace_offset",
"performance_schema",
"performance_schema_accounts_size",
"performance_schema_digests_size",
"performance_schema_events_stages_history_long_size",
"performance_schema_events_stages_history_size",
"performance_schema_events_statements_history_long_size",
"performance_schema_events_statements_history_size",
"performance_schema_events_waits_history_long_size",
"performance_schema_events_waits_history_size",
"performance_schema_hosts_size",
"performance_schema_max_cond_classes",
"performance_schema_max_cond_instances",
"performance_schema_max_file_classes",
"performance_schema_max_file_handles",
"performance_schema_max_file_instances",
"performance_schema_max_mutex_classes",
"performance_schema_max_mutex_instances",
"performance_schema_max_rwlock_classes",
"performance_schema_max_rwlock_instances",
"performance_schema_max_socket_classes",
"performance_schema_max_socket_instances",
"performance_schema_max_stage_classes",
"performance_schema_max_statement_classes",
"performance_schema_max_table_handles",
"performance_schema_max_table_instances",
"performance_schema_max_thread_classes",
"performance_schema_max_thread_instances",
"performance_schema_session_connect_attrs_size",
"performance_schema_setup_actors_size",
"performance_schema_setup_objects_size",
"performance_schema_users_size",
"pid_file",
"plugin_dir",
"port",
"preload_buffer_size",
"profiling_history_size",
"query_alloc_block_size",
"query_cache_limit",
"query_cache_min_res_unit",
"query_cache_size",
"query_cache_type",
"query_cache_wlock_invalidate",
"query_prealloc_size",
"range_alloc_block_size",
"read_buffer_size",
"read_only",
"read_rnd_buffer_size",
"relay-log",
"relay_log_info_repository",
"relay_log_recovery",
"safe-user-create",
"secure_auth",
"secure_file_priv",
"server_id",
"simplified_binlog_gtid_recovery",
"skip-character-set-client-handshake",
"skip-slave-start",
"skip_external_locking",
"skip_name_resolve",
"skip_show_database",
"slave_checkpoint_group",
"slave_checkpoint_period",
"slave_parallel_workers",
"slave_pending_jobs_size_max",
"slave_sql_verify_checksum",
"slave_type_conversions",
"slow_launch_time",
"slow_query_log",
"slow_query_log_file",
"socket",
"sort_buffer_size",
"sql_mode",
"sql_select_limit",
"stored_program_cache",
"sync_binlog",
"sync_frm",
"sync_master_info",
"sync_relay_log",
"sync_relay_log_info",
"sysdate-is-now",
"table_definition_cache",
"table_open_cache",
"table_open_cache_instances",
"temp-pool",
"thread_cache_size",
"thread_stack",
"time_zone",
"timed_mutexes",
"tmp_table_size",
"tmpdir",
"transaction_alloc_block_size",
"transaction_prealloc_size",
"tx_isolation",
"updatable_views_with_limit",
"validate-password",
"validate_password_dictionary_file",
"validate_password_length",
"validate_password_mixed_case_count",
"validate_password_number_count",
"validate_password_policy",
"validate_password_special_char_count",
"wait_timeout"
]
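Since the parameter simply doesn't exist for the SQL Server family, the fix is to drop the MySQL-only parameter block (the binlog/trigger setting has no SQL Server equivalent). As a rough sketch, not a tested configuration, a parameter group that the sqlserver-se-12.0 family would accept could look like this, using "max degree of parallelism" from the listing above purely as an illustration:

# Sketch only: either omit the custom parameter group entirely,
# or set a parameter that exists in the sqlserver-se-12.0 family.
resource "aws_db_parameter_group" "sqlserver" {
  name        = "arcgis-${var.env_name}-sqlserver"
  family      = "sqlserver-se-12.0"
  description = "Parameter Group for Arcgis ${var.env_tag} (SQL Server)"

  parameter {
    name  = "max degree of parallelism"   # illustrative parameter from the listing above
    value = "1"
  }

  tags {
    Env = "${var.env_tag}"
  }
}

The aws_db_instance would then point parameter_group_name at this group, or you can drop parameter_group_name and let RDS use the default sqlserver-se-12.0 group.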

Related

Spring Boot @DataJpaTest integration test with external SQL Server DB

I have the repository class below in my Spring Boot project. The repository has a method that returns Inventory data from SQL Server. This works in my project.
@Repository
public interface InventoryRepository extends JpaRepository<Inventory, Integer> {
    Inventory findByInventoryIdAndCompanyId(Integer inventoryId, Integer companyId);
}
I want to write an integration test for the repository that should get data from the dev and test environment SQL Server DBs.
These dev and test databases already contain data.
Below are the application.yml files in my resources folder (I have changed the URLs and credentials intentionally for this post).
application.yml:
spring:
profiles.active: development
application-developement.yml :
spring:
profiles: development
spring.datasource.type: com.zaxxer.hikari.HikariDataSource
spring.datasource.jdbc-url: jdbc:sqlserver://22.22.22.22:1533;instanceName=SQLSVR;databaseName=dev
spring.datasource.username: admin
spring.datasource.password: admin
spring.datasource.driver-class-name: com.microsoft.sqlserver.jdbc.SQLServerDriver
application-test.yml :
spring:
profiles: test
spring.datasource.type: com.zaxxer.hikari.HikariDataSource
spring.datasource.jdbc-url: jdbc:sqlserver://11.11.11.11:1533;instanceName=SQLSVR;databaseName=qa
spring.datasource.url: jdbc:sqlserver://11.11.11.11:1533;instanceName=SQLSVR;databaseName=qa
spring.datasource.username: admin
spring.datasource.password: admin
spring.datasource.driver-class-name: com.microsoft.sqlserver.jdbc.SQLServerDriver
Below is the test class for my repository.
@ExtendWith(SpringExtension.class)
@DataJpaTest
@ContextConfiguration
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
public class InventoryRepositoryTest {

    @Autowired
    InventoryRepository inventoryRepository;

    @Test
    public void getRepositoryByIdTest() {
        Assertions.assertEquals(1, inventoryRepository.findByInventoryIdAndCompanyId(1, 1));
    }
}
Below is the error I am getting while performing this test:
2021-10-18 03:35:38.917 INFO 11968 --- [ main] o.s.t.c.transaction.TransactionContext : Began transaction (1) for test context [DefaultTestContext#18d87d80 testClass = InventoryRepositoryTest, testInstance = com.cropin.mwarehouse.common.repository.InventoryRepositoryTest#437da279, testMethod = getRepositoryByIdTest#InventoryRepositoryTest, testException = [null], mergedContextConfiguration = [MergedContextConfiguration#618425b5 testClass = InventoryRepositoryTest, locations = '{}', classes = '{class com.cropin.mwarehouse.CropinMWarehouseServiceApplication}', contextInitializerClasses = '[]', activeProfiles = '{}', propertySourceLocations = '{}', propertySourceProperties = '{org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTestContextBootstrapper=true}', contextCustomizers = set[[ImportsContextCustomizer#58695725 key = [org.springframework.boot.autoconfigure.cache.CacheAutoConfiguration, org.springframework.boot.autoconfigure.data.jpa.JpaRepositoriesAutoConfiguration, org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration, org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration, org.springframework.boot.autoconfigure.jdbc.DataSourceTransactionManagerAutoConfiguration, org.springframework.boot.autoconfigure.jdbc.JdbcTemplateAutoConfiguration, org.springframework.boot.autoconfigure.liquibase.LiquibaseAutoConfiguration, org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration, org.springframework.boot.autoconfigure.transaction.TransactionAutoConfiguration, org.springframework.boot.test.autoconfigure.jdbc.TestDatabaseAutoConfiguration, org.springframework.boot.test.autoconfigure.orm.jpa.TestEntityManagerAutoConfiguration]], org.springframework.boot.test.context.filter.ExcludeFilterContextCustomizer#4b2bac3f, org.springframework.boot.test.json.DuplicateJsonObjectContextCustomizerFactory$DuplicateJsonObjectContextCustomizer#26794848, org.springframework.boot.test.mock.mockito.MockitoContextCustomizer#0, org.springframework.boot.test.autoconfigure.OverrideAutoConfigurationContextCustomizerFactory$DisableAutoConfigurationContextCustomizer#6ad82709, org.springframework.boot.test.autoconfigure.filter.TypeExcludeFiltersContextCustomizer#351584c0, org.springframework.boot.test.autoconfigure.properties.PropertyMappingContextCustomizer#fb6c1252, org.springframework.boot.test.autoconfigure.web.servlet.WebDriverContextCustomizerFactory$Customizer#158a8276], contextLoader = 'org.springframework.boot.test.context.SpringBootContextLoader', parent = [null]], attributes = map[[empty]]]; transaction manager [org.springframework.orm.jpa.JpaTransactionManager#f310675]; rollback [true]
2021-10-18 03:35:38.963 DEBUG 11968 --- [ main] org.hibernate.SQL :
select
inventory0_.inventoryId as inventor1_10_,
inventory0_.createdBy as createdB2_10_,
inventory0_.createdDate as createdD3_10_,
inventory0_.lastModifiedBy as lastModi4_10_,
inventory0_.lastModifiedDate as lastModi5_10_,
inventory0_.balanceWeight as balanceW6_10_,
inventory0_.batchCreationDate as batchCre7_10_,
inventory0_.batchNumber as batchNum8_10_,
inventory0_.clientId as clientId9_10_,
inventory0_.companyId as company10_10_,
inventory0_.currencyUnitId as currenc11_10_,
inventory0_.dateOfEntry as dateOfE12_10_,
inventory0_.harvestReferenceId as harvest13_10_,
inventory0_.inventoryStatus as invento14_10_,
inventory0_.isActive as isActiv15_10_,
inventory0_.itemId as itemId16_10_,
inventory0_.locationId as locatio17_10_,
inventory0_.parentInventoryId as parentI18_10_,
inventory0_.processId as process19_10_,
inventory0_.quantity as quantit20_10_,
inventory0_.quantityBalance as quantit21_10_,
inventory0_.supplierId as supplie22_10_,
inventory0_.unitPrice as unitPri23_10_,
inventory0_.weight as weight24_10_
from
inv.Inventory inventory0_
where
inventory0_.inventoryId=?
and inventory0_.companyId=?
Hibernate:
select
inventory0_.inventoryId as inventor1_10_,
inventory0_.createdBy as createdB2_10_,
inventory0_.createdDate as createdD3_10_,
inventory0_.lastModifiedBy as lastModi4_10_,
inventory0_.lastModifiedDate as lastModi5_10_,
inventory0_.balanceWeight as balanceW6_10_,
inventory0_.batchCreationDate as batchCre7_10_,
inventory0_.batchNumber as batchNum8_10_,
inventory0_.clientId as clientId9_10_,
inventory0_.companyId as company10_10_,
inventory0_.currencyUnitId as currenc11_10_,
inventory0_.dateOfEntry as dateOfE12_10_,
inventory0_.harvestReferenceId as harvest13_10_,
inventory0_.inventoryStatus as invento14_10_,
inventory0_.isActive as isActiv15_10_,
inventory0_.itemId as itemId16_10_,
inventory0_.locationId as locatio17_10_,
inventory0_.parentInventoryId as parentI18_10_,
inventory0_.processId as process19_10_,
inventory0_.quantity as quantit20_10_,
inventory0_.quantityBalance as quantit21_10_,
inventory0_.supplierId as supplie22_10_,
inventory0_.unitPrice as unitPri23_10_,
inventory0_.weight as weight24_10_
from
inv.Inventory inventory0_
where
inventory0_.inventoryId=?
and inventory0_.companyId=?
2021-10-18 03:35:39.142 DEBUG 11968 --- [ main] org.hibernate.SQL :
select
companymas0_.companyId as companyI1_1_0_,
companymas0_.createdBy as createdB2_1_0_,
companymas0_.createdDate as createdD3_1_0_,
companymas0_.lastModifiedBy as lastModi4_1_0_,
companymas0_.lastModifiedDate as lastModi5_1_0_,
companymas0_.companyAddress as companyA6_1_0_,
companymas0_.companyCode as companyC7_1_0_,
companymas0_.companyDesc as companyD8_1_0_,
companymas0_.companyLogo as companyL9_1_0_,
companymas0_.companyName as company10_1_0_,
companymas0_.companyPreferredSubDomain as company11_1_0_,
companymas0_.contactEmail as contact12_1_0_,
companymas0_.contactNumber as contact13_1_0_,
companymas0_.defaultRadiusForGeoFencing as default14_1_0_,
companymas0_.fiscalMonth as fiscalM15_1_0_,
companymas0_.isGDPRRequired as isGDPRR16_1_0_,
companymas0_.isActive as isActiv17_1_0_,
companymas0_.isBlueToothRequired as isBlueT18_1_0_,
companymas0_.isGeoFencingRequired as isGeoFe19_1_0_,
companymas0_.isHarvestPaid as isHarve20_1_0_,
companymas0_.isShareImage as isShare21_1_0_,
companymas0_.isVerified as isVerif22_1_0_,
companymas0_.isZohoEnable as isZohoE23_1_0_,
companymas0_.planTypeId as planTyp24_1_0_,
companymas0_.primaryCountry as primary25_1_0_,
companymas0_.sTA as sTA26_1_0_,
companymas0_.webSite as webSite27_1_0_
from
dbo.CompanyMaster companymas0_
where
companymas0_.companyId=?
Hibernate:
select
companymas0_.companyId as companyI1_1_0_,
companymas0_.createdBy as createdB2_1_0_,
companymas0_.createdDate as createdD3_1_0_,
companymas0_.lastModifiedBy as lastModi4_1_0_,
companymas0_.lastModifiedDate as lastModi5_1_0_,
companymas0_.companyAddress as companyA6_1_0_,
companymas0_.companyCode as companyC7_1_0_,
companymas0_.companyDesc as companyD8_1_0_,
companymas0_.companyLogo as companyL9_1_0_,
companymas0_.companyName as company10_1_0_,
companymas0_.companyPreferredSubDomain as company11_1_0_,
companymas0_.contactEmail as contact12_1_0_,
companymas0_.contactNumber as contact13_1_0_,
companymas0_.defaultRadiusForGeoFencing as default14_1_0_,
companymas0_.fiscalMonth as fiscalM15_1_0_,
companymas0_.isGDPRRequired as isGDPRR16_1_0_,
companymas0_.isActive as isActiv17_1_0_,
companymas0_.isBlueToothRequired as isBlueT18_1_0_,
companymas0_.isGeoFencingRequired as isGeoFe19_1_0_,
companymas0_.isHarvestPaid as isHarve20_1_0_,
companymas0_.isShareImage as isShare21_1_0_,
companymas0_.isVerified as isVerif22_1_0_,
companymas0_.isZohoEnable as isZohoE23_1_0_,
companymas0_.planTypeId as planTyp24_1_0_,
companymas0_.primaryCountry as primary25_1_0_,
companymas0_.sTA as sTA26_1_0_,
companymas0_.webSite as webSite27_1_0_
from
dbo.CompanyMaster companymas0_
where
companymas0_.companyId=?
2021-10-18 03:35:39.292 DEBUG 11968 --- [ main] org.hibernate.SQL :
select
locationma0_.locationId as location1_22_0_,
locationma0_.createdBy as createdB2_22_0_,
locationma0_.createdDate as createdD3_22_0_,
locationma0_.lastModifiedBy as lastModi4_22_0_,
locationma0_.lastModifiedDate as lastModi5_22_0_,
locationma0_.addressLine1 as addressL6_22_0_,
locationma0_.addressLine2 as addressL7_22_0_,
locationma0_.companyId as companyI8_22_0_,
locationma0_.coordinates as coordina9_22_0_,
locationma0_.districtId as distric10_22_0_,
locationma0_.geoId as geoId11_22_0_,
locationma0_.imageName as imageNa12_22_0_,
locationma0_.isActive as isActiv13_22_0_,
locationma0_.latitude as latitud14_22_0_,
locationma0_.locationTypeId as locatio15_22_0_,
locationma0_.longitude as longitu16_22_0_,
locationma0_.name as name17_22_0_,
locationma0_.parentLocationId as parentL18_22_0_,
locationma0_.pincode as pincode19_22_0_,
locationma0_.placeName as placeNa20_22_0_,
locationma0_.stateId as stateId21_22_0_
from
inv.LocationMaster locationma0_
where
locationma0_.locationId=?
Hibernate:
select
locationma0_.locationId as location1_22_0_,
locationma0_.createdBy as createdB2_22_0_,
locationma0_.createdDate as createdD3_22_0_,
locationma0_.lastModifiedBy as lastModi4_22_0_,
locationma0_.lastModifiedDate as lastModi5_22_0_,
locationma0_.addressLine1 as addressL6_22_0_,
locationma0_.addressLine2 as addressL7_22_0_,
locationma0_.companyId as companyI8_22_0_,
locationma0_.coordinates as coordina9_22_0_,
locationma0_.districtId as distric10_22_0_,
locationma0_.geoId as geoId11_22_0_,
locationma0_.imageName as imageNa12_22_0_,
locationma0_.isActive as isActiv13_22_0_,
locationma0_.latitude as latitud14_22_0_,
locationma0_.locationTypeId as locatio15_22_0_,
locationma0_.longitude as longitu16_22_0_,
locationma0_.name as name17_22_0_,
locationma0_.parentLocationId as parentL18_22_0_,
locationma0_.pincode as pincode19_22_0_,
locationma0_.placeName as placeNa20_22_0_,
locationma0_.stateId as stateId21_22_0_
from
inv.LocationMaster locationma0_
where
locationma0_.locationId=?
2021-10-18 03:35:39.663 INFO 11968 --- [ main] o.s.t.c.transaction.TransactionContext : Rolled back transaction for test: [DefaultTestContext#18d87d80 testClass = InventoryRepositoryTest, testInstance = com.cropin.mwarehouse.common.repository.InventoryRepositoryTest#437da279, testMethod = getRepositoryByIdTest#InventoryRepositoryTest, testException = org.opentest4j.AssertionFailedError: expected: <1> but was: <com.cropin.mwarehouse.common.entity.Inventory#5b58f639>, mergedContextConfiguration = [MergedContextConfiguration#618425b5 testClass = InventoryRepositoryTest, locations = '{}', classes = '{class com.cropin.mwarehouse.CropinMWarehouseServiceApplication}', contextInitializerClasses = '[]', activeProfiles = '{}', propertySourceLocations = '{}', propertySourceProperties = '{org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTestContextBootstrapper=true}', contextCustomizers = set[[ImportsContextCustomizer#58695725 key = [org.springframework.boot.autoconfigure.cache.CacheAutoConfiguration, org.springframework.boot.autoconfigure.data.jpa.JpaRepositoriesAutoConfiguration, org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration, org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration, org.springframework.boot.autoconfigure.jdbc.DataSourceTransactionManagerAutoConfiguration, org.springframework.boot.autoconfigure.jdbc.JdbcTemplateAutoConfiguration, org.springframework.boot.autoconfigure.liquibase.LiquibaseAutoConfiguration, org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration, org.springframework.boot.autoconfigure.transaction.TransactionAutoConfiguration, org.springframework.boot.test.autoconfigure.jdbc.TestDatabaseAutoConfiguration, org.springframework.boot.test.autoconfigure.orm.jpa.TestEntityManagerAutoConfiguration]], org.springframework.boot.test.context.filter.ExcludeFilterContextCustomizer#4b2bac3f, org.springframework.boot.test.json.DuplicateJsonObjectContextCustomizerFactory$DuplicateJsonObjectContextCustomizer#26794848, org.springframework.boot.test.mock.mockito.MockitoContextCustomizer#0, org.springframework.boot.test.autoconfigure.OverrideAutoConfigurationContextCustomizerFactory$DisableAutoConfigurationContextCustomizer#6ad82709, org.springframework.boot.test.autoconfigure.filter.TypeExcludeFiltersContextCustomizer#351584c0, org.springframework.boot.test.autoconfigure.properties.PropertyMappingContextCustomizer#fb6c1252, org.springframework.boot.test.autoconfigure.web.servlet.WebDriverContextCustomizerFactory$Customizer#158a8276], contextLoader = 'org.springframework.boot.test.context.SpringBootContextLoader', parent = [null]], attributes = map[[empty]]]
org.opentest4j.AssertionFailedError:
Expected :1
Actual :com.cropin.mwarehouse.common.entity.Inventory#5b58f639
<Click to see difference>
at org.junit.jupiter.api.AssertionUtils.fail(AssertionUtils.java:55)
at org.junit.jupiter.api.AssertionUtils.failNotEqual(AssertionUtils.java:62)
at org.junit.jupiter.api.AssertEquals.assertEquals(AssertEquals.java:182)
at org.junit.jupiter.api.AssertEquals.assertEquals(AssertEquals.java:177)
at org.junit.jupiter.api.Assertions.assertEquals(Assertions.java:1124)
at com.cropin.mwarehouse.common.repository.InventoryRepositoryTest.getRepositoryByIdTest(InventoryRepositoryTest.java:23)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:675)
at org.junit.jupiter.engine.execution.MethodInvocation.proceed(MethodInvocation.java:60)
at org.junit.jupiter.engine.execution.InvocationInterceptorChain$ValidatingInvocation.proceed(InvocationInterceptorChain.java:125)
at org.junit.jupiter.engine.extension.TimeoutExtension.intercept(TimeoutExtension.java:132)
at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestableMethod(TimeoutExtension.java:124)
at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestMethod(TimeoutExtension.java:74)
at org.junit.jupiter.engine.execution.ExecutableInvoker$ReflectiveInterceptorCall.lambda$ofVoidMethod$0(ExecutableInvoker.java:115)
at org.junit.jupiter.engine.execution.ExecutableInvoker.lambda$invoke$0(ExecutableInvoker.java:105)
at org.junit.jupiter.engine.execution.InvocationInterceptorChain$InterceptedInvocation.proceed(InvocationInterceptorChain.java:104)
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.proceed(InvocationInterceptorChain.java:62)
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.chainAndInvoke(InvocationInterceptorChain.java:43)
at org.junit.jupiter.engine.execution.InvocationInterceptorChain.invoke(InvocationInterceptorChain.java:35)
at org.junit.jupiter.engine.execution.ExecutableInvoker.invoke(ExecutableInvoker.java:104)
at org.junit.jupiter.engine.execution.ExecutableInvoker.invoke(ExecutableInvoker.java:98)
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.lambda$invokeTestMethod$6(TestMethodTestDescriptor.java:202)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.invokeTestMethod(TestMethodTestDescriptor.java:198)
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.execute(TestMethodTestDescriptor.java:135)
at org.junit.jupiter.engine.descriptor.TestMethodTestDescriptor.execute(TestMethodTestDescriptor.java:69)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$5(NodeTestTask.java:135)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$7(NodeTestTask.java:125)
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:135)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:123)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:122)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:80)
at java.util.ArrayList.forEach(ArrayList.java:1259)
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:38)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$5(NodeTestTask.java:139)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$7(NodeTestTask.java:125)
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:135)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:123)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:122)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:80)
at java.util.ArrayList.forEach(ArrayList.java:1259)
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.invokeAll(SameThreadHierarchicalTestExecutorService.java:38)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$5(NodeTestTask.java:139)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$7(NodeTestTask.java:125)
at org.junit.platform.engine.support.hierarchical.Node.around(Node.java:135)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.lambda$executeRecursively$8(NodeTestTask.java:123)
at org.junit.platform.engine.support.hierarchical.ThrowableCollector.execute(ThrowableCollector.java:73)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.executeRecursively(NodeTestTask.java:122)
at org.junit.platform.engine.support.hierarchical.NodeTestTask.execute(NodeTestTask.java:80)
at org.junit.platform.engine.support.hierarchical.SameThreadHierarchicalTestExecutorService.submit(SameThreadHierarchicalTestExecutorService.java:32)
at org.junit.platform.engine.support.hierarchical.HierarchicalTestExecutor.execute(HierarchicalTestExecutor.java:57)
at org.junit.platform.engine.support.hierarchical.HierarchicalTestEngine.execute(HierarchicalTestEngine.java:51)
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:229)
at org.junit.platform.launcher.core.DefaultLauncher.lambda$execute$6(DefaultLauncher.java:197)
at org.junit.platform.launcher.core.DefaultLauncher.withInterceptedStreams(DefaultLauncher.java:211)
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:191)
at org.junit.platform.launcher.core.DefaultLauncher.execute(DefaultLauncher.java:128)
at com.intellij.junit5.JUnit5IdeaTestRunner.startRunnerWithArgs(JUnit5IdeaTestRunner.java:71)
at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:33)
at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:235)
at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:54)
Below are the questions for which I am looking for answers:
1. Does @DataJpaTest only work with an in-memory DB? Here I am trying to connect to an external SQL Server DB; is it really connecting to it?
2. If @DataJpaTest is able to connect to the external SQL Server DB, why is it failing when I already have records matching the test parameters?
3. How can I use profiles here? There is @ActiveProfiles, but I want to use the same tests for both environments (dev and qa); how would profiles work in that case?
4. In the error log the active profiles show as empty; what does that mean? Is it not picking up the development profile?
5. How can I run integration tests against my dev and qa DBs, which already have data? I don't want to use an in-memory DB.
Please help me out with these questions.
The actual connection seems to work. @DataJpaTest works with any DataSource configuration; it just takes the opinionated approach of using an in-memory database by default. You already added the required code to opt out and use your own DataSource with:
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
The actual test failure happens because you compare an int with an actual Java object. Either return all objects and check the size:
Assertions.assertEquals(1, inventoryRepository.findAll().size());
... or assert that the result is not null:
Assertions.assertNotNull(inventoryRepository.findByInventoryIdAndCompanyId(1,1));
Take a closer look at the assertion error:
org.opentest4j.AssertionFailedError:
Expected :1
Actual :com.cropin.mwarehouse.common.entity.Inventory#5b58f639
<Click to see difference>
FYI: You should be able to reduce the test setup to:
// @ExtendWith(SpringExtension.class) already comes with @DataJpaTest
@DataJpaTest
// @ContextConfiguration is not needed here
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
public class InventoryRepositoryTest {
    // ... same test methods as above
}
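Regarding question 3 (running the same tests against both environments): one option, sketched here under the assumption that your profiles really are named development and test, is to keep the test methods in an abstract base class and pick the profile per subclass with @ActiveProfiles:

// imports as in the original test class, plus org.springframework.test.context.ActiveProfiles
@DataJpaTest
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
abstract class AbstractInventoryRepositoryTest {

    @Autowired
    InventoryRepository inventoryRepository;

    @Test
    void findsInventoryByIdAndCompanyId() {
        // assertNotNull avoids the int-vs-entity comparison from the original test
        Assertions.assertNotNull(inventoryRepository.findByInventoryIdAndCompanyId(1, 1));
    }
}

@ActiveProfiles("development")
class DevInventoryRepositoryTest extends AbstractInventoryRepositoryTest { }

@ActiveProfiles("test")
class QaInventoryRepositoryTest extends AbstractInventoryRepositoryTest { }

Spring's test context framework scans the class hierarchy, so the subclasses inherit the @DataJpaTest setup and differ only in the active profile.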

Parsing stdout chunks into arrays in bash or ruby

I am trying to find the most efficient way to turn stdout log entries from racadm (Dell chassis/iDRAC) into individual arrays or JSON arrays so I can evaluate each entry one at a time. The output always has the same fields. The output below is pretty typical:
$ racadm chassislog view -c Storage -b PDR
SeqNumber = 11700
Message ID = PDR17
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-21 00:02:06
Message Arg 1 = Physical Disk 0:0:15
FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Global hot spare assigned to Physical Disk 0:0:15.
--------------------------------------------------------------------------------
SeqNumber = 11699
Message ID = PDR26
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-21 00:02:04
Message Arg 1 = Physical Disk 0:0:3
FQDD = Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Physical Disk 0:0:3 is online.
--------------------------------------------------------------------------------
SeqNumber = 11696
Message ID = PDR71
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-21 00:02:01
Message Arg 1 = Physical Disk 0:0:15
Message Arg 2 = Physical Disk 0:0:3
FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Copyback completed from Physical Disk 0:0:15 to Physical Disk 0:0:3.
--------------------------------------------------------------------------------
SeqNumber = 11670
Message ID = PDR70
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-20 21:45:47
Message Arg 1 = Physical Disk 0:0:15
Message Arg 2 = Physical Disk 0:0:3
FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Copyback started from Physical Disk 0:0:15 to Physical Disk 0:0:3.
--------------------------------------------------------------------------------
SeqNumber = 11667
Message ID = PDR8
Category = Storage
AgentID = CMC
Severity = Information
Timestamp = 2020-03-20 21:45:44
Message Arg 1 = Physical Disk 0:0:3
FQDD = Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
Message = Physical Disk 0:0:3 is inserted.
--------------------------------------------------------------------------------
I'd really love to read the entire output into an associative array so I could step through
each entry in a for loop for events. Looking for guidance in Ruby (Chef) or bash.
This perl one-liner converts input like the above into an array of JSON objects which you can then process in any JSON-aware tool.
racadm chassislog view -c Storage -b PDR | \
perl -MJSON::PP -lne 'if (/([^=]*?)\s*=\s*(.*)/) { $obj{$1} = $2 }
elsif (/^-+$/) { push @records, { %obj }; undef %obj }
END { push @records, { %obj } if %obj;
print encode_json(\@records) }'
This outputs (after pretty-printing):
[
{
"Timestamp": "2020-03-21 00:02:06",
"Message ID": "PDR17",
"Category": "Storage",
"Message": "Global hot spare assigned to Physical Disk 0:0:15.",
"AgentID": "CMC",
"Severity": "Information",
"SeqNumber": "11700",
"FQDD": "Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 1": "Physical Disk 0:0:15"
},
{
"Category": "Storage",
"Message ID": "PDR26",
"Timestamp": "2020-03-21 00:02:04",
"SeqNumber": "11699",
"Message": "Physical Disk 0:0:3 is online.",
"Severity": "Information",
"AgentID": "CMC",
"Message Arg 1": "Physical Disk 0:0:3",
"FQDD": "Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1"
},
{
"FQDD": "Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 2": "Physical Disk 0:0:3",
"Message Arg 1": "Physical Disk 0:0:15",
"Severity": "Information",
"AgentID": "CMC",
"Message": "Copyback completed from Physical Disk 0:0:15 to Physical Disk 0:0:3.",
"SeqNumber": "11696",
"Timestamp": "2020-03-21 00:02:01",
"Category": "Storage",
"Message ID": "PDR71"
},
{
"Message Arg 1": "Physical Disk 0:0:15",
"FQDD": "Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 2": "Physical Disk 0:0:3",
"SeqNumber": "11670",
"Message": "Copyback started from Physical Disk 0:0:15 to Physical Disk 0:0:3.",
"Severity": "Information",
"AgentID": "CMC",
"Category": "Storage",
"Message ID": "PDR70",
"Timestamp": "2020-03-20 21:45:47"
},
{
"Timestamp": "2020-03-20 21:45:44",
"Message ID": "PDR8",
"Category": "Storage",
"Message": "Physical Disk 0:0:3 is inserted.",
"AgentID": "CMC",
"Severity": "Information",
"SeqNumber": "11667",
"FQDD": "Disk.Bay.3:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1",
"Message Arg 1": "Physical Disk 0:0:3"
}
]
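As a follow-up sketch (not part of the original answer, and assuming jq is available on the system), the JSON array can then be queried directly, for example to pull the FQDD of every copyback-started event:

racadm chassislog view -c Storage -b PDR | \
perl -MJSON::PP -lne '...the one-liner above...' | \
jq -r '.[] | select(.Message | test("Copyback started")) | .FQDD'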
Not bash, since the shell is for handling files and launching commands, but GNU awk, which is often falsely perceived as part of the shell, is a simple yet powerful programming language. Stepping through each entry in a for loop for events is not really a requirement in itself, so here is a small sample:
$ gawk -v item="Message Arg 2" ' # queried item as parameter
BEGIN {
RS="\n-+$\n" # record is separated by a bunch of -:s
FS="\n" # a line is a field within a record
}
{
for(nf=1;nf<=NF;nf++) { # loop all lines in a record
split($nf,t,/ *= */) # split lines by = and surrounding space
a[NR][t[1]]=t[2] # hash to a 2 dimensional array indexed by
} # record no. and the item, value as value
}
END { # after lines are hashed, make queries
for(nr in a) # for each record in hash
if(item in a[nr]) # if queried item is found in it
printf "%d: %s = %s\n", nr,item,a[nr][item] # output
}' file
Output for query item Message Arg 2:
3: Message Arg 2 = Physical Disk 0:0:3
4: Message Arg 2 = Physical Disk 0:0:3
Here is an alternate ending: to match a condition I'm looking for in "Message" and reference the corresponding FQDD:
$ gawk -v item=Message -v cond=started -v output=FQDD '
BEGIN {
RS="\n-+$\n" # record is separated by a bunch of -:s
FS="\n" # a line is a field within a record
}
{
for(nf=1;nf<=NF;nf++) { # loop all lines in a record
split($nf,t,/ *= */) # split lines by = and surrounding space
a[NR][t[1]]=t[2] # hash to a 2 dimensional array indexed by
} # record no. and the item, value as value
}
END {
for(nr in a)
if((item in a[nr]) && a[nr][item]~cond)
printf "%d: %s = %s\n", nr,output,a[nr][output]
}' file
Output now:
4: FQDD = Disk.Bay.15:Enclosure.Internal.0-0:RAID.ChassisIntegrated.1-1
i.e. if the variable item is found in a[nr][item] and that array element's value matches cond, print the value of a[nr]["FQDD"] in the same record.
In SQL that would be roughly: SELECT output FROM file WHERE item LIKE '%cond%'
Based on Shawn's one-liner as a pattern, a colleague ended up finding a Python 2.7 compatible way to do exactly what we want. The code is below and offers the exact functionality I need.
import re
import json
from pprint import pprint
regex_string_1 = '([^=]*?)\s*=\s*(.*)'
regex_string_2 = '^-+$'
regex1 = re.compile(regex_string_1)
regex2 = re.compile(regex_string_2)
current_entry = {}
entries = []
# 'test' is assumed to hold the raw racadm output as a single string
lines = test.split('\n')
for line in lines:
if regex1.match(line):
key, value = [element.strip() for element in line.split('=')]
current_entry[key] = value
elif regex2.match(line):
entries.append(current_entry)
current_entry = {}
pprint(entries)
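To feed live racadm output into this script, a hedged sketch (the subprocess call is my addition, not part of the original snippet) would be:

import subprocess

# capture the raw racadm output as one string, then parse it with the loop above
test = subprocess.check_output(['racadm', 'chassislog', 'view', '-c', 'Storage', '-b', 'PDR'])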

Cannot execute Solr queries on non-Search nodes

I'm using DataStax Enterprise, and I get this exception:
com.datastax.driver.core.exceptions.InvalidQueryException: Cannot execute Solr queries on non-Search nodes.
at com.datastax.driver.core.exceptions.InvalidQueryException.copy(InvalidQueryException.java:50)
at com.datastax.driver.core.DriverThrowables.propagateCause(DriverThrowables.java:37)
at com.datastax.driver.core.DefaultResultSetFuture.getUninterruptibly(DefaultResultSetFuture.java:245)
at com.datastax.driver.core.AbstractSession.execute(AbstractSession.java:64)
at com.datastax.driver.core.AbstractSession.execute(AbstractSession.java:39)
at com.example.cassandra.simple_client.App.main(App.java:98)
When I try to run the following:
Cluster cluster = Cluster.builder()
.addContactPoint("104.236.160.56")
.addContactPoint("104.236.160.96")
.withRetryPolicy(DefaultRetryPolicy.INSTANCE)
.withLoadBalancingPolicy(new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build()))
.build();
Metadata metadata = cluster.getMetadata();
System.out.printf("Connected to cluster: %s\n", metadata.getClusterName());
for ( Host host : metadata.getAllHosts() )
System.out.printf("Datacenter: %s; Host: %s; Rack: %s; Dse version: %s; Cassandra version: %s\n", host.getDatacenter(), host.getAddress(), host.getRack(), host.getDseVersion(), host.getCassandraVersion());
try {
Session session = cluster.connect("rombie");
ResultSet result = session.execute("SELECT * FROM rombie.force WHERE solr_query='{\"q\":\"point:[ 30 TO *]\", \"sort\":\"point desc\"}' LIMIT 50 ALLOW FILTERING");
List<Row> list = result.all();
for (Row row : list)
System.out.println(row.getString("force_tag"));
} catch (Exception e) {
e.printStackTrace();
}
There are 2 nodes in the same datacenter, and they can see each other:
104.236.160.56: Cassandra
104.236.160.96: Solr
Note: it works if I comment out the Cassandra node (104.236.160.56) or if I use a normal query instead of a Solr query.

Setting a non-default DB adapter in Zend Framework 1.12

I have a very specific problem, but I'm a newbie with Zend Framework, so I don't know exactly how this DB adapter configuration works. I've already made a DB connection with Zend's default adapter, and it was successful. Now I have to set up two different database connections to two different DBs in the same application. So I opened my application.ini and wrote the following lines:
;connessione al db
resources.db.adapter = pdo_mssql
resources.db.params.host = "ip"
resources.db.params.username = user
resources.db.params.password = pwd
resources.db.params.dbname = NAME
resources.db.isDefaultTableAdapter = true
resources.db.params.pdoType = dblib
;connessione al db1
resources.db1.adapter = pdo_mssql
resources.db1.params.host = "ip"
resources.db1.params.username = user
resources.db1.params.password = pwd
resources.db1.params.dbname = NAME
resources.db1.isDefaultTableAdapter = false
resources.db1.params.pdoType = dblib
Then I went to my action controller and wrote:
$db = Zend_Registry::get ( 'db' );
$result = $db->fetchRow("SELECT [Sell-to Customer No_] FROM dbo.SyncroPlanningTable WHERE id='".$id."'");
$rag_soc=$result->{"Sell-to Customer No_"};
$db1 = Zend_Registry::get ( 'db1' );
$result1 = $db1->fetchRow("SELECT [No_],[Name],[Address],[City],[Contact],[Name],[Phone] FROM `dbo.SOS$Customer` WHERE No_ = '".$rag_soc."'");
The error I'm getting is the following:
Fatal error: Uncaught exception 'Zend_Application_Bootstrap_Exception' with message 'Unable to resolve plugin "db1";
UPDATE:
My bootstrap.php is:
$resource = $this->getPluginResource ( "db" );
$db = $resource->getDbAdapter ();
$db->setFetchMode ( Zend_Db::FETCH_OBJ );
Zend_Db_Table_Abstract::setDefaultAdapter ( $db );
Zend_Registry::set ( "db", $db );
How can I change it? It is not mentioned in the manual page you gave me.
resources.db refers to Zend_Application_Resource_Db, so "db" here is not a variable name.
You should use Zend_Application_Resource_Multidb to support multiple database connections:
http://framework.zend.com/manual/1.12/en/zend.application.available-resources.html#zend.application.available-resources.multidb
Your code is expecting the DB adapters to be in the registry, so you need to grab them from the multiDB resource and store them:
$multiDB = $this->getPluginResource('multidb');
Zend_Registry::set('db1', $multiDB->getDb('db1'));
Zend_Registry::set('db2', $multiDB->getDb('db2'));
Also, this line:
Zend_Db_Table_Abstract::setDefaultAdapter ( $db );
can be removed, as you're specifying the default adapter in the application.ini.
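For completeness, a sketch of what application.ini could look like with the multidb resource (adapted from your existing resources.db block; check the exact parameter names against the linked manual page, and note the pdoType pass-through is an assumption):

resources.multidb.db1.adapter = pdo_mssql
resources.multidb.db1.host = "ip"
resources.multidb.db1.username = user
resources.multidb.db1.password = pwd
resources.multidb.db1.dbname = NAME
resources.multidb.db1.pdoType = dblib
resources.multidb.db1.default = true

resources.multidb.db2.adapter = pdo_mssql
resources.multidb.db2.host = "ip"
resources.multidb.db2.username = user
resources.multidb.db2.password = pwd
resources.multidb.db2.dbname = NAME
resources.multidb.db2.pdoType = dblib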

Data encryption issues with Oracle Advanced Security

I have used Oracle Advanced Security to encrypt data during data transfer. I have successfully configured SSL with the parameters below and restarted the instance. I am retrieving data from the Java class given below, but I can read the data without decrypting it; the data does not appear to be encrypted.
Environment:
Oracle 11g database
SQLNET.AUTHENTICATION_SERVICES= (BEQ, TCPS, NTS)
SSL_VERSION = 0
NAMES.DIRECTORY_PATH= (TNSNAMES, EZCONNECT)
SSL_CLIENT_AUTHENTICATION = FALSE
WALLET_LOCATION =
(SOURCE =
(METHOD = FILE)
(METHOD_DATA =
(DIRECTORY = C:\Users\kcr\Oracle\WALLETS)
)
)
SSL_CIPHER_SUITES= (SSL_RSA_EXPORT_WITH_RC4_40_MD5)
Java class:
try{
Properties properties = Utils.readProperties("weka/experiment/DatabaseUtils.props");
// Security.addProvider(new oracle.security.pki.OraclePKIProvider()); //Security syntax
String url = "jdbc:oracle:thin:#(DESCRIPTION =\n" +
" (ADDRESS = (PROTOCOL = TCP)(HOST = localhost)(PORT = 1521))\n" +
" (CONNECT_DATA =\n" +
" (SERVER = DEDICATED)\n" +
" (SERVICE_NAME = sal)\n" +
" )\n" +
" )";
java.util.Properties props = new java.util.Properties();
props.setProperty("user", "system");
props.setProperty("password", "weblogic");
// props.setProperty("javax.net.ssl.trustStore","C:\\Users\\kcr\\Oracle\\WALLETS\\ewallet.p12");
// props.setProperty("oracle.net.ssl_cipher_suites","SSL_RSA_EXPORT_WITH_RC4_40_MD5");
// props.setProperty("javax.net.ssl.trustStoreType","PKCS12");
//props.setProperty("javax.net.ssl.trustStorePassword","welcome2");
DriverManager.registerDriver(new OracleDriver());
Connection conn = DriverManager.getConnection(url, props);
/* OracleDataSource ods = new OracleDataSource();
ods.setUser("system");
ods.setPassword("weblogic");
ods.setURL(url);
Connection conn = ods.getConnection();*/
Statement stmt = conn.createStatement();
ResultSet rset = stmt.executeQuery("select * from iris");
///////////////////////////
while(rset.next()) {
for (int i=1; i<=5; i++) {
System.out.print(rset.getString(i));
}
}
Are you expecting that your SELECT statement would return encrypted data and that your System.out.print calls would print encrypted output to the screen? If so, that's not the way Advanced Security works: it encrypts data over the wire, but the data is unencrypted in the SQLNet stack. Your SELECT statement, therefore, would always see the data in an unencrypted state. You would need to do a SQLNet trace or use some sort of packet sniffer to see the encrypted data flowing over the wire.
You'll find the documentation in "SSL With Oracle JDBC Thin Driver".
In particular you should probably use PROTOCOL = TCPS instead of PROTOCOL = TCP. I'd also suggest using a stronger cipher suite (and avoid the anonymous ones, since with them you don't verify the identity of the remote server).
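A sketch of what the TCPS variant could look like, reusing the trust-store properties already commented out in the question (the TCPS listener port and the cipher suite are assumptions here and depend on your listener and wallet configuration):

String url = "jdbc:oracle:thin:@(DESCRIPTION =" +
    " (ADDRESS = (PROTOCOL = TCPS)(HOST = localhost)(PORT = 2484))" +   // TCPS port is an assumption
    " (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = sal)))";

java.util.Properties props = new java.util.Properties();
props.setProperty("user", "system");
props.setProperty("password", "weblogic");
// Trust store pointing at the wallet exported as PKCS12, as in the commented-out lines above
props.setProperty("javax.net.ssl.trustStore", "C:\\Users\\kcr\\Oracle\\WALLETS\\ewallet.p12");
props.setProperty("javax.net.ssl.trustStoreType", "PKCS12");
props.setProperty("javax.net.ssl.trustStorePassword", "welcome2");
// Prefer a non-anonymous, non-export cipher suite
props.setProperty("oracle.net.ssl_cipher_suites", "SSL_RSA_WITH_AES_128_CBC_SHA");

Connection conn = DriverManager.getConnection(url, props);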
