1- """
2- Integration tests for the get_metrics plugin
3- """
4-
51import json
62import os
73
@@ -129,184 +125,59 @@ def test_conflicting_time_params():
129125 )
130126
131127
@pytest.mark.smoke
def test_objstorage_metrics_basic():
    """Smoke-test get_metrics against the objectstorage service.

    objectstorage does not require ``--entity-ids``; the request is scoped
    with ``--entity-region`` instead, so the command can run without
    pre-provisioned entities.

    Asserts only that the command produced metric output (either the
    "Fetching metrics" progress text or a payload containing "data").
    """
    output = exec_test_command(BASE_CMD + [
        "objectstorage",
        "--metrics", "obj_requests_num:sum",
        "--duration", "15",
        "--duration-unit", "min",
        "--entity-region", "us-east",
    ])

    # NOTE: a leftover debug print(f"SUCCESS: {output}") was removed here;
    # test output should come from assertion failures, not stdout noise.
    assert "Fetching metrics" in output or "data" in output.lower()
def test_obj_metrics_with_filters():
    """Test get_metrics with objectstorage service and filters"""
    args = [
        "objectstorage",
        "--metrics", "obj_requests_num:sum",
        "--duration", "30",
        "--duration-unit", "min",
        "--entity-region", "us-west",
        "--filters", "request_type:eq:get",
    ]
    result = exec_test_command(BASE_CMD + args)

    # Accept either the progress banner or a data payload as success.
    produced_metrics = "Fetching metrics" in result or "data" in result.lower()
    assert produced_metrics
187158
def test_absolute_time_metrics():
    """Test get_metrics with objectstorage service and absolute time range"""
    # Absolute window (--start-time/--end-time) instead of a relative
    # --duration, plus an explicit 5-minute granularity.
    command = BASE_CMD + [
        "objectstorage",
        "--metrics", "obj_requests_num:sum",
        "--start-time", "2025-12-22T00:00:00Z",
        "--end-time", "2025-12-22T12:00:00Z",
        "--entity-region", "us-southeast",
        "--granularity", "5",
        "--granularity-unit", "min",
    ]
    output = exec_test_command(command)

    assert "Fetching metrics" in output or "data" in output.lower()
303175def test_malformed_filters ():
304176 """Test handling of malformed filter syntax"""
305177 exec_failing_test_command (
306178 BASE_CMD + [
307- "dbaas" ,
308- "--entity-ids" , "123" ,
309- "--metrics" , "cpu_usage:avg" ,
179+ "objectstorage" ,
180+ "--metrics" , "obj_requests_num:sum" ,
310181 "--duration" , "15" ,
311182 "--duration-unit" , "min" ,
312183 "--filters" , "invalid_filter_format"
@@ -315,25 +186,4 @@ def test_malformed_filters():
315186 )
316187
317188
318- def test_service_validation ():
319- """Test that valid services are recognized correctly"""
320- valid_services = ["nodebalancer" , "netloadbalancer" , "linodes" , "dbaas" ]
321-
322- for service in valid_services :
323- # This should fail due to missing authentication, not service validation
324- try :
325- exec_failing_test_command (
326- BASE_CMD + [
327- service ,
328- "--entity-ids" , "123" ,
329- "--metrics" , "cpu_usage:avg" ,
330- "--duration" , "15" ,
331- "--duration-unit" , "min"
332- ],
333- expected_code = ExitCodes .REQUEST_FAILED
334- )
335- except AssertionError as e :
336- # If it fails with wrong exit code, check it's not service validation error
337- error_msg = str (e ).lower ()
338- assert "invalid choice" not in error_msg
339- assert f"invalid choice: '{ service } '" not in error_msg
189+
0 commit comments