Skip to content
This repository was archived by the owner on Sep 3, 2022. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ use Github pull requests for this purpose.
### Running tests
We use [`tox`](https://tox.readthedocs.io/) for running our tests. To run tests
before sending out a pull request, just
[install tox](https://http://tox.readthedocs.io/en/latest/install.html) and run
[install tox](https://tox.readthedocs.io/en/latest/install.html) and run

```shell
$ tox
Expand Down
5 changes: 4 additions & 1 deletion datalab/stackdriver/monitoring/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,15 @@

from __future__ import absolute_import

from google.cloud.monitoring import Aligner, Reducer
from google.cloud.monitoring import enums
from ._group import Groups
from ._metric import MetricDescriptors
from ._query import Query
from ._query_metadata import QueryMetadata
from ._resource import ResourceDescriptors

Aligner = enums.Aggregation.Aligner
Reducer = enums.Aggregation.Reducer

__all__ = ['Aligner', 'Reducer', 'Groups', 'MetricDescriptors', 'Query', 'QueryMetadata',
'ResourceDescriptors']
17 changes: 7 additions & 10 deletions datalab/stackdriver/monitoring/_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@

import pandas

import datalab.context
import google.datalab

from . import _utils

Expand All @@ -32,17 +32,14 @@ class Groups(object):
_DISPLAY_HEADERS = ('Group ID', 'Group name', 'Parent ID', 'Parent name',
'Is cluster', 'Filter')

def __init__(self, project_id=None, context=None):
def __init__(self, context=None):
"""Initializes the Groups for a Stackdriver project.

Args:
project_id: An optional project ID or number to override the one provided
by the context.
context: An optional Context object to use instead of the global default.
"""
self._context = context or datalab.context.Context.default()
self._project_id = project_id or self._context.project_id
self._client = _utils.make_client(project_id, context)
self._context = context or google.datalab.Context.default()
self._client = _utils.make_client(self._context)
self._group_dict = None

def list(self, pattern='*'):
Expand All @@ -58,7 +55,7 @@ def list(self, pattern='*'):
"""
if self._group_dict is None:
self._group_dict = collections.OrderedDict(
(group.id, group) for group in self._client.list_groups())
(group.name, group) for group in self._client.list_groups())

return [group for group in self._group_dict.values()
if fnmatch.fnmatch(group.display_name, pattern)]
Expand All @@ -79,10 +76,10 @@ def as_dataframe(self, pattern='*', max_rows=None):
for i, group in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
parent = self._group_dict.get(group.parent_id)
parent = self._group_dict.get(group.parent_name)
parent_display_name = '' if parent is None else parent.display_name
data.append([
group.id, group.display_name, group.parent_id,
group.name, group.display_name, group.parent_name,
parent_display_name, group.is_cluster, group.filter])

return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
15 changes: 8 additions & 7 deletions datalab/stackdriver/monitoring/_metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
from __future__ import absolute_import
from builtins import object

from google.cloud.monitoring_v3 import enums

import fnmatch
import pandas

Expand All @@ -27,20 +29,17 @@ class MetricDescriptors(object):
_DISPLAY_HEADERS = ('Metric type', 'Display name', 'Kind', 'Value', 'Unit',
'Labels')

def __init__(self, filter_string=None, type_prefix=None,
project_id=None, context=None):
def __init__(self, filter_string=None, type_prefix=None, context=None):
"""Initializes the MetricDescriptors based on the specified filters.

Args:
filter_string: An optional filter expression describing the resource
descriptors to be returned.
type_prefix: An optional prefix constraining the selected metric types.
This adds ``metric.type = starts_with("<prefix>")`` to the filter.
project_id: An optional project ID or number to override the one provided
by the context.
context: An optional Context object to use instead of the global default.
"""
self._client = _utils.make_client(project_id, context)
self._client = _utils.make_client(context)
self._filter_string = filter_string
self._type_prefix = type_prefix
self._descriptors = None
Expand Down Expand Up @@ -81,7 +80,9 @@ def as_dataframe(self, pattern='*', max_rows=None):
break
labels = ', '. join([l.key for l in metric.labels])
data.append([
metric.type, metric.display_name, metric.metric_kind,
metric.value_type, metric.unit, labels])
metric.type, metric.display_name,
enums.MetricDescriptor.MetricKind(metric.metric_kind).name,
enums.MetricDescriptor.ValueType(metric.value_type).name,
metric.unit, labels])

return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
23 changes: 11 additions & 12 deletions datalab/stackdriver/monitoring/_query.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,33 +14,32 @@

from __future__ import absolute_import

import google.cloud.monitoring
import google.cloud.monitoring_v3.query

from . import _query_metadata
from . import _utils


class Query(google.cloud.monitoring.Query):
class Query(google.cloud.monitoring_v3.query.Query):
"""Query object for retrieving metric data."""

def __init__(self,
metric_type=google.cloud.monitoring.Query.DEFAULT_METRIC_TYPE,
end_time=None, days=0, hours=0, minutes=0,
project_id=None, context=None):
metric_type=google.cloud.monitoring_v3.query.Query.DEFAULT_METRIC_TYPE,
end_time=None, days=0, hours=0, minutes=0, context=None):
"""Initializes the core query parameters.

The start time (exclusive) is determined by combining the
values of ``days``, ``hours``, and ``minutes``, and subtracting
the resulting duration from the end time.

It is also allowed to omit the end time and duration here,
in which case :meth:`~google.cloud.monitoring.query.Query.select_interval`
in which case :meth:`~google.cloud.monitoring_v3.query.Query.select_interval`
must be called before the query is executed.

Args:
metric_type: The metric type name. The default value is
:data:`Query.DEFAULT_METRIC_TYPE
<google.cloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`, but
<google.cloud.monitoring_v3.query.Query.DEFAULT_METRIC_TYPE>`, but
please note that this default value is provided only for
demonstration purposes and is subject to change.
end_time: The end time (inclusive) of the time interval for which
Expand All @@ -49,18 +48,18 @@ def __init__(self,
days: The number of days in the time interval.
hours: The number of hours in the time interval.
minutes: The number of minutes in the time interval.
project_id: An optional project ID or number to override the one provided
by the context.
context: An optional Context object to use instead of the global default.

Raises:
ValueError: ``end_time`` was specified but ``days``, ``hours``, and
``minutes`` are all zero. If you really want to specify a point in
time, use
:meth:`~google.cloud.monitoring.query.Query.select_interval`.
:meth:`~google.cloud.monitoring_v3.query.Query.select_interval`.
"""
client = _utils.make_client(project_id, context)
super(Query, self).__init__(client, metric_type,
client = _utils.make_client(context)
super(Query, self).__init__(client.metrics_client,
project=client.project,
metric_type=metric_type,
end_time=end_time,
days=days, hours=hours, minutes=minutes)

Expand Down
10 changes: 6 additions & 4 deletions datalab/stackdriver/monitoring/_query_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@
from __future__ import unicode_literals
from builtins import object

import google.cloud.monitoring
from google.cloud.monitoring_v3 import _dataframe
from google.protobuf.json_format import MessageToDict
import pandas


Expand Down Expand Up @@ -64,8 +65,9 @@ def as_dataframe(self, max_rows=None):
"""
max_rows = len(self._timeseries_list) if max_rows is None else max_rows
headers = [{
'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()}
for ts in self._timeseries_list[:max_rows]]
'resource': MessageToDict(ts.resource),
'metric': MessageToDict(ts.metric)
} for ts in self._timeseries_list[:max_rows]]

if not headers:
return pandas.DataFrame()
Expand All @@ -78,7 +80,7 @@ def as_dataframe(self, max_rows=None):
for col in dataframe.columns])

# Re-order the columns.
resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(
resource_keys = _dataframe._sorted_resource_labels(
dataframe['resource.labels'].columns)
sorted_columns = [('resource.type', '')]
sorted_columns += [('resource.labels', key) for key in resource_keys]
Expand Down
6 changes: 2 additions & 4 deletions datalab/stackdriver/monitoring/_resource.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,17 +26,15 @@ class ResourceDescriptors(object):

_DISPLAY_HEADERS = ('Resource type', 'Display name', 'Labels')

def __init__(self, filter_string=None, project_id=None, context=None):
def __init__(self, filter_string=None, context=None):
"""Initializes the ResourceDescriptors based on the specified filters.

Args:
filter_string: An optional filter expression describing the resource
descriptors to be returned.
project_id: An optional project ID or number to override the one provided
by the context.
context: An optional Context object to use instead of the global default.
"""
self._client = _utils.make_client(project_id, context)
self._client = _utils.make_client(context)
self._filter_string = filter_string
self._descriptors = None

Expand Down
55 changes: 46 additions & 9 deletions datalab/stackdriver/monitoring/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,54 @@

from __future__ import absolute_import

import google.cloud.monitoring
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud.monitoring_v3 import MetricServiceClient
from google.cloud.monitoring_v3 import GroupServiceClient

import datalab.context
import google.datalab


def make_client(project_id=None, context=None):
context = context or datalab.context.Context.default()
project_id = project_id or context.project_id
client = google.cloud.monitoring.Client(
project=project_id,
# _MonitoringClient holds instances of the individual google.cloud.monitoring
# clients and translates each call from the old single-client signature, since
# the prior monitoring client library has been split into multiple client classes.
class _MonitoringClient(object):
def __init__(self, context):
self.project = context.project_id
client_info = ClientInfo(user_agent='pydatalab/v0')
self.metrics_client = MetricServiceClient(
credentials=context.credentials,
)
client._connection.USER_AGENT = 'pydatalab/v0'
client_info=client_info
)
self.group_client = GroupServiceClient(
credentials=context.credentials,
client_info=client_info
)

def list_metric_descriptors(self, filter_string=None, type_prefix=None):
filters = []
if filter_string is not None:
filters.append(filter_string)

if type_prefix is not None:
filters.append('metric.type = starts_with("{prefix}")'.format(
prefix=type_prefix))

metric_filter = ' AND '.join(filters)
metrics = self.metrics_client.list_metric_descriptors(
self.project, filter_=metric_filter)
return metrics

def list_resource_descriptors(self, filter_string=None):
resources = self.metrics_client.list_monitored_resource_descriptors(
self.project, filter_=filter_string)
return resources

def list_groups(self):
groups = self.group_client.list_groups(self.project)
return groups


def make_client(context=None):
context = context or google.datalab.Context.default()
client = _MonitoringClient(context)
return client
5 changes: 4 additions & 1 deletion google/datalab/stackdriver/monitoring/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,15 @@

from __future__ import absolute_import

from google.cloud.monitoring import Aligner, Reducer
from google.cloud.monitoring import enums
from ._group import Groups
from ._metric import MetricDescriptors
from ._query import Query
from ._query_metadata import QueryMetadata
from ._resource import ResourceDescriptors

Aligner = enums.Aggregation.Aligner
Reducer = enums.Aggregation.Reducer

__all__ = ['Aligner', 'Reducer', 'Groups', 'MetricDescriptors', 'Query', 'QueryMetadata',
'ResourceDescriptors']
6 changes: 3 additions & 3 deletions google/datalab/stackdriver/monitoring/_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def list(self, pattern='*'):
"""
if self._group_dict is None:
self._group_dict = collections.OrderedDict(
(group.id, group) for group in self._client.list_groups())
(group.name, group) for group in self._client.list_groups())

return [group for group in self._group_dict.values()
if fnmatch.fnmatch(group.display_name, pattern)]
Expand All @@ -76,10 +76,10 @@ def as_dataframe(self, pattern='*', max_rows=None):
for i, group in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
parent = self._group_dict.get(group.parent_id)
parent = self._group_dict.get(group.parent_name)
parent_display_name = '' if parent is None else parent.display_name
data.append([
group.id, group.display_name, group.parent_id,
group.name, group.display_name, group.parent_name,
parent_display_name, group.is_cluster, group.filter])

return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
8 changes: 6 additions & 2 deletions google/datalab/stackdriver/monitoring/_metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
from __future__ import absolute_import
from builtins import object

from google.cloud.monitoring_v3 import enums

import fnmatch
import pandas

Expand Down Expand Up @@ -78,7 +80,9 @@ def as_dataframe(self, pattern='*', max_rows=None):
break
labels = ', '. join([l.key for l in metric.labels])
data.append([
metric.type, metric.display_name, metric.metric_kind,
metric.value_type, metric.unit, labels])
metric.type, metric.display_name,
enums.MetricDescriptor.MetricKind(metric.metric_kind).name,
enums.MetricDescriptor.ValueType(metric.value_type).name,
metric.unit, labels])

return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
Loading