2 changes: 1 addition & 1 deletion bd-api/src/api.rs
@@ -590,7 +590,7 @@ impl Api {
.iter()
.map(|(k, v)| {
(
-k.to_string(),
+k.clone(),
ProtoData {
data_type: Some(Data_type::StringData(v.clone())),
..Default::default()
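Note on the change above: for a String key, clone() and to_string() produce the same owned value, so this is an intent/readability fix rather than a behavior change. A minimal standalone sketch (not part of this PR) illustrating the equivalence:

// Both calls yield an owned String with identical contents; clone() states
// the intent directly when the source is already a String, while to_string()
// is the generic conversion through the ToString trait.
fn main() {
  let k: &String = &String::from("key");
  let a: String = k.clone();
  let b: String = k.to_string();
  assert_eq!(a, b);
}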
68 changes: 37 additions & 31 deletions bd-logger/src/consumer_test.rs
@@ -167,7 +167,7 @@ async fn upload_retries() {
// upload attempts.
for _ in 0 .. 11 {
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 10);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 10);
log_upload
.response_tx
.send(UploadResponse {
@@ -191,7 +191,7 @@ async fn upload_retries() {
// We should now receive an upload with the logs from the second batch, as we already gave up on
// the first one.
let log_upload = setup.next_upload().await;
assert_eq!(log_upload.payload.log_upload().logs[0], b"b");
assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs[0], b"b");
log_upload
.response_tx
.send(UploadResponse {
@@ -271,8 +271,11 @@ async fn continuous_buffer_upload_byte_limit() {

// The first upload should contain just one log, as the 150 byte log exceeds the 100 limit.
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 1);
-assert_eq!(log_upload.payload.log_upload().logs[0].len(), 150);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 1);
+assert_eq!(
+  log_upload.payload.log_upload().legacy_flatbuffer_logs[0].len(),
+  150
+);

log_upload
.response_tx
@@ -284,8 +287,11 @@

// We should then receive a second upload with the second log line.
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 1);
-assert_eq!(log_upload.payload.log_upload().logs[0].len(), 150);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 1);
+assert_eq!(
+  log_upload.payload.log_upload().legacy_flatbuffer_logs[0].len(),
+  150
+);
}

// Verifies that we shut down the continuous buffer even if there is a pending log upload.
@@ -306,7 +312,7 @@ async fn continuous_buffer_upload_shutdown() {
setup.producer.write(&[0; 150]).unwrap();

let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 1);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 1);

// Without responding to the upload, we shut down the continuous buffer.
setup.shutdown().await;
@@ -336,8 +342,8 @@ async fn uploading_full_batch_failure() {

// The first upload should contain 10 (batch size) logs, starting at the start of the buffer.
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 10);
-assert_eq!(log_upload.payload.log_upload().logs[0], &[0]);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 10);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs[0], &[0]);

let first_uuid = log_upload.uuid.clone();

@@ -352,8 +358,8 @@

// The second upload should be the same as the previous one, with the same uuid as before.
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 10);
-assert_eq!(log_upload.payload.log_upload().logs[0], &[0]);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 10);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs[0], &[0]);
assert_eq!(log_upload.uuid, first_uuid);

// This time we signal that the upload was ack'd.
@@ -372,8 +378,8 @@

// The third upload should only contain one log (11 - 10 = 1).
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 1);
-assert_eq!(log_upload.payload.log_upload().logs[0], &[10]);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 1);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs[0], &[10]);

// Since this is not retrying the first one, ensure that the uuid is different.
assert_ne!(log_upload.uuid, first_uuid);
@@ -402,7 +408,7 @@ async fn uploading_partial_batch_failure() {
// We haven't reached the batch limit, but awaiting will have us hit the time deadline once there
// are no more logs to read.
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 4);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 4);

let uuid = log_upload.uuid;

@@ -423,7 +429,7 @@

let log_upload = setup.next_upload().await;
assert_eq!(log_upload.uuid, uuid);
-assert_eq!(log_upload.payload.log_upload().logs.len(), 4);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 4);

log_upload
.response_tx
@@ -450,7 +456,7 @@ async fn total_batch_upload_timeout() {
setup.await_logs_flushed(2).await;

let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 2);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 2);

setup.shutdown().await;
}
@@ -535,10 +541,10 @@ async fn age_limit_log_uploads() {

// We should only get the 2 most recent logs.
let log_upload = setup.next_upload().await;
-assert_eq!(log_upload.payload.log_upload().logs.len(), 2);
+assert_eq!(log_upload.payload.log_upload().legacy_flatbuffer_logs.len(), 2);
assert_eq!(
time::OffsetDateTime::from_unix_timestamp(
-root_as_log(&log_upload.payload.log_upload().logs[0])
+root_as_log(&log_upload.payload.log_upload().legacy_flatbuffer_logs[0])
.unwrap()
.timestamp()
.unwrap()
@@ -719,8 +725,8 @@ async fn upload_multiple_continuous_buffers() {

let upload_1_payload = upload_1.payload;
// The order is unspecified, so figure out which one is which.
-if *upload_1_payload.log_upload().logs == vec![b"a".to_vec()] {
-  assert_eq!(*upload_2.payload.log_upload().logs, vec![b"b".to_vec()]);
+if *upload_1_payload.log_upload().legacy_flatbuffer_logs == vec![b"a".to_vec()] {
+  assert_eq!(*upload_2.payload.log_upload().legacy_flatbuffer_logs, vec![b"b".to_vec()]);
upload_1
.response_tx
.send(UploadResponse {
@@ -729,8 +735,8 @@
})
.unwrap();
} else {
-assert_eq!(*upload_1_payload.log_upload().logs, vec![b"b".to_vec()]);
-assert_eq!(*upload_2.payload.log_upload().logs, vec![b"a".to_vec()]);
+assert_eq!(*upload_1_payload.log_upload().legacy_flatbuffer_logs, vec![b"b".to_vec()]);
+assert_eq!(*upload_2.payload.log_upload().legacy_flatbuffer_logs, vec![b"a".to_vec()]);
upload_2
.response_tx
.send(UploadResponse {
@@ -752,7 +758,7 @@ async fn upload_multiple_continuous_buffers() {
10.seconds().advance().await;

let upload_3 = setup.next_upload().await;
-assert_eq!(*upload_3.payload.log_upload().logs, vec![b"a2".to_vec()]);
+assert_eq!(*upload_3.payload.log_upload().legacy_flatbuffer_logs, vec![b"a2".to_vec()]);

// We intentionally leave both the response for buffer b and a open to validate that we don't
// require an ack for proper shutdown.
@@ -803,7 +809,7 @@ async fn trigger_upload_byte_size_limit() {

log::debug!("waiting for upload");
let upload = setup.next_upload().await;
-assert_eq!(upload.payload.log_upload().logs.len(), 1);
+assert_eq!(upload.payload.log_upload().legacy_flatbuffer_logs.len(), 1);
}

#[tokio::test]
@@ -864,7 +870,7 @@ async fn uploaded_trigger() {

let upload = setup.next_upload().await;

-assert_eq!(upload.payload.log_upload().logs.len(), 2);
+assert_eq!(upload.payload.log_upload().legacy_flatbuffer_logs.len(), 2);
}

#[tokio::test]
@@ -907,9 +913,9 @@ async fn log_streaming() {
producer.write(b"more data").unwrap();

assert_matches!(log_upload_rx.recv().await.unwrap(), DataUpload::AcklessLogsUpload(upload) => {
-  assert_eq!(upload.logs.len(), 2);
-  assert_eq!(upload.logs[0], b"data");
-  assert_eq!(upload.logs[1], b"more data");
+  assert_eq!(upload.legacy_flatbuffer_logs.len(), 2);
+  assert_eq!(upload.legacy_flatbuffer_logs[0], b"data");
+  assert_eq!(upload.legacy_flatbuffer_logs[1], b"more data");
});
}

@@ -965,12 +971,12 @@ async fn streaming_batch_size_flag() {

// Should batch foo+bar (due to batch size = 2)
assert_matches!(log_upload_rx.recv().await.unwrap(), DataUpload::AcklessLogsUpload(upload) => {
assert_eq!(upload.logs, vec![b"foo".to_vec(), b"bar".to_vec()]);
assert_eq!(upload.legacy_flatbuffer_logs, vec![b"foo".to_vec(), b"bar".to_vec()]);
});

// Next upload should contain "baz"
assert_matches!(log_upload_rx.recv().await.unwrap(), DataUpload::AcklessLogsUpload(upload) => {
assert_eq!(upload.logs, vec![b"baz".to_vec()]);
assert_eq!(upload.legacy_flatbuffer_logs, vec![b"baz".to_vec()]);
});
}

@@ -1019,7 +1025,7 @@ async fn log_streaming_shutdown() {

// Receive the upload request but do not complete it.
assert_matches!(log_upload_rx.recv().await.unwrap(), DataUpload::AcklessLogsUpload(upload) => {
assert_eq!(upload.logs[0], b"data");
assert_eq!(upload.legacy_flatbuffer_logs[0], b"data");
});

// Perform a shutdown, making sure that we complete the shutdown even if there is a pending
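Aside: the byte-limit tests above (continuous_buffer_upload_byte_limit) pin down a specific batching rule: a log larger than the upload byte limit still ships, just as a batch of one. A hypothetical sketch of that rule, illustrative only and not this crate's actual implementation:

// Hypothetical batcher: close the batch when the next log would cross the
// byte limit, but never hold back a single oversized log.
fn take_batch(pending: &mut Vec<Vec<u8>>, byte_limit: usize) -> Vec<Vec<u8>> {
  let mut batch = Vec::new();
  let mut bytes = 0;
  while !pending.is_empty() {
    let next_len = pending[0].len();
    if !batch.is_empty() && bytes + next_len > byte_limit {
      break; // the next log starts a new upload
    }
    let log = pending.remove(0);
    bytes += log.len();
    batch.push(log);
    if bytes >= byte_limit {
      break; // limit reached, possibly by a single oversized log
    }
  }
  batch
}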
2 changes: 1 addition & 1 deletion bd-logger/src/service.rs
@@ -143,7 +143,7 @@ impl tower::Service<UploadRequest> for Uploader {

let log_upload_request = LogUploadRequest {
upload_uuid: request.uuid.clone(),
-logs: request.log_upload.logs.clone(),
+legacy_flatbuffer_logs: request.log_upload.logs.clone(),
buffer_uuid: request.log_upload.buffer_id.clone(),
ackless: request.ackless,
..Default::default()
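The rename here leaves the in-process payload's logs field untouched and only renames the wire-facing request field. A minimal sketch of that split, using hypothetical stand-in types (the real ones are generated and not shown in this diff):

// In-process upload payload: field name unchanged.
struct LogUpload {
  logs: Vec<Vec<u8>>,
}

// Wire-facing request: field renamed to flag the payload encoding.
#[derive(Default)]
struct LogUploadRequest {
  legacy_flatbuffer_logs: Vec<Vec<u8>>,
}

fn to_request(upload: &LogUpload) -> LogUploadRequest {
  LogUploadRequest {
    legacy_flatbuffer_logs: upload.logs.clone(),
    ..Default::default()
  }
}

Assuming LogUploadRequest is protobuf-generated (the ..Default::default() pattern suggests rust-protobuf), the rename is wire-compatible as long as the field's tag number is unchanged, since protobuf encodes by tag rather than by name.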
2 changes: 1 addition & 1 deletion bd-logger/src/test/embedded_logger_integration.rs
@@ -182,7 +182,7 @@ async fn configuration_update_with_log_uploads() {
);

assert_matches!(setup.server.next_log_upload().await, Some(log_upload) => {
-  assert_eq!(log_upload.logs.len(), 1);
+  assert_eq!(log_upload.legacy_flatbuffer_logs.len(), 1);
});

setup.shutdown.shutdown().await;
18 changes: 11 additions & 7 deletions bd-proto/src/flatbuffers/report_generated.rs
@@ -50,18 +50,19 @@ pub mod v_1 {
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_REPORT_TYPE: i8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
-pub const ENUM_MAX_REPORT_TYPE: i8 = 7;
+pub const ENUM_MAX_REPORT_TYPE: i8 = 8;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
-pub const ENUM_VALUES_REPORT_TYPE: [ReportType; 8] = [
+pub const ENUM_VALUES_REPORT_TYPE: [ReportType; 9] = [
ReportType::Unknown,
ReportType::AppNotResponding,
ReportType::HandledError,
ReportType::JVMCrash,
ReportType::MemoryTermination,
ReportType::NativeCrash,
ReportType::StrictModeViolation,
-  ReportType::JavaScriptError,
+  ReportType::JavaScriptNonFatalError,
+  ReportType::JavaScriptFatalError,
];

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
@@ -76,10 +77,11 @@ impl ReportType {
pub const MemoryTermination: Self = Self(4);
pub const NativeCrash: Self = Self(5);
pub const StrictModeViolation: Self = Self(6);
-pub const JavaScriptError: Self = Self(7);
+pub const JavaScriptNonFatalError: Self = Self(7);
+pub const JavaScriptFatalError: Self = Self(8);

pub const ENUM_MIN: i8 = 0;
-pub const ENUM_MAX: i8 = 7;
+pub const ENUM_MAX: i8 = 8;
pub const ENUM_VALUES: &'static [Self] = &[
Self::Unknown,
Self::AppNotResponding,
@@ -88,7 +90,8 @@ impl ReportType {
Self::MemoryTermination,
Self::NativeCrash,
Self::StrictModeViolation,
-  Self::JavaScriptError,
+  Self::JavaScriptNonFatalError,
+  Self::JavaScriptFatalError,
];
/// Returns the variant's name, or None if unknown.
pub fn variant_name(self) -> Option<&'static str> {
@@ -100,7 +103,8 @@ impl ReportType {
Self::MemoryTermination => Some("MemoryTermination"),
Self::NativeCrash => Some("NativeCrash"),
Self::StrictModeViolation => Some("StrictModeViolation"),
Self::JavaScriptError => Some("JavaScriptError"),
Self::JavaScriptNonFatalError => Some("JavaScriptNonFatalError"),
Self::JavaScriptFatalError => Some("JavaScriptFatalError"),
_ => None,
}
}
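Since ReportType is an open enum (a thin wrapper over i8, as the _ => None arm above shows), callers that previously matched JavaScriptError must now handle both new variants and keep a fallback for values written by newer producers. A hypothetical consumer-side sketch; the fatal/non-fatal grouping is illustrative, not defined by this PR:

// Hypothetical classifier over the generated open enum; unknown or future
// i8 values fall through to None rather than panicking.
fn is_fatal_report(t: ReportType) -> Option<bool> {
  match t {
    ReportType::JVMCrash
    | ReportType::NativeCrash
    | ReportType::JavaScriptFatalError => Some(true),
    ReportType::HandledError
    | ReportType::StrictModeViolation
    | ReportType::JavaScriptNonFatalError => Some(false),
    _ => None, // unknown, future, or context-dependent variants
  }
}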